// SPDX-License-Identifier: GPL-2.0
* Resource Director Technology (RDT)
* Pseudo-locking support built on top of Cache Allocation Technology (CAT)
* Copyright (C) 2018 Intel Corporation
* Author: Reinette Chatre <reinette.chatre@intel.com>
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cacheinfo.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/mman.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/intel-family.h>
#include <asm/intel_rdt_sched.h>
#include <asm/perf_event.h>
#include "intel_rdt.h"
#define CREATE_TRACE_POINTS
#include "intel_rdt_pseudo_lock_event.h"
* MSR_MISC_FEATURE_CONTROL register enables the modification of hardware
* prefetcher state. Details about this register can be found in the MSR
* tables for specific platforms found in Intel's SDM.
#define MSR_MISC_FEATURE_CONTROL 0x000001a4
* The bits needed to disable hardware prefetching vary based on the
* platform. During initialization we will discover which bits to use.
static u64 prefetch_disable_bits;
* Major number assigned to and shared by all devices exposing
* pseudo-locked regions.
static unsigned int pseudo_lock_major;
static unsigned long pseudo_lock_minor_avail = GENMASK(MINORBITS, 0);
static struct class *pseudo_lock_class;
* get_prefetch_disable_bits - prefetch disable bits of supported platforms
* Capture the list of platforms that have been validated to support
* pseudo-locking. This includes testing to ensure pseudo-locked regions
* with low cache miss rates can be created under a variety of load conditions
* as well as that these pseudo-locked regions can maintain their low cache
* miss rates under a variety of load conditions for significant lengths of time.
* After a platform has been validated to support pseudo-locking its
* hardware prefetch disable bits are included here as they are documented
* in the SDM.
* When adding a platform here also add support for its cache events to
* measure_cycles_perf_fn()
* Return:
* If platform is supported, the bits to disable hardware prefetchers, 0
* if platform is not supported.
static u64 get_prefetch_disable_bits(void)
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
boot_cpu_data.x86 != 6)
switch (boot_cpu_data.x86_model) {
case INTEL_FAM6_BROADWELL_X:
* SDM defines bits of MSR_MISC_FEATURE_CONTROL register
* 0 L2 Hardware Prefetcher Disable (R/W)
* 1 L2 Adjacent Cache Line Prefetcher Disable (R/W)
* 2 DCU Hardware Prefetcher Disable (R/W)
* 3 DCU IP Prefetcher Disable (R/W)
case INTEL_FAM6_ATOM_GOLDMONT:
case INTEL_FAM6_ATOM_GEMINI_LAKE:
* SDM defines bits of MSR_MISC_FEATURE_CONTROL register
* 0 L2 Hardware Prefetcher Disable (R/W)
* 2 DCU Hardware Prefetcher Disable (R/W)
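/*
 * Illustration only (not part of the original excerpt): if every prefetcher
 * listed above is to be disabled, the documented bit positions imply masks
 * along these lines:
 *
 *   Broadwell X:              bits 0-3  -> 0xF
 *   Goldmont / Gemini Lake:   bits 0, 2 -> 0x5
 *
 * The exact values returned by get_prefetch_disable_bits() are assumed here
 * from the bit listings rather than quoted from the elided code.
 */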
* Helper to write 64bit value to MSR without tracing. Used when
* use of the cache should be restricted and use of registers used
* for local variables avoided.
static inline void pseudo_wrmsrl_notrace(unsigned int msr, u64 val)
__wrmsr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32));
* pseudo_lock_minor_get - Obtain available minor number
* @minor: Pointer to where new minor number will be stored
* A bitmask is used to track available minor numbers. Here the next free
* minor number is marked as unavailable and returned.
* Return: 0 on success, <0 on failure.
static int pseudo_lock_minor_get(unsigned int *minor)
unsigned long first_bit;
first_bit = find_first_bit(&pseudo_lock_minor_avail, MINORBITS);
if (first_bit == MINORBITS)
__clear_bit(first_bit, &pseudo_lock_minor_avail);
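/*
 * Worked example (illustrative, not from the original file): the tracking
 * mask starts out with all minor bits set. After two successful
 * pseudo_lock_minor_get() calls, bits 0 and 1 are clear, so the next call
 * finds first_bit == 2 and hands out minor number 2. A later
 * pseudo_lock_minor_release(2) sets bit 2 again, making that minor reusable.
 */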
* pseudo_lock_minor_release - Return minor number to available
* @minor: The minor number made available
static void pseudo_lock_minor_release(unsigned int minor)
__set_bit(minor, &pseudo_lock_minor_avail);
* region_find_by_minor - Locate a pseudo-lock region by inode minor number
* @minor: The minor number of the device representing pseudo-locked region
* When the character device is accessed we need to determine which
* pseudo-locked region it belongs to. This is done by matching the minor
* number of the device to the pseudo-locked region to which it belongs.
* Minor numbers are assigned at the time a pseudo-locked region is associated
* with a cache instance.
* Return: On success return pointer to resource group owning the pseudo-locked
* region, NULL on failure.
static struct rdtgroup *region_find_by_minor(unsigned int minor)
struct rdtgroup *rdtgrp, *rdtgrp_match = NULL;
list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
if (rdtgrp->plr && rdtgrp->plr->minor == minor) {
rdtgrp_match = rdtgrp;
* pseudo_lock_pm_req - A power management QoS request list entry
* @list: Entry within the @pm_reqs list for a pseudo-locked region
* @req: PM QoS request
struct pseudo_lock_pm_req {
struct list_head list;
struct dev_pm_qos_request req;
static void pseudo_lock_cstates_relax(struct pseudo_lock_region *plr)
struct pseudo_lock_pm_req *pm_req, *next;
list_for_each_entry_safe(pm_req, next, &plr->pm_reqs, list) {
dev_pm_qos_remove_request(&pm_req->req);
list_del(&pm_req->list);
* pseudo_lock_cstates_constrain - Restrict cores from entering C6
* To prevent the cache from being affected by power management, entering
* C6 has to be avoided. This is accomplished by requesting a latency
* requirement lower than the lowest C6 exit latency of all supported
* platforms as found in the cpuidle state tables in the intel_idle driver.
* At this time it is possible to do so with a single latency requirement
* for all supported platforms.
* Since Goldmont is supported, which is affected by X86_BUG_MONITOR,
* the ACPI latencies need to be considered while keeping in mind that C2
* may be set to map to deeper sleep states. In this case the latency
* requirement needs to prevent entering C2 also.
static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr)
struct pseudo_lock_pm_req *pm_req;
for_each_cpu(cpu, &plr->d->cpu_mask) {
pm_req = kzalloc(sizeof(*pm_req), GFP_KERNEL);
rdt_last_cmd_puts("fail allocating mem for PM QoS\n");
ret = dev_pm_qos_add_request(get_cpu_device(cpu),
DEV_PM_QOS_RESUME_LATENCY,
rdt_last_cmd_printf("fail to add latency req cpu%d\n",
list_add(&pm_req->list, &plr->pm_reqs);
pseudo_lock_cstates_relax(plr);
* pseudo_lock_region_clear - Reset pseudo-lock region data
* @plr: pseudo-lock region
* All content of the pseudo-locked region is reset - any memory allocated
static void pseudo_lock_region_clear(struct pseudo_lock_region *plr)
plr->debugfs_dir = NULL;
* pseudo_lock_region_init - Initialize pseudo-lock region information
* @plr: pseudo-lock region
* Called after user provided a schemata to be pseudo-locked. From the
* schemata the &struct pseudo_lock_region is on entry already initialized
* with the resource, domain, and capacity bitmask. Here the information
* required for pseudo-locking is deduced from this data and &struct
* pseudo_lock_region initialized further. This information includes:
* - size in bytes of the region to be pseudo-locked
* - cache line size to know the stride with which data needs to be accessed
* to be pseudo-locked
* - a cpu associated with the cache instance on which the pseudo-locking
* flow can be executed
* Return: 0 on success, <0 on failure. Descriptive error will be written
* to last_cmd_status buffer.
static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
struct cpu_cacheinfo *ci;
/* Pick the first cpu we find that is associated with the cache. */
plr->cpu = cpumask_first(&plr->d->cpu_mask);
if (!cpu_online(plr->cpu)) {
rdt_last_cmd_printf("cpu %u associated with cache not online\n",
ci = get_cpu_cacheinfo(plr->cpu);
plr->size = rdtgroup_cbm_to_size(plr->r, plr->d, plr->cbm);
for (i = 0; i < ci->num_leaves; i++) {
if (ci->info_list[i].level == plr->r->cache_level) {
plr->line_size = ci->info_list[i].coherency_line_size;
rdt_last_cmd_puts("unable to determine cache line size\n");
pseudo_lock_region_clear(plr);
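/*
 * Illustrative numbers (assumed, not from the original excerpt): for a
 * 1 MiB L2 cache instance with an 8-bit capacity bitmask, each CBM bit
 * covers 1 MiB / 8 = 128 KiB. A schemata CBM of 0x3 therefore yields a
 * plr->size of 256 KiB, and with a 64 byte coherency_line_size the locking
 * loop later touches 256 KiB / 64 = 4096 cache lines.
 */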
* pseudo_lock_init - Initialize a pseudo-lock region
* @rdtgrp: resource group to which new pseudo-locked region will belong
* A pseudo-locked region is associated with a resource group. When this
* association is created the pseudo-locked region is initialized. The
* details of the pseudo-locked region are not known at this time so only
* allocation is done and association established.
* Return: 0 on success, <0 on failure
static int pseudo_lock_init(struct rdtgroup *rdtgrp)
struct pseudo_lock_region *plr;
plr = kzalloc(sizeof(*plr), GFP_KERNEL);
init_waitqueue_head(&plr->lock_thread_wq);
INIT_LIST_HEAD(&plr->pm_reqs);
* pseudo_lock_region_alloc - Allocate kernel memory that will be pseudo-locked
* @plr: pseudo-lock region
* Initialize the details required to set up the pseudo-locked region and
* allocate the contiguous memory that will be pseudo-locked to the cache.
* Return: 0 on success, <0 on failure. Descriptive error will be written
* to last_cmd_status buffer.
static int pseudo_lock_region_alloc(struct pseudo_lock_region *plr)
ret = pseudo_lock_region_init(plr);
* We do not yet support contiguous regions larger than
* KMALLOC_MAX_SIZE.
if (plr->size > KMALLOC_MAX_SIZE) {
rdt_last_cmd_puts("requested region exceeds maximum size\n");
plr->kmem = kzalloc(plr->size, GFP_KERNEL);
rdt_last_cmd_puts("unable to allocate memory\n");
pseudo_lock_region_clear(plr);
* pseudo_lock_free - Free a pseudo-locked region
* @rdtgrp: resource group to which pseudo-locked region belonged
* The pseudo-locked region's resources have already been released, or not
* yet created at this point. Now it can be freed and disassociated from the
* resource group.
static void pseudo_lock_free(struct rdtgroup *rdtgrp)
pseudo_lock_region_clear(rdtgrp->plr);
* pseudo_lock_fn - Load kernel memory into cache
* @_rdtgrp: resource group to which pseudo-lock region belongs
* This is the core pseudo-locking flow.
* First we ensure that the kernel memory cannot be found in the cache.
* Then, while taking care that there will be as little interference as
* possible, the memory to be loaded is accessed while the core is running
* with the class of service set to the bitmask of the pseudo-locked region.
* After this is complete no future CAT allocations will be allowed to
* overlap with this bitmask.
* Local register variables are utilized to ensure that the memory region
* to be locked is the only memory access made during the critical locking
* loop.
* Return: 0. Waiter on waitqueue will be woken on completion.
static int pseudo_lock_fn(void *_rdtgrp)
struct rdtgroup *rdtgrp = _rdtgrp;
struct pseudo_lock_region *plr = rdtgrp->plr;
u32 rmid_p, closid_p;
* The registers used for local register variables are also used
* when KASAN is active. When KASAN is active we use a regular
* variable to ensure we always use a valid pointer, but the cost
* is that this variable will enter the cache through evicting the
* memory we are trying to lock into the cache. Thus expect lower
* pseudo-locking success rate when KASAN is active.
unsigned int line_size;
register unsigned int line_size asm("esi");
register unsigned int size asm("edi");
register void *mem_r asm("rbx");
register void *mem_r asm("ebx");
#endif /* CONFIG_X86_64 */
#endif /* CONFIG_KASAN */
* Make sure none of the allocated memory is cached. If it is we
* will get a cache hit in the loop below from outside of the pseudo-locked
* region.
* wbinvd (as opposed to clflush/clflushopt) is required to
* increase likelihood that allocated cache portion will be filled
* with associated memory.
* Always called with interrupts enabled. By disabling interrupts
* we ensure that we will not be preempted during this critical section.
* Call wrmsr and rdmsr as directly as possible to avoid tracing
* clobbering local register variables or affecting cache accesses.
* Disable the hardware prefetcher so that when the end of the memory
* being pseudo-locked is reached the hardware will not read beyond
* the buffer and evict pseudo-locked memory read earlier from the
* cache.
__wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
closid_p = this_cpu_read(pqr_state.cur_closid);
rmid_p = this_cpu_read(pqr_state.cur_rmid);
line_size = plr->line_size;
* Critical section begin: start by writing the closid associated
* with the capacity bitmask of the cache region being
* pseudo-locked followed by reading of kernel memory to load it
* into the cache.
__wrmsr(IA32_PQR_ASSOC, rmid_p, rdtgrp->closid);
* Cache was flushed earlier. Now access kernel memory to read it
* into the cache region associated with the just activated plr->closid.
* Loop over data twice:
* - In first loop the cache region is shared with the page walker
* as it populates the paging structure caches (including TLB).
* - In the second loop the paging structure caches are used and
* cache region is populated with the memory being referenced.
for (i = 0; i < size; i += PAGE_SIZE) {
* Add a barrier to prevent speculative execution of this
* loop reading beyond the end of the buffer.
asm volatile("mov (%0,%1,1), %%eax\n\t"
: "r" (mem_r), "r" (i)
for (i = 0; i < size; i += line_size) {
* Add a barrier to prevent speculative execution of this
* loop reading beyond the end of the buffer.
asm volatile("mov (%0,%1,1), %%eax\n\t"
: "r" (mem_r), "r" (i)
* Critical section end: restore closid with capacity bitmask that
* does not overlap with pseudo-locked region.
__wrmsr(IA32_PQR_ASSOC, rmid_p, closid_p);
/* Re-enable the hardware prefetcher(s) */
wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0);
plr->thread_done = 1;
wake_up_interruptible(&plr->lock_thread_wq);
* rdtgroup_monitor_in_progress - Test if monitoring in progress
* @rdtgrp: resource group being queried
* Return: 1 if monitor groups have been created for this resource
* group, 0 otherwise.
static int rdtgroup_monitor_in_progress(struct rdtgroup *rdtgrp)
return !list_empty(&rdtgrp->mon.crdtgrp_list);
* rdtgroup_locksetup_user_restrict - Restrict user access to group
* @rdtgrp: resource group needing access restricted
* A resource group used for cache pseudo-locking cannot have cpus or tasks
* assigned to it. This is communicated to the user by restricting access
* to all the files that can be used to make such changes.
* Permissions restored with rdtgroup_locksetup_user_restore()
* Return: 0 on success, <0 on failure. If a failure occurs during the
* restriction of access an attempt will be made to restore permissions but
* the state of the mode of these files will be uncertain when a failure
* occurs.
static int rdtgroup_locksetup_user_restrict(struct rdtgroup *rdtgrp)
ret = rdtgroup_kn_mode_restrict(rdtgrp, "tasks");
ret = rdtgroup_kn_mode_restrict(rdtgrp, "cpus");
ret = rdtgroup_kn_mode_restrict(rdtgrp, "cpus_list");
if (rdt_mon_capable) {
ret = rdtgroup_kn_mode_restrict(rdtgrp, "mon_groups");
rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0777);
rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0777);
rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777);
* rdtgroup_locksetup_user_restore - Restore user access to group
* @rdtgrp: resource group needing access restored
* Restore all file access previously removed using
* rdtgroup_locksetup_user_restrict()
* Return: 0 on success, <0 on failure. If a failure occurs during the
* restoration of access an attempt will be made to restrict permissions
* again but the state of the mode of these files will be uncertain when
* a failure occurs.
static int rdtgroup_locksetup_user_restore(struct rdtgroup *rdtgrp)
ret = rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777);
ret = rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0777);
ret = rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0777);
if (rdt_mon_capable) {
ret = rdtgroup_kn_mode_restore(rdtgrp, "mon_groups", 0777);
rdtgroup_kn_mode_restrict(rdtgrp, "cpus_list");
rdtgroup_kn_mode_restrict(rdtgrp, "cpus");
rdtgroup_kn_mode_restrict(rdtgrp, "tasks");
* rdtgroup_locksetup_enter - Resource group enters locksetup mode
* @rdtgrp: resource group requested to enter locksetup mode
* A resource group enters locksetup mode to reflect that it would be used
* to represent a pseudo-locked region and is in the process of being set
* up to do so. A resource group used for a pseudo-locked region would
* lose the closid associated with it so we cannot allow it to have any
* tasks or cpus assigned nor permit tasks or cpus to be assigned in the
* future. Monitoring of a pseudo-locked region is not allowed either.
* The above and more restrictions on a pseudo-locked region are checked
* for and enforced before the resource group enters the locksetup mode.
* Returns: 0 if the resource group successfully entered locksetup mode, <0
* on failure. On failure the last_cmd_status buffer is updated with text to
* communicate details of failure to the user.
int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp)
* The default resource group can neither be removed nor lose the
* default closid associated with it.
if (rdtgrp == &rdtgroup_default) {
rdt_last_cmd_puts("cannot pseudo-lock default group\n");
* Cache Pseudo-locking not supported when CDP is enabled.
* Some things to consider if you would like to enable this
* support (using L3 CDP as example):
* - When CDP is enabled two separate resources are exposed,
* L3DATA and L3CODE, but they are actually on the same cache.
* The implication for pseudo-locking is that if a
* pseudo-locked region is created on a domain of one
* resource (eg. L3CODE), then a pseudo-locked region cannot
* be created on that same domain of the other resource
* (eg. L3DATA). This is because the creation of a
* pseudo-locked region involves a call to wbinvd that will
* affect all cache allocations on the particular domain.
* - Considering the previous, it may be possible to only
* expose one of the CDP resources to pseudo-locking and
* hide the other. For example, we could consider to only
* expose L3DATA and since the L3 cache is unified it is
* still possible to place instructions there and execute them.
* - If only one region is exposed to pseudo-locking we should
* still keep in mind that availability of a portion of cache
* for pseudo-locking should take into account both resources.
* Similarly, if a pseudo-locked region is created in one
* resource, the portion of cache used by it should be made
* unavailable to all future allocations from both resources.
if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled ||
rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled) {
rdt_last_cmd_puts("CDP enabled\n");
* Not knowing the bits to disable prefetching implies that this
* platform does not support Cache Pseudo-Locking.
prefetch_disable_bits = get_prefetch_disable_bits();
if (prefetch_disable_bits == 0) {
rdt_last_cmd_puts("pseudo-locking not supported\n");
if (rdtgroup_monitor_in_progress(rdtgrp)) {
rdt_last_cmd_puts("monitoring in progress\n");
if (rdtgroup_tasks_assigned(rdtgrp)) {
rdt_last_cmd_puts("tasks assigned to resource group\n");
if (!cpumask_empty(&rdtgrp->cpu_mask)) {
rdt_last_cmd_puts("CPUs assigned to resource group\n");
if (rdtgroup_locksetup_user_restrict(rdtgrp)) {
rdt_last_cmd_puts("unable to modify resctrl permissions\n");
ret = pseudo_lock_init(rdtgrp);
rdt_last_cmd_puts("unable to init pseudo-lock region\n");
* If this system is capable of monitoring, an rmid would have been
* allocated when the control group was created. It is not needed
* anymore once this group is used for pseudo-locking. This
* is safe to call on platforms not capable of monitoring.
free_rmid(rdtgrp->mon.rmid);
rdtgroup_locksetup_user_restore(rdtgrp);
* rdtgroup_locksetup_exit - resource group exits locksetup mode
* @rdtgrp: resource group
* When a resource group exits locksetup mode the earlier restrictions are
* lifted.
* Return: 0 on success, <0 on failure
int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp)
if (rdt_mon_capable) {
rdt_last_cmd_puts("out of RMIDs\n");
rdtgrp->mon.rmid = ret;
ret = rdtgroup_locksetup_user_restore(rdtgrp);
free_rmid(rdtgrp->mon.rmid);
pseudo_lock_free(rdtgrp);
* rdtgroup_cbm_overlaps_pseudo_locked - Test if CBM or portion is pseudo-locked
* @d represents a cache instance and @_cbm a capacity bitmask that is
* considered for it. Determine if @_cbm overlaps with any existing
* pseudo-locked region on @d.
* Return: true if @_cbm overlaps with pseudo-locked region on @d, false
* otherwise.
bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, u32 _cbm)
unsigned long *cbm = (unsigned long *)&_cbm;
unsigned long *cbm_b;
unsigned int cbm_len;
cbm_len = d->plr->r->cache.cbm_len;
cbm_b = (unsigned long *)&d->plr->cbm;
if (bitmap_intersects(cbm, cbm_b, cbm_len))
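/*
 * Worked example (illustrative values, not from the original excerpt):
 * with a cbm_len of 11 and a pseudo-locked region occupying CBM 0x00f,
 * a requested _cbm of 0x0f0 shares no bits and the test returns false,
 * while a requested _cbm of 0x018 shares bit 3 with the locked region and
 * returns true, blocking that allocation.
 */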
* rdtgroup_pseudo_locked_in_hierarchy - Pseudo-locked region in cache hierarchy
* @d: RDT domain under test
* The setup of a pseudo-locked region affects all cache instances within
* the hierarchy of the region. It is thus essential to know if any
* pseudo-locked regions exist within a cache hierarchy to prevent any
* attempts to create new pseudo-locked regions in the same hierarchy.
* Return: true if a pseudo-locked region exists in the hierarchy of @d or
* if it is not possible to test due to a memory allocation issue,
* false otherwise.
bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d)
cpumask_var_t cpu_with_psl;
struct rdt_resource *r;
struct rdt_domain *d_i;
if (!zalloc_cpumask_var(&cpu_with_psl, GFP_KERNEL))
* First determine which cpus have pseudo-locked regions
* associated with them.
for_each_alloc_enabled_rdt_resource(r) {
list_for_each_entry(d_i, &r->domains, list) {
cpumask_or(cpu_with_psl, cpu_with_psl,
* Next test if new pseudo-locked region would intersect with
* existing region.
if (cpumask_intersects(&d->cpu_mask, cpu_with_psl))
free_cpumask_var(cpu_with_psl);
* measure_cycles_lat_fn - Measure cycle latency to read pseudo-locked memory
* @_plr: pseudo-lock region to measure
* There is no deterministic way to test if a memory region is cached. One
* way is to measure how long it takes to read the memory; the speed of
* access is a good way to learn how close to the cpu the data was. Even
* more, if the prefetcher is disabled and the memory is read at a stride
* of half the cache line, then a cache miss will be easy to spot since the
* read of the first half would be significantly slower than the read of
* the second half.
* Return: 0. Waiter on waitqueue will be woken on completion.
static int measure_cycles_lat_fn(void *_plr)
struct pseudo_lock_region *plr = _plr;
* The registers used for local register variables are also used
* when KASAN is active. When KASAN is active we use a regular
* variable to ensure we always use a valid pointer to access memory.
* The cost is that accessing this pointer, which could be in
* cache, will be included in the measurement of memory read latency.
register void *mem_r asm("rbx");
register void *mem_r asm("ebx");
#endif /* CONFIG_X86_64 */
#endif /* CONFIG_KASAN */
* The wrmsr call may be reordered with the assignment below it.
* Call wrmsr as directly as possible to avoid tracing clobbering
* local register variable used for memory pointer.
__wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
* Dummy execute of the time measurement to load the needed
* instructions into the L1 instruction cache.
start = rdtsc_ordered();
for (i = 0; i < plr->size; i += 32) {
start = rdtsc_ordered();
asm volatile("mov (%0,%1,1), %%eax\n\t"
: "r" (mem_r), "r" (i)
end = rdtsc_ordered();
trace_pseudo_lock_mem_latency((u32)(end - start));
wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0);
plr->thread_done = 1;
wake_up_interruptible(&plr->lock_thread_wq);
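/*
 * Rough arithmetic for interpreting the traced deltas (assumed numbers,
 * not from the original excerpt): with a 2 GHz TSC each cycle is 0.5 ns,
 * so a traced latency of about 40 cycles corresponds to roughly 20 ns,
 * consistent with a cache hit, while several hundred cycles indicates the
 * read went out to memory, i.e. the pseudo-locked line was evicted.
 */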
static int measure_cycles_perf_fn(void *_plr)
unsigned long long l3_hits = 0, l3_miss = 0;
u64 l3_hit_bits = 0, l3_miss_bits = 0;
struct pseudo_lock_region *plr = _plr;
unsigned long long l2_hits, l2_miss;
u64 l2_hit_bits, l2_miss_bits;
* The registers used for local register variables are also used
* when KASAN is active. When KASAN is active we use regular variables
* at the cost of including cache access latency to these variables
* in the measurements.
unsigned int line_size;
register unsigned int line_size asm("esi");
register unsigned int size asm("edi");
register void *mem_r asm("rbx");
register void *mem_r asm("ebx");
#endif /* CONFIG_X86_64 */
#endif /* CONFIG_KASAN */
* Non-architectural event for the Goldmont Microarchitecture
* from Intel x86 Architecture Software Developer Manual (SDM):
* MEM_LOAD_UOPS_RETIRED D1H (event number)
* On Broadwell Microarchitecture the MEM_LOAD_UOPS_RETIRED event
* has two "no fix" errata associated with it: BDM35 and BDM100. On
* this platform we use the following events instead:
* L2_RQSTS 24H (Documented in https://download.01.org/perfmon/BDW/)
* LONGEST_LAT_CACHE 2EH (Documented in SDM)
* Start by setting flags for IA32_PERFEVTSELx:
* OS (Operating system mode) 0x2
* INT (APIC interrupt enable) 0x10
* EN (Enable counter) 0x40
* Then add the Umask value and event number to select performance
* event.
switch (boot_cpu_data.x86_model) {
case INTEL_FAM6_ATOM_GOLDMONT:
case INTEL_FAM6_ATOM_GEMINI_LAKE:
l2_hit_bits = (0x52ULL << 16) | (0x2 << 8) | 0xd1;
l2_miss_bits = (0x52ULL << 16) | (0x10 << 8) | 0xd1;
case INTEL_FAM6_BROADWELL_X:
/* On BDW the l2_hit_bits count references, not hits */
l2_hit_bits = (0x52ULL << 16) | (0xff << 8) | 0x24;
l2_miss_bits = (0x52ULL << 16) | (0x3f << 8) | 0x24;
/* On BDW the l3_hit_bits count references, not hits */
l3_hit_bits = (0x52ULL << 16) | (0x4f << 8) | 0x2e;
l3_miss_bits = (0x52ULL << 16) | (0x41 << 8) | 0x2e;
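/*
 * Decoding one of these constants for illustration (layout per the flag
 * list above; the worked value is an aid added here, not new behaviour):
 * the Goldmont l2_hit_bits value is
 *
 *   (0x52ULL << 16) | (0x2 << 8) | 0xd1 == 0x5202d1
 *
 * i.e. event select 0xD1 (MEM_LOAD_UOPS_RETIRED), umask 0x02, with the
 * OS, INT and EN flags (0x52) in the upper control bits of IA32_PERFEVTSELx.
 * Clearing (0x40ULL << 16) later drops only the EN flag to stop the counter.
 */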
local_irq_disable();
* Call wrmsr directly to avoid the local register variables being
* overwritten due to reordering of their assignment with
* the wrmsr calls.
__wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
/* Disable events and reset counters */
pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0, 0x0);
pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 1, 0x0);
pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_PERFCTR0, 0x0);
pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_PERFCTR0 + 1, 0x0);
if (l3_hit_bits > 0) {
pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 2, 0x0);
pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 3, 0x0);
pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_PERFCTR0 + 2, 0x0);
pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_PERFCTR0 + 3, 0x0);
/* Set and enable the L2 counters */
pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0, l2_hit_bits);
pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 1, l2_miss_bits);
if (l3_hit_bits > 0) {
pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 2,
pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 3,
line_size = plr->line_size;
for (i = 0; i < size; i += line_size) {
asm volatile("mov (%0,%1,1), %%eax\n\t"
: "r" (mem_r), "r" (i)
: "%eax", "memory");
* Call wrmsr directly (no tracing) to not influence
* the cache access counters as they are disabled.
pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0,
l2_hit_bits & ~(0x40ULL << 16));
pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 1,
l2_miss_bits & ~(0x40ULL << 16));
if (l3_hit_bits > 0) {
pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 2,
l3_hit_bits & ~(0x40ULL << 16));
pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 3,
l3_miss_bits & ~(0x40ULL << 16));
l2_hits = native_read_pmc(0);
l2_miss = native_read_pmc(1);
if (l3_hit_bits > 0) {
l3_hits = native_read_pmc(2);
l3_miss = native_read_pmc(3);
wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0);
* On BDW we count references and misses, need to adjust. Sometimes
* the "hits" counter is a bit more than the references, for
* example, x references but x + 1 hits. To not report invalid
* hit values in this case we treat that as misses equal to
* references.
if (boot_cpu_data.x86_model == INTEL_FAM6_BROADWELL_X)
l2_hits -= (l2_miss > l2_hits ? l2_hits : l2_miss);
trace_pseudo_lock_l2(l2_hits, l2_miss);
if (l3_hit_bits > 0) {
if (boot_cpu_data.x86_model == INTEL_FAM6_BROADWELL_X)
l3_hits -= (l3_miss > l3_hits ? l3_hits : l3_miss);
trace_pseudo_lock_l3(l3_hits, l3_miss);
plr->thread_done = 1;
wake_up_interruptible(&plr->lock_thread_wq);
* pseudo_lock_measure_cycles - Trigger latency measure to pseudo-locked region
* The measurement of latency to access a pseudo-locked region should be
* done from a cpu that is associated with that pseudo-locked region.
* Determine which cpu is associated with this region and start a thread on
* that cpu to perform the measurement, then wait for that thread to complete.
* Return: 0 on success, <0 on failure
static int pseudo_lock_measure_cycles(struct rdtgroup *rdtgrp, int sel)
struct pseudo_lock_region *plr = rdtgrp->plr;
struct task_struct *thread;
mutex_lock(&rdtgroup_mutex);
if (rdtgrp->flags & RDT_DELETED) {
plr->thread_done = 0;
cpu = cpumask_first(&plr->d->cpu_mask);
if (!cpu_online(cpu)) {
thread = kthread_create_on_node(measure_cycles_lat_fn, plr,
"pseudo_lock_measure/%u",
thread = kthread_create_on_node(measure_cycles_perf_fn, plr,
"pseudo_lock_measure/%u",
if (IS_ERR(thread)) {
ret = PTR_ERR(thread);
kthread_bind(thread, cpu);
wake_up_process(thread);
ret = wait_event_interruptible(plr->lock_thread_wq,
plr->thread_done == 1);
mutex_unlock(&rdtgroup_mutex);
static ssize_t pseudo_lock_measure_trigger(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
struct rdtgroup *rdtgrp = file->private_data;
buf_size = min(count, (sizeof(buf) - 1));
if (copy_from_user(buf, user_buf, buf_size))
buf[buf_size] = '\0';
ret = kstrtoint(buf, 10, &sel);
ret = debugfs_file_get(file->f_path.dentry);
ret = pseudo_lock_measure_cycles(rdtgrp, sel);
debugfs_file_put(file->f_path.dentry);
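/*
 * Example of driving this trigger from userspace (illustrative; the debugfs
 * mount point and the meaning of the selector are assumptions inferred from
 * the measure functions above, with 1 selecting the latency measurement and
 * another value the perf-counter measurement):
 *
 *	echo 1 > /sys/kernel/debug/resctrl/<group>/pseudo_lock_measure
 *
 * The resulting samples are emitted through the pseudo_lock trace events
 * and can be read back via the tracing interface.
 */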
static const struct file_operations pseudo_measure_fops = {
.write = pseudo_lock_measure_trigger,
.open = simple_open,
.llseek = default_llseek,
* rdtgroup_pseudo_lock_create - Create a pseudo-locked region
* @rdtgrp: resource group to which pseudo-lock region belongs
* Called when a resource group in the pseudo-locksetup mode receives a
* valid schemata that should be pseudo-locked. Since the resource group is
* in pseudo-locksetup mode the &struct pseudo_lock_region has already been
* allocated and initialized with the essential information. If a failure
* occurs the resource group remains in the pseudo-locksetup mode with the
* &struct pseudo_lock_region associated with it, but cleared of all
* information and ready for the user to re-attempt pseudo-locking by
* writing the schemata again.
* Return: 0 if the pseudo-locked region was successfully pseudo-locked, <0
* on failure. Descriptive error will be written to last_cmd_status buffer.
int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
struct pseudo_lock_region *plr = rdtgrp->plr;
struct task_struct *thread;
unsigned int new_minor;
ret = pseudo_lock_region_alloc(plr);
ret = pseudo_lock_cstates_constrain(plr);
plr->thread_done = 0;
thread = kthread_create_on_node(pseudo_lock_fn, rdtgrp,
cpu_to_node(plr->cpu),
"pseudo_lock/%u", plr->cpu);
if (IS_ERR(thread)) {
ret = PTR_ERR(thread);
rdt_last_cmd_printf("locking thread returned error %d\n", ret);
kthread_bind(thread, plr->cpu);
wake_up_process(thread);
ret = wait_event_interruptible(plr->lock_thread_wq,
plr->thread_done == 1);
* If the thread does not get on the CPU for whatever
* reason and the process which sets up the region is
* interrupted then this will leave the thread in runnable
* state and once it gets on the CPU it will dereference
* the cleared, but not freed, plr struct resulting in an
* empty pseudo-locking loop.
rdt_last_cmd_puts("locking thread interrupted\n");
ret = pseudo_lock_minor_get(&new_minor);
rdt_last_cmd_puts("unable to obtain a new minor number\n");
* Unlock access but do not release the reference. The
* pseudo-locked region will still be here on return.
* The mutex has to be released temporarily to avoid a potential
* deadlock with the mm->mmap_sem semaphore which is obtained in
* the device_create() and debugfs_create_dir() callpath below
* as well as before the mmap() callback is called.
mutex_unlock(&rdtgroup_mutex);
if (!IS_ERR_OR_NULL(debugfs_resctrl)) {
plr->debugfs_dir = debugfs_create_dir(rdtgrp->kn->name,
if (!IS_ERR_OR_NULL(plr->debugfs_dir))
debugfs_create_file("pseudo_lock_measure", 0200,
plr->debugfs_dir, rdtgrp,
&pseudo_measure_fops);
dev = device_create(pseudo_lock_class, NULL,
MKDEV(pseudo_lock_major, new_minor),
rdtgrp, "%s", rdtgrp->kn->name);
mutex_lock(&rdtgroup_mutex);
rdt_last_cmd_printf("failed to create character device: %d\n",
/* We released the mutex - check if group was removed while we did so */
if (rdtgrp->flags & RDT_DELETED) {
plr->minor = new_minor;
rdtgrp->mode = RDT_MODE_PSEUDO_LOCKED;
closid_free(rdtgrp->closid);
rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0444);
rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0444);
device_destroy(pseudo_lock_class, MKDEV(pseudo_lock_major, new_minor));
debugfs_remove_recursive(plr->debugfs_dir);
pseudo_lock_minor_release(new_minor);
pseudo_lock_cstates_relax(plr);
pseudo_lock_region_clear(plr);
* rdtgroup_pseudo_lock_remove - Remove a pseudo-locked region
* @rdtgrp: resource group to which the pseudo-locked region belongs
* The removal of a pseudo-locked region can be initiated when the resource
* group is removed via a "rmdir" from userspace or the
* unmount of the resctrl filesystem. On removal the resource group does
* not go back to pseudo-locksetup mode before it is removed, instead it is
* removed directly. There is thus asymmetry with the creation where the
* &struct pseudo_lock_region is removed here while it was not created in
* rdtgroup_pseudo_lock_create().
void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp)
struct pseudo_lock_region *plr = rdtgrp->plr;
if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
* Default group cannot be a pseudo-locked region so we can
* free closid here.
closid_free(rdtgrp->closid);
pseudo_lock_cstates_relax(plr);
debugfs_remove_recursive(rdtgrp->plr->debugfs_dir);
device_destroy(pseudo_lock_class, MKDEV(pseudo_lock_major, plr->minor));
pseudo_lock_minor_release(plr->minor);
pseudo_lock_free(rdtgrp);
static int pseudo_lock_dev_open(struct inode *inode, struct file *filp)
struct rdtgroup *rdtgrp;
mutex_lock(&rdtgroup_mutex);
rdtgrp = region_find_by_minor(iminor(inode));
mutex_unlock(&rdtgroup_mutex);
filp->private_data = rdtgrp;
atomic_inc(&rdtgrp->waitcount);
/* Perform a non-seekable open - llseek is not supported */
filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
mutex_unlock(&rdtgroup_mutex);
static int pseudo_lock_dev_release(struct inode *inode, struct file *filp)
struct rdtgroup *rdtgrp;
mutex_lock(&rdtgroup_mutex);
rdtgrp = filp->private_data;
mutex_unlock(&rdtgroup_mutex);
filp->private_data = NULL;
atomic_dec(&rdtgrp->waitcount);
mutex_unlock(&rdtgroup_mutex);
static int pseudo_lock_dev_mremap(struct vm_area_struct *area)
static const struct vm_operations_struct pseudo_mmap_ops = {
.mremap = pseudo_lock_dev_mremap,
static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma)
unsigned long vsize = vma->vm_end - vma->vm_start;
unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
struct pseudo_lock_region *plr;
struct rdtgroup *rdtgrp;
unsigned long physical;
unsigned long psize;
mutex_lock(&rdtgroup_mutex);
rdtgrp = filp->private_data;
mutex_unlock(&rdtgroup_mutex);
* Task is required to run with affinity to the cpus associated
* with the pseudo-locked region. If this is not the case the task
* may be scheduled elsewhere and invalidate entries in the
* pseudo-locked region.
if (!cpumask_subset(&current->cpus_allowed, &plr->d->cpu_mask)) {
mutex_unlock(&rdtgroup_mutex);
physical = __pa(plr->kmem) >> PAGE_SHIFT;
psize = plr->size - off;
if (off > plr->size) {
mutex_unlock(&rdtgroup_mutex);
* Ensure changes are carried directly to the memory being mapped,
* do not allow copy-on-write mapping.
if (!(vma->vm_flags & VM_SHARED)) {
mutex_unlock(&rdtgroup_mutex);
if (vsize > psize) {
mutex_unlock(&rdtgroup_mutex);
memset(plr->kmem + off, 0, vsize);
if (remap_pfn_range(vma, vma->vm_start, physical + vma->vm_pgoff,
vsize, vma->vm_page_prot)) {
mutex_unlock(&rdtgroup_mutex);
vma->vm_ops = &pseudo_mmap_ops;
mutex_unlock(&rdtgroup_mutex);
static const struct file_operations pseudo_lock_dev_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.open = pseudo_lock_dev_open,
.release = pseudo_lock_dev_release,
.mmap = pseudo_lock_dev_mmap,
static char *pseudo_lock_devnode(struct device *dev, umode_t *mode)
struct rdtgroup *rdtgrp;
rdtgrp = dev_get_drvdata(dev);
return kasprintf(GFP_KERNEL, "pseudo_lock/%s", rdtgrp->kn->name);
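/*
 * Illustrative userspace use of the character device exposed above (a
 * sketch; the device name, cpu set and mapping size are assumptions, the
 * real name comes from the resource group and the size from the
 * pseudo-locked region):
 *
 *	sched_setaffinity(0, sizeof(cpuset), &cpuset);
 *	fd = open("/dev/pseudo_lock/newlock", O_RDWR);
 *	mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * pseudo_lock_dev_mmap() above rejects copy-on-write (non-MAP_SHARED)
 * mappings, tasks that are not affine to the region's cpus, and requests
 * larger than the pseudo-locked size.
 */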
int rdt_pseudo_lock_init(void)
ret = register_chrdev(0, "pseudo_lock", &pseudo_lock_dev_fops);
pseudo_lock_major = ret;
pseudo_lock_class = class_create(THIS_MODULE, "pseudo_lock");
if (IS_ERR(pseudo_lock_class)) {
ret = PTR_ERR(pseudo_lock_class);
unregister_chrdev(pseudo_lock_major, "pseudo_lock");
pseudo_lock_class->devnode = pseudo_lock_devnode;
void rdt_pseudo_lock_release(void)
class_destroy(pseudo_lock_class);
pseudo_lock_class = NULL;
unregister_chrdev(pseudo_lock_major, "pseudo_lock");
pseudo_lock_major = 0;