/*
 * Resource Director Technology(RDT)
 * - Cache Allocation code.
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Authors:
 *    Fenghua Yu <fenghua.yu@intel.com>
 *    Tony Luck <tony.luck@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * More information about RDT can be found in the Intel (R) x86 Architecture
 * Software Developer Manual June 2016, volume 3, section 17.17.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/kernfs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "internal.h"

/*
 * Check whether MBA bandwidth percentage value is correct. The value is
 * checked against the minimum and maximum bandwidth values specified by
 * the hardware. The allocated bandwidth percentage is rounded to the next
 * control step available on the hardware.
 */
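/*
 * Illustrative example (the limits are hardware-reported and vary by
 * platform): with membw.min_bw = 10 and membw.bw_gran = 10, writing
 * "42" is accepted and rounded up to 50.
 */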
static bool bw_validate_amd(char *buf, unsigned long *data,
			    struct rdt_resource *r)
{
	unsigned long bw;
	int ret;

	ret = kstrtoul(buf, 10, &bw);
	if (ret) {
		rdt_last_cmd_printf("Non-decimal digit in MB value %s\n", buf);
		return false;
	}

	if (bw < r->membw.min_bw || bw > r->default_ctrl) {
		rdt_last_cmd_printf("MB value %ld out of range [%d,%d]\n", bw,
				    r->membw.min_bw, r->default_ctrl);
		return false;
	}

	*data = roundup(bw, (unsigned long)r->membw.bw_gran);
	return true;
}

int parse_bw_amd(struct rdt_parse_data *data, struct rdt_resource *r,
		 struct rdt_domain *d)
{
	unsigned long bw_val;

	if (d->have_new_ctrl) {
		rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
		return -EINVAL;
	}

	if (!bw_validate_amd(data->buf, &bw_val, r))
		return -EINVAL;

	d->new_ctrl = bw_val;
	d->have_new_ctrl = true;

	return 0;
}

/*
 * Check whether MBA bandwidth percentage value is correct. The value is
 * checked against the minimum and maximum bandwidth values specified by
 * the hardware. The allocated bandwidth percentage is rounded to the next
 * control step available on the hardware.
 */
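/*
 * Note: with the MBA software controller enabled (the "mba_MBps" mount
 * option), the user-supplied value is a bandwidth in MBps rather than a
 * percentage, which is why the range check below is skipped when
 * is_mba_sc() is true.
 */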
static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
{
	unsigned long bw;
	int ret;

	/*
	 * Only linear delay values are supported for current Intel SKUs.
	 */
	if (!r->membw.delay_linear) {
		rdt_last_cmd_puts("No support for non-linear MB domains\n");
		return false;
	}

	ret = kstrtoul(buf, 10, &bw);
	if (ret) {
		rdt_last_cmd_printf("Non-decimal digit in MB value %s\n", buf);
		return false;
	}

	if ((bw < r->membw.min_bw || bw > r->default_ctrl) &&
	    !is_mba_sc(r)) {
		rdt_last_cmd_printf("MB value %ld out of range [%d,%d]\n", bw,
				    r->membw.min_bw, r->default_ctrl);
		return false;
	}

	*data = roundup(bw, (unsigned long)r->membw.bw_gran);
	return true;
}

int parse_bw_intel(struct rdt_parse_data *data, struct rdt_resource *r,
		   struct rdt_domain *d)
{
	unsigned long bw_val;

	if (d->have_new_ctrl) {
		rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
		return -EINVAL;
	}

	if (!bw_validate(data->buf, &bw_val, r))
		return -EINVAL;

	d->new_ctrl = bw_val;
	d->have_new_ctrl = true;

	return 0;
}

/*
 * Check whether a cache bit mask is valid. The SDM says:
 *	Please note that all (and only) contiguous '1' combinations
 *	are allowed (e.g. FFFFH, 0FF0H, 003CH, etc.).
 * Additionally Haswell requires at least two bits set.
 */
bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r)
{
	unsigned long first_bit, zero_bit, val;
	unsigned int cbm_len = r->cache.cbm_len;
	int ret;

	ret = kstrtoul(buf, 16, &val);
	if (ret) {
		rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf);
		return false;
	}

	if (val == 0 || val > r->default_ctrl) {
		rdt_last_cmd_puts("Mask out of range\n");
		return false;
	}
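
	/*
	 * Contiguity check: find the first set bit, then the first zero
	 * bit above it. If any set bit remains above that zero bit, the
	 * mask has a hole. Worked example (illustrative, cbm_len = 8):
	 * 0x3c gives first_bit = 2 and zero_bit = 6 with no set bit above,
	 * so it is accepted; 0xa5 gives first_bit = 0 and zero_bit = 1
	 * with bit 2 set above it, so it is rejected.
	 */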
	first_bit = find_first_bit(&val, cbm_len);
	zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);

	if (find_next_bit(&val, cbm_len, zero_bit) < cbm_len) {
		rdt_last_cmd_printf("The mask %lx has non-consecutive 1-bits\n", val);
		return false;
	}

	if ((zero_bit - first_bit) < r->cache.min_cbm_bits) {
		rdt_last_cmd_printf("Need at least %d bits in the mask\n",
				    r->cache.min_cbm_bits);
		return false;
	}

	*data = val;
	return true;
}

/*
 * Check whether a cache bit mask is valid. AMD allows non-contiguous
 * bitmasks.
 */
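/*
 * For example, a mask of 0xf0f - rejected by cbm_validate_intel() above
 * because of the hole - is accepted here as long as it does not exceed
 * r->default_ctrl.
 */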
bool cbm_validate_amd(char *buf, u32 *data, struct rdt_resource *r)
{
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 16, &val);
	if (ret) {
		rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf);
		return false;
	}

	if (val > r->default_ctrl) {
		rdt_last_cmd_puts("Mask out of range\n");
		return false;
	}

	*data = val;
	return true;
}

/*
 * Read one cache bit mask (hex). Check that it is valid for the current
 * resource type.
 */
int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
	      struct rdt_domain *d)
{
	struct rdtgroup *rdtgrp = data->rdtgrp;
	u32 cbm_val;

	if (d->have_new_ctrl) {
		rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
		return -EINVAL;
	}

	/*
	 * Cannot set up more than one pseudo-locked region in a cache
	 * hierarchy.
	 */
	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
	    rdtgroup_pseudo_locked_in_hierarchy(d)) {
		rdt_last_cmd_puts("Pseudo-locked region in hierarchy\n");
		return -EINVAL;
	}

	if (!r->cbm_validate(data->buf, &cbm_val, r))
		return -EINVAL;

	if ((rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
	     rdtgrp->mode == RDT_MODE_SHAREABLE) &&
	    rdtgroup_cbm_overlaps_pseudo_locked(d, cbm_val)) {
		rdt_last_cmd_puts("CBM overlaps with pseudo-locked region\n");
		return -EINVAL;
	}

	/*
	 * The CBM may not overlap with the CBM of another closid if
	 * either is exclusive.
	 */
	if (rdtgroup_cbm_overlaps(r, d, cbm_val, rdtgrp->closid, true)) {
		rdt_last_cmd_puts("Overlaps with exclusive group\n");
		return -EINVAL;
	}

	if (rdtgroup_cbm_overlaps(r, d, cbm_val, rdtgrp->closid, false)) {
		if (rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			rdt_last_cmd_puts("Overlaps with other group\n");
			return -EINVAL;
		}
	}

	d->new_ctrl = cbm_val;
	d->have_new_ctrl = true;

	return 0;
}

/*
 * For each domain in this resource we expect to find a series of:
 *	id=mask
 * separated by ";". The "id" is in decimal, and must match one of
 * the "id"s for this resource.
 */
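/*
 * For example (hypothetical masks), the caller has already stripped the
 * "L3:" prefix from the schemata line "L3:0=7ff;1=3ff", so this function
 * sees "0=7ff;1=3ff" and stages 0x7ff for domain 0 and 0x3ff for
 * domain 1.
 */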
static int parse_line(char *line, struct rdt_resource *r,
		      struct rdtgroup *rdtgrp)
{
	struct rdt_parse_data data;
	char *dom = NULL, *id;
	struct rdt_domain *d;
	unsigned long dom_id;

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
	    r->rid == RDT_RESOURCE_MBA) {
		rdt_last_cmd_puts("Cannot pseudo-lock MBA resource\n");
		return -EINVAL;
	}

next:
	if (!line || line[0] == '\0')
		return 0;
	dom = strsep(&line, ";");
	id = strsep(&dom, "=");
	if (!dom || kstrtoul(id, 10, &dom_id)) {
		rdt_last_cmd_puts("Missing '=' or non-numeric domain\n");
		return -EINVAL;
	}
	dom = strim(dom);
	list_for_each_entry(d, &r->domains, list) {
		if (d->id == dom_id) {
			data.buf = dom;
			data.rdtgrp = rdtgrp;
			if (r->parse_ctrlval(&data, r, d))
				return -EINVAL;
			if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
				/*
				 * We are in pseudo-locking setup mode and
				 * have just parsed a valid CBM that should
				 * be pseudo-locked. Only one locked region
				 * per resource group and domain is allowed,
				 * so just do the required initialization
				 * for a single region and return.
				 */
				rdtgrp->plr->r = r;
				rdtgrp->plr->d = d;
				rdtgrp->plr->cbm = d->new_ctrl;
				d->plr = rdtgrp->plr;
				return 0;
			}
			goto next;
		}
	}
	return -EINVAL;
}
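
/*
 * Apply the staged new_ctrl values for @closid: for every domain whose
 * value changed, pick one online CPU in the domain and have it write the
 * control MSR via rdt_ctrl_update(), using an IPI for remote CPUs.
 */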
int update_domains(struct rdt_resource *r, int closid)
{
	struct msr_param msr_param;
	cpumask_var_t cpu_mask;
	struct rdt_domain *d;
	bool mba_sc;
	u32 *dc;
	int cpu;

	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	msr_param.low = closid;
	msr_param.high = msr_param.low + 1;
	msr_param.res = r;

	mba_sc = is_mba_sc(r);
	list_for_each_entry(d, &r->domains, list) {
		dc = !mba_sc ? d->ctrl_val : d->mbps_val;
		if (d->have_new_ctrl && d->new_ctrl != dc[closid]) {
			cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
			dc[closid] = d->new_ctrl;
		}
	}

	/*
	 * Avoid writing the control msr with control values when
	 * MBA software controller is enabled
	 */
	if (cpumask_empty(cpu_mask) || mba_sc)
		goto done;
	cpu = get_cpu();
	/* Update resource control msr on this CPU if it's in cpu_mask. */
	if (cpumask_test_cpu(cpu, cpu_mask))
		rdt_ctrl_update(&msr_param);
	/* Update resource control msr on other CPUs. */
	smp_call_function_many(cpu_mask, rdt_ctrl_update, &msr_param, 1);
	put_cpu();

done:
	free_cpumask_var(cpu_mask);

	return 0;
}

static int rdtgroup_parse_resource(char *resname, char *tok,
				   struct rdtgroup *rdtgrp)
{
	struct rdt_resource *r;

	for_each_alloc_enabled_rdt_resource(r) {
		if (!strcmp(resname, r->name) && rdtgrp->closid < r->num_closid)
			return parse_line(tok, r, rdtgrp);
	}
	rdt_last_cmd_printf("Unknown or unsupported resource name '%s'\n", resname);
	return -EINVAL;
}
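
/*
 * Illustrative usage from user space (group name and values are
 * hypothetical):
 *
 *   # echo "L3:0=7ff;1=7ff" > /sys/fs/resctrl/grp1/schemata
 *   # echo "MB:0=50;1=100" > /sys/fs/resctrl/grp1/schemata
 *
 * Each write must be newline-terminated and may contain one
 * "resource:domain=value[;domain=value]" line per resource.
 */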
ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct rdtgroup *rdtgrp;
	struct rdt_domain *dom;
	struct rdt_resource *r;
	char *tok, *resname;
	int ret = 0;

	/* Valid input requires a trailing newline */
	if (nbytes == 0 || buf[nbytes - 1] != '\n')
		return -EINVAL;
	buf[nbytes - 1] = '\0';

	cpus_read_lock();
	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		cpus_read_unlock();
		return -ENOENT;
	}
	rdt_last_cmd_clear();

	/*
	 * No changes to pseudo-locked region allowed. It has to be removed
	 * and re-created instead.
	 */
	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Resource group is pseudo-locked\n");
		goto out;
	}

	for_each_alloc_enabled_rdt_resource(r) {
		list_for_each_entry(dom, &r->domains, list)
			dom->have_new_ctrl = false;
	}

	while ((tok = strsep(&buf, "\n")) != NULL) {
		resname = strim(strsep(&tok, ":"));
		if (!tok) {
			rdt_last_cmd_puts("Missing ':'\n");
			ret = -EINVAL;
			goto out;
		}
		if (tok[0] == '\0') {
			rdt_last_cmd_printf("Missing '%s' value\n", resname);
			ret = -EINVAL;
			goto out;
		}
		ret = rdtgroup_parse_resource(resname, tok, rdtgrp);
		if (ret)
			goto out;
	}

	for_each_alloc_enabled_rdt_resource(r) {
		ret = update_domains(r, rdtgrp->closid);
		if (ret)
			goto out;
	}

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		/*
		 * If pseudo-locking fails we keep the resource group in
		 * mode RDT_MODE_PSEUDO_LOCKSETUP with its class of service
		 * active and updated for just the domain the pseudo-locked
		 * region was requested for.
		 */
		ret = rdtgroup_pseudo_lock_create(rdtgrp);
	}

out:
	rdtgroup_kn_unlock(of->kn);
	cpus_read_unlock();
	return ret ?: nbytes;
}
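
/*
 * Print one resource's line of the schemata file, e.g. (hypothetical
 * values) "L3:0=7ff;1=3ff" - one "id=value" pair per domain, separated
 * by ";".
 */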
static void show_doms(struct seq_file *s, struct rdt_resource *r, int closid)
{
	struct rdt_domain *dom;
	bool sep = false;
	u32 ctrl_val;

	seq_printf(s, "%*s:", max_name_width, r->name);
	list_for_each_entry(dom, &r->domains, list) {
		if (sep)
			seq_puts(s, ";");

		ctrl_val = (!is_mba_sc(r) ? dom->ctrl_val[closid] :
			    dom->mbps_val[closid]);
		seq_printf(s, r->format_str, dom->id, max_data_width,
			   ctrl_val);
		sep = true;
	}
	seq_puts(s, "\n");
}

int rdtgroup_schemata_show(struct kernfs_open_file *of,
			   struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;
	int ret = 0;
	u32 closid;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (rdtgrp) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			for_each_alloc_enabled_rdt_resource(r)
				seq_printf(s, "%s:uninitialized\n", r->name);
		} else if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
			if (!rdtgrp->plr->d) {
				rdt_last_cmd_clear();
				rdt_last_cmd_puts("Cache domain offline\n");
				ret = -ENODEV;
			} else {
				seq_printf(s, "%s:%d=%x\n",
					   rdtgrp->plr->r->name,
					   rdtgrp->plr->d->id,
					   rdtgrp->plr->cbm);
			}
		} else {
			closid = rdtgrp->closid;
			for_each_alloc_enabled_rdt_resource(r) {
				if (closid < r->num_closid)
					show_doms(s, r, closid);
			}
		}
	} else {
		ret = -ENOENT;
	}
	rdtgroup_kn_unlock(of->kn);
	return ret;
}
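
/*
 * The RDT monitoring counters can only be read from a CPU in the domain
 * being monitored, so the read is routed through an IPI to one CPU in
 * d->cpu_mask.
 */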
void mon_event_read(struct rmid_read *rr, struct rdt_domain *d,
		    struct rdtgroup *rdtgrp, int evtid, int first)
{
	/*
	 * Setup the parameters to send to the IPI to read the data.
	 */
	rr->rgrp = rdtgrp;
	rr->evtid = evtid;
	rr->d = d;
	rr->val = 0;
	rr->first = first;

	smp_call_function_any(&d->cpu_mask, mon_event_count, rr, 1);
}

int rdtgroup_mondata_show(struct seq_file *m, void *arg)
{
	struct kernfs_open_file *of = m->private;
	u32 resid, evtid, domid;
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;
	union mon_data_bits md;
	struct rdt_domain *d;
	struct rmid_read rr;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		ret = -ENOENT;
		goto out;
	}

	md.priv = of->kn->priv;
	resid = md.u.rid;
	domid = md.u.domid;
	evtid = md.u.evtid;

	r = &rdt_resources_all[resid];
	d = rdt_find_domain(r, domid, NULL);
	if (IS_ERR_OR_NULL(d)) {
		ret = -ENOENT;
		goto out;
	}

	mon_event_read(&rr, d, rdtgrp, evtid, false);

	if (rr.val & RMID_VAL_ERROR)
		seq_puts(m, "Error\n");
	else if (rr.val & RMID_VAL_UNAVAIL)
		seq_puts(m, "Unavailable\n");
	else
		seq_printf(m, "%llu\n", rr.val * r->mon_scale);

out:
	rdtgroup_kn_unlock(of->kn);
	return ret;
}