// SPDX-License-Identifier: GPL-2.0-only
/*
 * User interface for Resource Allocation in Resource Director Technology(RDT)
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 *
 * More information about RDT can be found in the Intel (R) x86 Architecture
 * Software Developer Manual.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpumask.h>

#include <asm/msr.h>

#include "internal.h"

DEFINE_STATIC_KEY_FALSE(rdt_enable_key);
DEFINE_STATIC_KEY_FALSE(rdt_mon_enable_key);
DEFINE_STATIC_KEY_FALSE(rdt_alloc_enable_key);

/*
 * This is safe against resctrl_sched_in() called from __switch_to()
 * because __switch_to() is executed with interrupts disabled. A local call
 * from update_closid_rmid() is protected against __switch_to() because
 * preemption is disabled.
 */
void resctrl_arch_sync_cpu_defaults(void *info)
{
	struct resctrl_cpu_sync *r = info;

	if (r) {
		this_cpu_write(pqr_state.default_closid, r->closid);
		this_cpu_write(pqr_state.default_rmid, r->rmid);
	}

	/*
	 * We cannot unconditionally write the MSR because the currently
	 * executing task might have its own closid selected. Just reuse
	 * the context switch code.
	 */
	resctrl_sched_in(current);
}

#define INVALID_CONFIG_INDEX   UINT_MAX

/**
 * mon_event_config_index_get - get the hardware index for the
 *				configurable event
 * @evtid: event id.
 *
 * Return: 0 for evtid == QOS_L3_MBM_TOTAL_EVENT_ID
 *	   1 for evtid == QOS_L3_MBM_LOCAL_EVENT_ID
 *	   INVALID_CONFIG_INDEX for invalid evtid
 */
static inline unsigned int mon_event_config_index_get(u32 evtid)
{
	switch (evtid) {
	case QOS_L3_MBM_TOTAL_EVENT_ID:
		return 0;
	case QOS_L3_MBM_LOCAL_EVENT_ID:
		return 1;
	default:
		/* Should never reach here */
		return INVALID_CONFIG_INDEX;
	}
}

void resctrl_arch_mon_event_config_read(void *info)
{
	struct resctrl_mon_config_info *mon_info = info;
	unsigned int index;
	u64 msrval;

	index = mon_event_config_index_get(mon_info->evtid);
	if (index == INVALID_CONFIG_INDEX) {
		pr_warn_once("Invalid event id %d\n", mon_info->evtid);
		return;
	}
	rdmsrl(MSR_IA32_EVT_CFG_BASE + index, msrval);

	/* Report only the valid event configuration bits */
	mon_info->mon_config = msrval & MAX_EVT_CONFIG_BITS;
}

void resctrl_arch_mon_event_config_write(void *info)
{
	struct resctrl_mon_config_info *mon_info = info;
	unsigned int index;

	index = mon_event_config_index_get(mon_info->evtid);
	if (index == INVALID_CONFIG_INDEX) {
		pr_warn_once("Invalid event id %d\n", mon_info->evtid);
		mon_info->err = -EINVAL;
		return;
	}
	wrmsr(MSR_IA32_EVT_CFG_BASE + index, mon_info->mon_config, 0);

	mon_info->err = 0;
}
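/*
 * Illustrative sketch, not part of the original file: the EVT_CFG MSRs read
 * and written by the two helpers above are scoped to an L3 domain, so a
 * caller is expected to invoke the helpers on a CPU belonging to the target
 * domain, e.g. via smp_call_function_any(). The helper below, including its
 * name and the pr_info(), is a hypothetical usage example only.
 */
static void __maybe_unused example_read_mbm_total_config(struct rdt_domain *d)
{
	struct resctrl_mon_config_info mon_info = {};

	mon_info.evtid = QOS_L3_MBM_TOTAL_EVENT_ID;

	/* Run the MSR read on one CPU of the domain and wait for the result */
	smp_call_function_any(&d->cpu_mask, resctrl_arch_mon_event_config_read,
			      &mon_info, 1);

	pr_info("MBM total event config: 0x%02x\n", mon_info.mon_config);
}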
static void l3_qos_cfg_update(void *arg)
{
	bool *enable = arg;

	wrmsrl(MSR_IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL);
}

static void l2_qos_cfg_update(void *arg)
{
	bool *enable = arg;

	wrmsrl(MSR_IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL);
}

static int set_cache_qos_cfg(int level, bool enable)
{
	void (*update)(void *arg);
	struct rdt_resource *r_l;
	cpumask_var_t cpu_mask;
	struct rdt_domain *d;
	int cpu;

	/* Walking r->domains, ensure it can't race with cpuhp */
	lockdep_assert_cpus_held();

	if (level == RDT_RESOURCE_L3)
		update = l3_qos_cfg_update;
	else if (level == RDT_RESOURCE_L2)
		update = l2_qos_cfg_update;
	else
		return -EINVAL;

	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	r_l = &rdt_resources_all[level].r_resctrl;
	list_for_each_entry(d, &r_l->domains, list) {
		if (r_l->cache.arch_has_per_cpu_cfg)
			/* Pick all the CPUs in the domain instance */
			for_each_cpu(cpu, &d->cpu_mask)
				cpumask_set_cpu(cpu, cpu_mask);
		else
			/* Pick one CPU from each domain instance to update MSR */
			cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
	}

	/* Update QOS_CFG MSR on all the CPUs in cpu_mask */
	on_each_cpu_mask(cpu_mask, update, &enable, 1);

	free_cpumask_var(cpu_mask);

	return 0;
}

/* Restore the qos cfg state when a domain comes online */
void rdt_domain_reconfigure_cdp(struct rdt_resource *r)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);

	if (!r->cdp_capable)
		return;

	if (r->rid == RDT_RESOURCE_L2)
		l2_qos_cfg_update(&hw_res->cdp_enabled);

	if (r->rid == RDT_RESOURCE_L3)
		l3_qos_cfg_update(&hw_res->cdp_enabled);
}

static int cdp_enable(int level)
{
	struct rdt_resource *r_l = &rdt_resources_all[level].r_resctrl;
	int ret;

	if (!r_l->alloc_capable)
		return -EINVAL;

	ret = set_cache_qos_cfg(level, true);
	if (!ret)
		rdt_resources_all[level].cdp_enabled = true;

	return ret;
}

static void cdp_disable(int level)
{
	struct rdt_hw_resource *r_hw = &rdt_resources_all[level];

	if (r_hw->cdp_enabled) {
		set_cache_qos_cfg(level, false);
		r_hw->cdp_enabled = false;
	}
}

int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable)
{
	struct rdt_hw_resource *hw_res = &rdt_resources_all[l];

	if (!hw_res->r_resctrl.cdp_capable)
		return -EINVAL;

	if (enable)
		return cdp_enable(l);

	cdp_disable(l);

	return 0;
}

static int reset_all_ctrls(struct rdt_resource *r)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	struct rdt_hw_domain *hw_dom;
	struct msr_param msr_param;
	cpumask_var_t cpu_mask;
	struct rdt_domain *d;
	int i;

	/* Walking r->domains, ensure it can't race with cpuhp */
	lockdep_assert_cpus_held();

	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	msr_param.res = r;
	msr_param.low = 0;
	msr_param.high = hw_res->num_closid;

	/*
	 * Disable resource control for this resource by setting all
	 * CBMs in all domains to the maximum mask value. Pick one CPU
	 * from each domain to update the MSRs below.
	 */
	list_for_each_entry(d, &r->domains, list) {
		hw_dom = resctrl_to_arch_dom(d);
		cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);

		for (i = 0; i < hw_res->num_closid; i++)
			hw_dom->ctrl_val[i] = r->default_ctrl;
	}

	/* Update CBM on all the CPUs in cpu_mask */
	on_each_cpu_mask(cpu_mask, rdt_ctrl_update, &msr_param, 1);

	free_cpumask_var(cpu_mask);

	return 0;
}

void resctrl_arch_reset_resources(void)
{
	struct rdt_resource *r;

	for_each_capable_rdt_resource(r)
		reset_all_ctrls(r);
}
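/*
 * Illustrative sketch, not part of the original file: set_cache_qos_cfg()
 * asserts via lockdep_assert_cpus_held() that its walk of r->domains cannot
 * race with cpuhp, so a hypothetical caller toggling CDP would wrap
 * resctrl_arch_set_cdp_enabled() like this:
 */
static int __maybe_unused example_toggle_l3_cdp(bool enable)
{
	int ret;

	cpus_read_lock();
	ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, enable);
	cpus_read_unlock();

	return ret;
}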