1 // SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023 Ventana Micro Systems Inc.
 */
6 #include <linux/kconfig.h>
7 #include <linux/kernel.h>
8 #include <linux/kvm_host.h>
10 #include <linux/sizes.h>
13 #include <asm/current.h>
14 #include <asm/kvm_vcpu_sbi.h>
17 #include <asm/uaccess.h>
19 void kvm_riscv_vcpu_sbi_sta_reset(struct kvm_vcpu *vcpu)
21 vcpu->arch.sta.shmem = INVALID_GPA;
22 vcpu->arch.sta.last_steal = 0;
25 void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu)
27 gpa_t shmem = vcpu->arch.sta.shmem;
28 u64 last_steal = vcpu->arch.sta.last_steal;
29 u32 *sequence_ptr, sequence;
30 u64 *steal_ptr, steal;
34 if (shmem == INVALID_GPA)
38 * shmem is 64-byte aligned (see the enforcement in
39 * kvm_sbi_sta_steal_time_set_shmem()) and the size of sbi_sta_struct
40 * is 64 bytes, so we know all its offsets are in the same page.
42 gfn = shmem >> PAGE_SHIFT;
43 hva = kvm_vcpu_gfn_to_hva(vcpu, gfn);
45 if (WARN_ON(kvm_is_error_hva(hva))) {
46 vcpu->arch.sta.shmem = INVALID_GPA;
50 sequence_ptr = (u32 *)(hva + offset_in_page(shmem) +
51 offsetof(struct sbi_sta_struct, sequence));
52 steal_ptr = (u64 *)(hva + offset_in_page(shmem) +
53 offsetof(struct sbi_sta_struct, steal));
55 if (WARN_ON(get_user(sequence, sequence_ptr)))
58 sequence = le32_to_cpu(sequence);
61 if (WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr)))
64 if (!WARN_ON(get_user(steal, steal_ptr))) {
65 steal = le64_to_cpu(steal);
66 vcpu->arch.sta.last_steal = READ_ONCE(current->sched_info.run_delay);
67 steal += vcpu->arch.sta.last_steal - last_steal;
68 WARN_ON(put_user(cpu_to_le64(steal), steal_ptr));
72 WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr));
74 kvm_vcpu_mark_page_dirty(vcpu, gfn);
77 static int kvm_sbi_sta_steal_time_set_shmem(struct kvm_vcpu *vcpu)
79 struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
80 unsigned long shmem_phys_lo = cp->a0;
81 unsigned long shmem_phys_hi = cp->a1;
83 struct sbi_sta_struct zero_sta = {0};
90 return SBI_ERR_INVALID_PARAM;
92 if (shmem_phys_lo == SBI_STA_SHMEM_DISABLE &&
93 shmem_phys_hi == SBI_STA_SHMEM_DISABLE) {
94 vcpu->arch.sta.shmem = INVALID_GPA;
98 if (shmem_phys_lo & (SZ_64 - 1))
99 return SBI_ERR_INVALID_PARAM;
101 shmem = shmem_phys_lo;
103 if (shmem_phys_hi != 0) {
104 if (IS_ENABLED(CONFIG_32BIT))
105 shmem |= ((gpa_t)shmem_phys_hi << 32);
107 return SBI_ERR_INVALID_ADDRESS;
110 hva = kvm_vcpu_gfn_to_hva_prot(vcpu, shmem >> PAGE_SHIFT, &writable);
111 if (kvm_is_error_hva(hva) || !writable)
112 return SBI_ERR_INVALID_ADDRESS;
114 ret = kvm_vcpu_write_guest(vcpu, shmem, &zero_sta, sizeof(zero_sta));
116 return SBI_ERR_FAILURE;
118 vcpu->arch.sta.shmem = shmem;
119 vcpu->arch.sta.last_steal = current->sched_info.run_delay;
124 static int kvm_sbi_ext_sta_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
125 struct kvm_vcpu_sbi_return *retdata)
127 struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
128 unsigned long funcid = cp->a6;
132 case SBI_EXT_STA_STEAL_TIME_SET_SHMEM:
133 ret = kvm_sbi_sta_steal_time_set_shmem(vcpu);
136 ret = SBI_ERR_NOT_SUPPORTED;
140 retdata->err_val = ret;
/*
 * Report whether the STA extension is usable: steal time can only be
 * recorded when scheduler run-delay accounting is enabled.
 */
static unsigned long kvm_sbi_ext_sta_probe(struct kvm_vcpu *vcpu)
{
	return sched_info_on() ? 1 : 0;
}
150 const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_sta = {
151 .extid_start = SBI_EXT_STA,
152 .extid_end = SBI_EXT_STA,
153 .handler = kvm_sbi_ext_sta_handler,
154 .probe = kvm_sbi_ext_sta_probe,
157 int kvm_riscv_vcpu_get_reg_sbi_sta(struct kvm_vcpu *vcpu,
158 unsigned long reg_num,
159 unsigned long *reg_val)
162 case KVM_REG_RISCV_SBI_STA_REG(shmem_lo):
163 *reg_val = (unsigned long)vcpu->arch.sta.shmem;
165 case KVM_REG_RISCV_SBI_STA_REG(shmem_hi):
166 if (IS_ENABLED(CONFIG_32BIT))
167 *reg_val = upper_32_bits(vcpu->arch.sta.shmem);
178 int kvm_riscv_vcpu_set_reg_sbi_sta(struct kvm_vcpu *vcpu,
179 unsigned long reg_num,
180 unsigned long reg_val)
183 case KVM_REG_RISCV_SBI_STA_REG(shmem_lo):
184 if (IS_ENABLED(CONFIG_32BIT)) {
185 gpa_t hi = upper_32_bits(vcpu->arch.sta.shmem);
187 vcpu->arch.sta.shmem = reg_val;
188 vcpu->arch.sta.shmem |= hi << 32;
190 vcpu->arch.sta.shmem = reg_val;
193 case KVM_REG_RISCV_SBI_STA_REG(shmem_hi):
194 if (IS_ENABLED(CONFIG_32BIT)) {
195 gpa_t lo = lower_32_bits(vcpu->arch.sta.shmem);
197 vcpu->arch.sta.shmem = ((gpa_t)reg_val << 32);
198 vcpu->arch.sta.shmem |= lo;
199 } else if (reg_val != 0) {