arch/riscv/kvm/vcpu_sbi_sta.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023 Ventana Micro Systems Inc.
 */

#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/sizes.h>

#include <asm/bug.h>
#include <asm/current.h>
#include <asm/kvm_vcpu_sbi.h>
#include <asm/page.h>
#include <asm/sbi.h>
#include <asm/uaccess.h>

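/*
 * KVM implementation of the SBI Steal-time Accounting (STA) extension.
 * The guest registers a 64-byte shared-memory area (struct sbi_sta_struct)
 * via SBI_EXT_STA_STEAL_TIME_SET_SHMEM, and the host publishes the vCPU's
 * accumulated steal time into it whenever a steal-time update is requested.
 */
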
void kvm_riscv_vcpu_sbi_sta_reset(struct kvm_vcpu *vcpu)
{
        vcpu->arch.sta.shmem = INVALID_GPA;
        vcpu->arch.sta.last_steal = 0;
}

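/*
 * Publish the current steal time into the guest's shared-memory area.
 *
 * The sequence field acts as the write side of a seqcount: it is bumped
 * to an odd value before the steal field is updated and back to an even
 * value afterwards, so the guest can detect and retry reads that race
 * with an update. A guest-side reader would look roughly like this
 * (a sketch of the protocol, not the actual guest code):
 *
 *	do {
 *		seq = READ_ONCE(st->sequence);
 *		steal = le64_to_cpu(READ_ONCE(st->steal));
 *	} while ((le32_to_cpu(seq) & 1) ||
 *		 READ_ONCE(st->sequence) != seq);
 */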
void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu)
{
        gpa_t shmem = vcpu->arch.sta.shmem;
        u64 last_steal = vcpu->arch.sta.last_steal;
        __le32 __user *sequence_ptr;
        __le64 __user *steal_ptr;
        __le32 sequence_le;
        __le64 steal_le;
        u32 sequence;
        u64 steal;
        unsigned long hva;
        gfn_t gfn;

        if (shmem == INVALID_GPA)
                return;

        /*
         * shmem is 64-byte aligned (see the enforcement in
         * kvm_sbi_sta_steal_time_set_shmem()) and the size of sbi_sta_struct
         * is 64 bytes, so we know all its offsets are in the same page.
         */
        gfn = shmem >> PAGE_SHIFT;
        hva = kvm_vcpu_gfn_to_hva(vcpu, gfn);

        if (WARN_ON(kvm_is_error_hva(hva))) {
                vcpu->arch.sta.shmem = INVALID_GPA;
                return;
        }

        sequence_ptr = (__le32 __user *)(hva + offset_in_page(shmem) +
                                offsetof(struct sbi_sta_struct, sequence));
        steal_ptr = (__le64 __user *)(hva + offset_in_page(shmem) +
                                offsetof(struct sbi_sta_struct, steal));

        if (WARN_ON(get_user(sequence_le, sequence_ptr)))
                return;

        sequence = le32_to_cpu(sequence_le);
        sequence += 1;

        /* Odd sequence: tell the guest an update is in progress. */
        if (WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr)))
                return;

        if (!WARN_ON(get_user(steal_le, steal_ptr))) {
                steal = le64_to_cpu(steal_le);
                vcpu->arch.sta.last_steal = READ_ONCE(current->sched_info.run_delay);
                steal += vcpu->arch.sta.last_steal - last_steal;
                WARN_ON(put_user(cpu_to_le64(steal), steal_ptr));
        }

        /* Back to an even sequence: the update is complete. */
        sequence += 1;
        WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr));

        kvm_vcpu_mark_page_dirty(vcpu, gfn);
}

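/*
 * SBI_EXT_STA_STEAL_TIME_SET_SHMEM: a0/a1 carry the low/high halves of
 * the shared-memory physical address and a2 carries flags (none are
 * currently defined, so it must be zero). Passing SBI_STA_SHMEM_DISABLE
 * in both address halves disables the shared memory. The address must
 * be 64-byte aligned and back a writable page, and the structure is
 * zeroed on registration so no stale data is exposed to the guest.
 */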
static int kvm_sbi_sta_steal_time_set_shmem(struct kvm_vcpu *vcpu)
{
        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
        unsigned long shmem_phys_lo = cp->a0;
        unsigned long shmem_phys_hi = cp->a1;
        u32 flags = cp->a2;
        struct sbi_sta_struct zero_sta = {0};
        unsigned long hva;
        bool writable;
        gpa_t shmem;
        int ret;

        if (flags != 0)
                return SBI_ERR_INVALID_PARAM;

        if (shmem_phys_lo == SBI_STA_SHMEM_DISABLE &&
            shmem_phys_hi == SBI_STA_SHMEM_DISABLE) {
                vcpu->arch.sta.shmem = INVALID_GPA;
                return 0;
        }

        if (shmem_phys_lo & (SZ_64 - 1))
                return SBI_ERR_INVALID_PARAM;

        shmem = shmem_phys_lo;

        if (shmem_phys_hi != 0) {
                if (IS_ENABLED(CONFIG_32BIT))
                        shmem |= ((gpa_t)shmem_phys_hi << 32);
                else
                        return SBI_ERR_INVALID_ADDRESS;
        }

        hva = kvm_vcpu_gfn_to_hva_prot(vcpu, shmem >> PAGE_SHIFT, &writable);
        if (kvm_is_error_hva(hva) || !writable)
                return SBI_ERR_INVALID_ADDRESS;

        ret = kvm_vcpu_write_guest(vcpu, shmem, &zero_sta, sizeof(zero_sta));
        if (ret)
                return SBI_ERR_FAILURE;

        vcpu->arch.sta.shmem = shmem;
        vcpu->arch.sta.last_steal = current->sched_info.run_delay;

        return 0;
}

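/*
 * Top-level handler for the STA extension: dispatch on the SBI function
 * ID passed in a6 and hand the SBI error code back through retdata.
 */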
static int kvm_sbi_ext_sta_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
                                   struct kvm_vcpu_sbi_return *retdata)
{
        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
        unsigned long funcid = cp->a6;
        int ret;

        switch (funcid) {
        case SBI_EXT_STA_STEAL_TIME_SET_SHMEM:
                ret = kvm_sbi_sta_steal_time_set_shmem(vcpu);
                break;
        default:
                ret = SBI_ERR_NOT_SUPPORTED;
                break;
        }

        retdata->err_val = ret;

        return 0;
}

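/*
 * Only advertise the extension when the scheduler's run-delay accounting
 * is available, since current->sched_info.run_delay is where the reported
 * steal-time values come from.
 */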
static unsigned long kvm_sbi_ext_sta_probe(struct kvm_vcpu *vcpu)
{
        return !!sched_info_on();
}

const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_sta = {
        .extid_start = SBI_EXT_STA,
        .extid_end = SBI_EXT_STA,
        .handler = kvm_sbi_ext_sta_handler,
        .probe = kvm_sbi_ext_sta_probe,
};

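/*
 * ONE_REG accessors that let user space save and restore the registered
 * shmem GPA (e.g. across migration). On 32-bit hosts the GPA is split
 * across the shmem_lo/shmem_hi pseudo-registers; on 64-bit hosts
 * shmem_lo holds the whole value and shmem_hi must be zero.
 */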
int kvm_riscv_vcpu_get_reg_sbi_sta(struct kvm_vcpu *vcpu,
                                   unsigned long reg_num,
                                   unsigned long *reg_val)
{
        switch (reg_num) {
        case KVM_REG_RISCV_SBI_STA_REG(shmem_lo):
                *reg_val = (unsigned long)vcpu->arch.sta.shmem;
                break;
        case KVM_REG_RISCV_SBI_STA_REG(shmem_hi):
                if (IS_ENABLED(CONFIG_32BIT))
                        *reg_val = upper_32_bits(vcpu->arch.sta.shmem);
                else
                        *reg_val = 0;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

int kvm_riscv_vcpu_set_reg_sbi_sta(struct kvm_vcpu *vcpu,
                                   unsigned long reg_num,
                                   unsigned long reg_val)
{
        switch (reg_num) {
        case KVM_REG_RISCV_SBI_STA_REG(shmem_lo):
                if (IS_ENABLED(CONFIG_32BIT)) {
                        gpa_t hi = upper_32_bits(vcpu->arch.sta.shmem);

                        vcpu->arch.sta.shmem = reg_val;
                        vcpu->arch.sta.shmem |= hi << 32;
                } else {
                        vcpu->arch.sta.shmem = reg_val;
                }
                break;
        case KVM_REG_RISCV_SBI_STA_REG(shmem_hi):
                if (IS_ENABLED(CONFIG_32BIT)) {
                        gpa_t lo = lower_32_bits(vcpu->arch.sta.shmem);

                        vcpu->arch.sta.shmem = ((gpa_t)reg_val << 32);
                        vcpu->arch.sta.shmem |= lo;
                } else if (reg_val != 0) {
                        return -EINVAL;
                }
                break;
        default:
                return -EINVAL;
        }

        return 0;
}