/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Ke Yu
 *    Kevin Tian <kevin.tian@intel.com>
 *    Dexuan Cui
 *
 * Contributors:
 *    Tina Zhang <tina.zhang@intel.com>
 *    Min He <min.he@intel.com>
 *    Niu Bing <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"
/**
 * intel_vgpu_gpa_to_mmio_offset - translate a GPA to MMIO offset
 * @vgpu: a vGPU
 * @gpa: guest physical address
 *
 * Returns:
 * The offset of the GPA relative to the vGPU's GTTMMIO BAR base.
 */
int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
{
	/* Mask off the low BAR flag bits (3:0) to get the base address. */
	u64 gttmmio_gpa = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0) &
			  ~GENMASK(3, 0);
	return gpa - gttmmio_gpa;
}
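
/*
 * Example (added for illustration; the addresses are hypothetical): if the
 * guest programmed the virtual GTTMMIO BAR to 0xf0000000, a trapped access
 * at GPA 0xf0002000 yields MMIO offset 0x2000:
 *
 *	int offset = intel_vgpu_gpa_to_mmio_offset(vgpu, 0xf0002000);
 *	// offset == 0x2000, given the BAR base assumed above
 */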
#define reg_is_mmio(gvt, reg)  \
	(reg >= 0 && reg < gvt->device_info.mmio_size)

#define reg_is_gtt(gvt, reg)   \
	(reg >= gvt->device_info.gtt_start_offset \
	 && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))
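
/*
 * Layout sketch (added for illustration; the sizes are hypothetical): BAR0
 * holds the register file first and the virtual GGTT behind it, so the two
 * macros above partition a single BAR. E.g. with mmio_size = 2MB and
 * gtt_start_offset = 8MB:
 *
 *	reg_is_mmio(gvt, 0x2000);	// true: inside the register range
 *	reg_is_gtt(gvt, 0x800000);	// true: first GGTT entry
 *	reg_is_gtt(gvt, 0x2000);	// false: below gtt_start_offset
 */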
/**
 * intel_vgpu_emulate_mmio_read - emulate MMIO read
 * @__vgpu: a vGPU
 * @pa: guest physical address
 * @p_data: data return buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_emulate_mmio_read(void *__vgpu, uint64_t pa,
		void *p_data, unsigned int bytes)
{
	struct intel_vgpu *vgpu = __vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_mmio_info *mmio;
	unsigned int offset = 0;
	int ret = -EINVAL;

	mutex_lock(&gvt->lock);
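
	/*
	 * Routing overview (comment added for clarity): the access is tried,
	 * in order, against (1) write-protected guest pages (shadowed PPGTT
	 * pages), (2) the GGTT range, (3) plain pass-through for offsets
	 * outside the MMIO range, and (4) the tracked-register handler,
	 * falling back to a default virtual-register access.
	 */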

	if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
		struct intel_vgpu_guest_page *gp;

		gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
		if (gp) {
			ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
					p_data, bytes);
			if (ret)
				gvt_err("vgpu%d: guest page read error %d, "
					"gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
					vgpu->id, ret,
					gp->gfn, pa, *(u32 *)p_data, bytes);
			mutex_unlock(&gvt->lock);
			return ret;
		}
	}

	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);

	if (WARN_ON(bytes > 8))
		goto err;

	if (reg_is_gtt(gvt, offset)) {
		if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
			goto err;
		if (WARN_ON(bytes != 4 && bytes != 8))
			goto err;
		if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
			goto err;

		ret = intel_vgpu_emulate_gtt_mmio_read(vgpu, offset,
				p_data, bytes);
		if (ret)
			goto err;

		mutex_unlock(&gvt->lock);
		return ret;
	}

	if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
		ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes);
		mutex_unlock(&gvt->lock);
		return ret;
	}

	if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
		goto err;

	mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
	if (!mmio && !vgpu->mmio.disable_warn_untrack) {
		gvt_err("vgpu%d: read untracked MMIO %x len %d val %x\n",
			vgpu->id, offset, bytes, *(u32 *)p_data);

		if (offset == 0x206c) {
			gvt_err("------------------------------------------\n");
			gvt_err("vgpu%d: likely triggers a gfx reset\n",
				vgpu->id);
			gvt_err("------------------------------------------\n");
			vgpu->mmio.disable_warn_untrack = true;
		}
	}

	if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
		if (WARN_ON(!IS_ALIGNED(offset, bytes)))
			goto err;
	}

	if (mmio) {
		if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
			if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
				goto err;
			if (WARN_ON(mmio->offset != offset))
				goto err;
		}
		ret = mmio->read(vgpu, offset, p_data, bytes);
	} else
		ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);

	if (ret)
		goto err;

	intel_gvt_mmio_set_accessed(gvt, offset);
	mutex_unlock(&gvt->lock);
	return 0;
err:
	gvt_err("vgpu%d: fail to emulate MMIO read %08x len %d\n",
			vgpu->id, offset, bytes);
	mutex_unlock(&gvt->lock);
	return ret;
}
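
/*
 * Usage sketch (added for illustration, not in the original file): a
 * hypervisor backend that traps a 4-byte guest register read at guest
 * physical address "gpa" would forward it roughly like this; the
 * surrounding trap handler is hypothetical.
 *
 *	u32 val = 0;
 *	int ret = intel_vgpu_emulate_mmio_read(vgpu, gpa, &val, 4);
 *	if (ret)
 *		return ret;	// emulation failed
 *	// hand "val" back to the guest as the register value
 */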

/**
 * intel_vgpu_emulate_mmio_write - emulate MMIO write
 * @__vgpu: a vGPU
 * @pa: guest physical address
 * @p_data: write data buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_emulate_mmio_write(void *__vgpu, uint64_t pa,
		void *p_data, unsigned int bytes)
{
	struct intel_vgpu *vgpu = __vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_mmio_info *mmio;
	unsigned int offset = 0;
	u32 old_vreg = 0, old_sreg = 0;
	int ret = -EINVAL;

	mutex_lock(&gvt->lock);

	/* Writes hitting a write-protected guest page go to its handler. */
	if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
		struct intel_vgpu_guest_page *gp;

		gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
		if (gp) {
			ret = gp->handler(gp, pa, p_data, bytes);
			if (ret)
				gvt_err("vgpu%d: guest page write error %d, "
					"gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
					vgpu->id, ret,
					gp->gfn, pa, *(u32 *)p_data, bytes);
			mutex_unlock(&gvt->lock);
			return ret;
		}
	}

	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);

	if (WARN_ON(bytes > 8))
		goto err;

	if (reg_is_gtt(gvt, offset)) {
		if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
			goto err;
		if (WARN_ON(bytes != 4 && bytes != 8))
			goto err;
		if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
			goto err;

		ret = intel_vgpu_emulate_gtt_mmio_write(vgpu, offset,
				p_data, bytes);
		if (ret)
			goto err;

		mutex_unlock(&gvt->lock);
		return ret;
	}

	if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
		ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
		mutex_unlock(&gvt->lock);
		return ret;
	}

	mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
	if (!mmio && !vgpu->mmio.disable_warn_untrack)
		gvt_err("vgpu%d: write untracked MMIO %x len %d val %x\n",
			vgpu->id, offset, bytes, *(u32 *)p_data);

	if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
		if (WARN_ON(!IS_ALIGNED(offset, bytes)))
			goto err;
	}

	if (mmio) {
		u64 ro_mask = mmio->ro_mask;

		if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
			if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
				goto err;
			if (WARN_ON(mmio->offset != offset))
				goto err;
		}

		/* Save old values so masked mode-register bits can be
		 * restored after the handler runs.
		 */
		if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
			old_vreg = vgpu_vreg(vgpu, offset);
			old_sreg = vgpu_sreg(vgpu, offset);
		}

		if (!ro_mask)
			ret = mmio->write(vgpu, offset, p_data, bytes);
		else {
			/* Protect RO bits like HW */
			u64 data = 0;

			/* all register bits are RO. */
			if (ro_mask == ~(u64)0) {
				gvt_err("vgpu%d: try to write RO reg %x\n",
						vgpu->id, offset);
				ret = 0;
				goto out;
			}
			/* keep the RO bits in the virtual register */
			memcpy(&data, p_data, bytes);
			data &= ~mmio->ro_mask;
			data |= vgpu_vreg(vgpu, offset) & mmio->ro_mask;
			ret = mmio->write(vgpu, offset, &data, bytes);
		}

		/* higher 16bits of mode ctl regs are mask bits for change */
		if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
			u32 mask = vgpu_vreg(vgpu, offset) >> 16;

			vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
				| (vgpu_vreg(vgpu, offset) & mask);
			vgpu_sreg(vgpu, offset) = (old_sreg & ~mask)
				| (vgpu_sreg(vgpu, offset) & mask);
		}
	} else
		ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
				bytes);
	if (ret)
		goto err;
out:
	intel_gvt_mmio_set_accessed(gvt, offset);
	mutex_unlock(&gvt->lock);
	return 0;
err:
	gvt_err("vgpu%d: fail to emulate MMIO write %08x len %d\n",
			vgpu->id, offset, bytes);
	mutex_unlock(&gvt->lock);
	return ret;
}
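
/*
 * Worked example (added for illustration; the values are hypothetical):
 * mode-control registers treat their upper 16 bits as a write-enable mask
 * for the lower 16 bits, which the restore step above emulates. Suppose the
 * old vreg is 0x00000003 and the guest writes 0x80008000 (only the enable
 * bit for data bit 15 set):
 *
 *	u32 mask = 0x80008000 >> 16;		// 0x8000
 *	u32 new  = (0x00000003 & ~mask)		// unselected bits keep old
 *		 | (0x80008000 & mask);		// selected bits take new
 *	// new == 0x00008003: bit 15 set, bits 1:0 preserved
 */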