/*
 * kvm_vcpu.c: handling all virtual cpu related things.
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *  Shaofan Li (Susue Li) <susie.li@intel.com>
 *  Yaozu Dong (Eddie Dong) <eddie.dong@intel.com>
 *  Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
 *  Xiantao Zhang <xiantao.zhang@intel.com>
 */

#include <linux/kvm_host.h>
#include <linux/types.h>

#include <asm/processor.h>
#include <asm/ia64regs.h>
#include <asm/gcc_intrin.h>
#include <asm/kregs.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

#include "asm-offsets.h"
#include "vcpu.h"

/*
 * - Index by it/dt/rt sequence
 * - Only existing mode transitions are allowed in this table
 * - RSE is placed at lazy mode when emulating guest partial mode
 * - If gva happens to be rr0 and rr4, only allowed case is identity
 *   mapping (gva = gpa), or panic! (How?)
 */
int mm_switch_table[8][8] = {
	/* 2004/09/12(Kevin): Allow switch to self */
	/*
	 *  (it,dt,rt): (0,0,0) -> (1,1,1)
	 *  This kind of transition usually occurs in the very early
	 *  stage of the Linux boot up procedure. Another case is in efi
	 *  and pal calls. (see "arch/ia64/kernel/head.S")
	 *
	 *  (it,dt,rt): (0,0,0) -> (0,1,1)
	 *  This kind of transition is found when OSYa exits efi boot
	 *  service. Since gva = gpa in this case (same region), data
	 *  access can be satisfied through the itlb entry for physical
	 *  emulation.
	 */
	{SW_SELF, 0,  0,  SW_NOP, 0,  0,  0,  SW_P2V},
	{0,  0,  0,  0,  0,  0,  0,  0},
	{0,  0,  0,  0,  0,  0,  0,  0},
	/*
	 *  (it,dt,rt): (0,1,1) -> (1,1,1)
	 *  This kind of transition is found in OSYa.
	 *
	 *  (it,dt,rt): (0,1,1) -> (0,0,0)
	 *  This kind of transition is found in OSYa.
	 */
	{SW_NOP, 0,  0,  SW_SELF, 0,  0,  0,  SW_P2V},
	/* (1,0,0) -> (1,1,1) */
	{0,  0,  0,  0,  0,  0,  0,  SW_P2V},
	/*
	 *  (it,dt,rt): (1,0,1) -> (1,1,1)
	 *  This kind of transition usually occurs when Linux returns
	 *  from the low level TLB miss handlers.
	 *  (see "arch/ia64/kernel/ivt.S")
	 */
	{0,  0,  0,  0,  0,  SW_SELF, 0,  SW_P2V},
	{0,  0,  0,  0,  0,  0,  0,  0},
	/*
	 *  (it,dt,rt): (1,1,1) -> (1,0,1)
	 *  This kind of transition usually occurs in the Linux low level
	 *  TLB miss handler. (see "arch/ia64/kernel/ivt.S")
	 *
	 *  (it,dt,rt): (1,1,1) -> (0,0,0)
	 *  This kind of transition usually occurs in pal and efi calls,
	 *  which require running in physical mode.
	 *  (see "arch/ia64/kernel/head.S")
	 */
	{SW_V2P, 0,  0,  0,  SW_V2P, SW_V2P, 0,  SW_SELF},
};
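
/*
 * Worked example (illustrative): MODE_IND() packs (it,dt,rt) into a
 * 3-bit index with it as the high bit, so a transition from (1,1,1)
 * to (0,0,0) looks up row 7, column 0 above and yields SW_V2P: the
 * guest is entering physical mode, so the metaphysical RIDs must be
 * loaded into rr0/rr4.
 */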

void physical_mode_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mode_flags = GUEST_IN_PHY;
}

void switch_to_physical_rid(struct kvm_vcpu *vcpu)
{
	unsigned long psr;

	/* Save original virtual mode rr[0] and rr[4] */
	psr = ia64_clear_ic();
	ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_rr0);
	ia64_srlz_d();
	ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_rr4);
	ia64_srlz_d();

	ia64_set_psr(psr);
	return;
}

void switch_to_virtual_rid(struct kvm_vcpu *vcpu)
{
	unsigned long psr;

	psr = ia64_clear_ic();
	ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
	ia64_srlz_d();
	ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4);
	ia64_srlz_d();
	ia64_set_psr(psr);
	return;
}

static int mm_switch_action(struct ia64_psr opsr, struct ia64_psr npsr)
{
	return mm_switch_table[MODE_IND(opsr)][MODE_IND(npsr)];
}

void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
					struct ia64_psr new_psr)
{
	int act;

	act = mm_switch_action(old_psr, new_psr);
	switch (act) {
	case SW_V2P:
		/*printk("V -> P mode transition: (0x%lx -> 0x%lx)\n",
		old_psr.val, new_psr.val);*/
		switch_to_physical_rid(vcpu);
		/*
		 * Set rse to enforced lazy, to prevent active rse
		 * save/restore when in guest physical mode.
		 */
		vcpu->arch.mode_flags |= GUEST_IN_PHY;
		break;
	case SW_P2V:
		switch_to_virtual_rid(vcpu);
		/*
		 * Recover the old mode, which was saved when entering
		 * guest physical mode.
		 */
		vcpu->arch.mode_flags &= ~GUEST_IN_PHY;
		break;
	case SW_SELF:
	case SW_NOP:
		break;
	default:
		break;
	}
	return;
}

void check_mm_mode_switch(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
					struct ia64_psr new_psr)
{
	if ((old_psr.dt != new_psr.dt)
			|| (old_psr.it != new_psr.it)
			|| (old_psr.rt != new_psr.rt))
		switch_mm_mode(vcpu, old_psr, new_psr);

	return;
}

/*
 * In physical mode, inserting tc/tr entries for region 0 and 4 uses
 * RID[0] and RID[4], which are reserved for physical mode emulation.
 * However, what those inserted tc/tr entries want is the rid for
 * virtual mode. So the original virtual rid needs to be restored
 * before the insert.
 *
 * Operations which require such a switch include:
 *  - insertions (itc.*, itr.*)
 *  - purges (ptc.* and ptr.*)
 *  - tpa
 *  - tak
 *
 * All of the above need the actual virtual rid for the destination
 * entry.
 */

void prepare_if_physical_mode(struct kvm_vcpu *vcpu)
{
	if (is_physical_mode(vcpu)) {
		vcpu->arch.mode_flags |= GUEST_PHY_EMUL;
		switch_to_virtual_rid(vcpu);
	}
	return;
}

/* Recover always follows prepare */
void recover_if_physical_mode(struct kvm_vcpu *vcpu)
{
	if (is_physical_mode(vcpu))
		switch_to_physical_rid(vcpu);
	vcpu->arch.mode_flags &= ~GUEST_PHY_EMUL;
	return;
}

#define RPT(x)	((u16) &((struct kvm_pt_regs *)0)->x)

static u16 gr_info[32] = {
	0,	/* r0 is read-only : WE SHOULD NEVER GET THIS */
	RPT(r1), RPT(r2), RPT(r3),
	RPT(r4), RPT(r5), RPT(r6), RPT(r7),
	RPT(r8), RPT(r9), RPT(r10), RPT(r11),
	RPT(r12), RPT(r13), RPT(r14), RPT(r15),
	RPT(r16), RPT(r17), RPT(r18), RPT(r19),
	RPT(r20), RPT(r21), RPT(r22), RPT(r23),
	RPT(r24), RPT(r25), RPT(r26), RPT(r27),
	RPT(r28), RPT(r29), RPT(r30), RPT(r31)
};

#define IA64_FIRST_STACKED_GR	32
#define IA64_FIRST_ROTATING_FR	32

static inline unsigned long
rotate_reg(unsigned long sor, unsigned long rrb, unsigned long reg)
{
	reg += rrb;
	if (reg >= sor)
		reg -= sor;
	return reg;
}
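
/*
 * Illustrative example: with a rotating region of sor = 16 slots and
 * rrb = 3, logical slot 14 maps to 14 + 3 = 17, which wraps back to
 * physical slot 1 (17 - 16).
 */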

/*
 * Return the (rotated) index for floating point register REGNUM
 * (REGNUM must be in the range 32-127; the result is in the
 * range 0-95).
 */
static inline unsigned long fph_index(struct kvm_pt_regs *regs,
						long regnum)
{
	unsigned long rrb_fr = (regs->cr_ifs >> 25) & 0x7f;
	return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
}

/*
 * The inverse of the above: given bspstore and the number of
 * registers, calculate ar.bsp.
 */
static inline unsigned long *kvm_rse_skip_regs(unsigned long *addr,
							long num_regs)
{
	long delta = ia64_rse_slot_num(addr) + num_regs;
	int i = 0;

	if (num_regs < 0)
		delta -= 0x3e;
	if (delta < 0) {
		while (delta <= -0x3f) {
			i--;
			delta += 0x3f;
		}
	} else {
		while (delta >= 0x3f) {
			i++;
			delta -= 0x3f;
		}
	}
	return addr + num_regs + i;
}
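
/*
 * Background note: in the ia64 register backing store every 64th
 * 8-byte slot (slot number 0x3f) holds an RNAT collection word rather
 * than a stacked register, which is why the walk above steps in units
 * of 0x3f data slots.
 */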

static void get_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
					unsigned long *val, int *nat)
{
	unsigned long *bsp, *addr, *rnat_addr, *bspstore;
	unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
	unsigned long nat_mask;
	unsigned long old_rsc, new_rsc;
	long sof = (regs->cr_ifs) & 0x7f;
	long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
	long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
	long ridx = r1 - 32;

	if (ridx < sor)
		ridx = rotate_reg(sor, rrb_gr, ridx);

	old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
	new_rsc = old_rsc & (~(0x3));
	ia64_setreg(_IA64_REG_AR_RSC, new_rsc);

	bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
	bsp = kbs + (regs->loadrs >> 19);

	addr = kvm_rse_skip_regs(bsp, -sof + ridx);
	nat_mask = 1UL << ia64_rse_slot_num(addr);
	rnat_addr = ia64_rse_rnat_addr(addr);

	if (addr >= bspstore) {
		ia64_flushrs();
		ia64_mf();
		bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
	}
	*val = *addr;
	if (nat) {
		if (bspstore < rnat_addr)
			*nat = (int)!!(ia64_getreg(_IA64_REG_AR_RNAT)
							& nat_mask);
		else
			*nat = (int)!!((*rnat_addr) & nat_mask);
	}
	ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
}

void set_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
				unsigned long val, unsigned long nat)
{
	unsigned long *bsp, *bspstore, *addr, *rnat_addr;
	unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
	unsigned long nat_mask;
	unsigned long old_rsc, new_rsc, psr;
	unsigned long rnat;
	long sof = (regs->cr_ifs) & 0x7f;
	long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
	long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
	long ridx = r1 - 32;

	if (ridx < sor)
		ridx = rotate_reg(sor, rrb_gr, ridx);

	old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
	/* put RSC to lazy mode, and set loadrs 0 */
	new_rsc = old_rsc & (~0x3fff0003);
	ia64_setreg(_IA64_REG_AR_RSC, new_rsc);
	bsp = kbs + (regs->loadrs >> 19); /* 16 + 3 */

	addr = kvm_rse_skip_regs(bsp, -sof + ridx);
	nat_mask = 1UL << ia64_rse_slot_num(addr);
	rnat_addr = ia64_rse_rnat_addr(addr);

	local_irq_save(psr);
	bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
	if (addr >= bspstore) {
		ia64_flushrs();
		ia64_mf();
		*addr = val;
		bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
		rnat = ia64_getreg(_IA64_REG_AR_RNAT);
		if (bspstore < rnat_addr)
			rnat = rnat & (~nat_mask);
		else
			*rnat_addr = (*rnat_addr) & (~nat_mask);

		ia64_mf();
		ia64_loadrs();
		ia64_setreg(_IA64_REG_AR_RNAT, rnat);
	} else {
		rnat = ia64_getreg(_IA64_REG_AR_RNAT);
		*addr = val;
		if (bspstore < rnat_addr)
			rnat = rnat & (~nat_mask);
		else
			*rnat_addr = (*rnat_addr) & (~nat_mask);

		ia64_setreg(_IA64_REG_AR_BSPSTORE, (unsigned long)bspstore);
		ia64_setreg(_IA64_REG_AR_RNAT, rnat);
	}
	local_irq_restore(psr);
	ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
}

void getreg(unsigned long regnum, unsigned long *val,
				int *nat, struct kvm_pt_regs *regs)
{
	unsigned long addr, *unat;

	if (regnum >= IA64_FIRST_STACKED_GR) {
		get_rse_reg(regs, regnum, val, nat);
		return;
	}

	/*
	 * Now look at registers in [0-31] range and init correct UNAT
	 */
	addr = (unsigned long)regs;
	unat = &regs->eml_unat;

	addr += gr_info[regnum];

	*val = *(unsigned long *)addr;
	/*
	 * do it only when requested
	 */
	if (nat)
		*nat = (*unat >> ((addr >> 3) & 0x3f)) & 0x1UL;
}

void setreg(unsigned long regnum, unsigned long val,
			int nat, struct kvm_pt_regs *regs)
{
	unsigned long addr;
	unsigned long bitmask;
	unsigned long *unat;

	/*
	 * First take care of stacked registers
	 */
	if (regnum >= IA64_FIRST_STACKED_GR) {
		set_rse_reg(regs, regnum, val, nat);
		return;
	}

	/*
	 * Now look at registers in [0-31] range and init correct UNAT
	 */
	addr = (unsigned long)regs;
	unat = &regs->eml_unat;
	/*
	 * add offset from base of struct
	 * and do it !
	 */
	addr += gr_info[regnum];

	*(unsigned long *)addr = val;

	/*
	 * We need to clear the corresponding UNAT bit to fully emulate the load
	 * UNAT bit_pos = GR[r3]{8:3} from EAS-2.4
	 */
	bitmask = 1UL << ((addr >> 3) & 0x3f);
	if (nat)
		*unat |= bitmask;
	else
		*unat &= ~bitmask;
}
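
/*
 * Illustrative example: the UNAT bit for a slot is (addr >> 3) & 0x3f,
 * so each UNAT bit guards one 8-byte slot within a 512-byte window;
 * a save address whose low nine bits are 0x018 maps to bit 3.
 */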

u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg)
{
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	unsigned long val;

	if (!reg)
		return 0;
	getreg(reg, &val, 0, regs);
	return val;
}

void vcpu_set_gr(struct kvm_vcpu *vcpu, u64 reg, u64 value, int nat)
{
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	long sof = (regs->cr_ifs) & 0x7f;

	if (!reg)
		return;
	if (reg >= sof + 32)
		return;
	setreg(reg, value, nat, regs);	/* FIXME: handle NATs later */
}

void getfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
		struct kvm_pt_regs *regs)
{
	/* Take floating register rotation into consideration */
	if (regnum >= IA64_FIRST_ROTATING_FR)
		regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);

#define CASE_FIXED_FP(reg)			\
	case (reg):				\
		ia64_stf_spill(fpval, reg);	\
		break

	switch (regnum) {
	CASE_FIXED_FP(0);
	CASE_FIXED_FP(1);
	CASE_FIXED_FP(2);
	/* ... the fixed-FP cases continue in this pattern up to ... */
	CASE_FIXED_FP(127);
	}
#undef CASE_FIXED_FP
}

void setfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
		struct kvm_pt_regs *regs)
{
	/* Take floating register rotation into consideration */
	if (regnum >= IA64_FIRST_ROTATING_FR)
		regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);

#define CASE_FIXED_FP(reg)			\
	case (reg):				\
		ia64_ldf_fill(reg, fpval);	\
		break

	switch (regnum) {
	CASE_FIXED_FP(2);
	CASE_FIXED_FP(3);
	/* ... the fixed-FP cases continue in this pattern up to ... */
	CASE_FIXED_FP(127);
	}
#undef CASE_FIXED_FP
}

void vcpu_get_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
		struct ia64_fpreg *val)
{
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	getfpreg(reg, val, regs);	/* FIXME: handle NATs later */
}

void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
		struct ia64_fpreg *val)
{
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	if (reg > 1)
		setfpreg(reg, val, regs);	/* FIXME: handle NATs later */
}

/************************************************************************
 * lsapic timer
 ***********************************************************************/
u64 vcpu_get_itc(struct kvm_vcpu *vcpu)
{
	unsigned long guest_itc;

	guest_itc = VMX(vcpu, itc_offset) + ia64_getreg(_IA64_REG_AR_ITC);

	if (guest_itc >= VMX(vcpu, last_itc)) {
		VMX(vcpu, last_itc) = guest_itc;
		return guest_itc;
	} else
		return VMX(vcpu, last_itc);
}
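
/*
 * Note on the arithmetic above: itc_offset (computed in vcpu_set_itc
 * below) is guest time minus host ar.itc at the moment the guest
 * programmed its ITC, so adding it to the current host ar.itc yields
 * guest time; last_itc then keeps successive reads monotonic.
 */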

static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val);

static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
{
	struct kvm_vcpu *v;
	int i;
	long itc_offset = val - ia64_getreg(_IA64_REG_AR_ITC);
	unsigned long vitv = VCPU(vcpu, itv);

	if (vcpu->vcpu_id == 0) {
		for (i = 0; i < KVM_MAX_VCPUS; i++) {
			v = (struct kvm_vcpu *)((char *)vcpu +
					sizeof(struct kvm_vcpu_data) * i);
			VMX(v, itc_offset) = itc_offset;
			VMX(v, last_itc) = 0;
		}
	}
	VMX(vcpu, last_itc) = 0;
	if (VCPU(vcpu, itm) <= val) {
		VMX(vcpu, itc_check) = 0;
		vcpu_unpend_interrupt(vcpu, vitv);
	} else {
		VMX(vcpu, itc_check) = 1;
		vcpu_set_itm(vcpu, VCPU(vcpu, itm));
	}
}

static inline u64 vcpu_get_itm(struct kvm_vcpu *vcpu)
{
	return ((u64)VCPU(vcpu, itm));
}

static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val)
{
	unsigned long vitv = VCPU(vcpu, itv);

	VCPU(vcpu, itm) = val;
	if (val > vcpu_get_itc(vcpu)) {
		VMX(vcpu, itc_check) = 1;
		vcpu_unpend_interrupt(vcpu, vitv);
		VMX(vcpu, timer_pending) = 0;
	} else
		VMX(vcpu, itc_check) = 0;
}

#define ITV_VECTOR(itv)		((itv) & 0xff)
#define ITV_IRQ_MASK(itv)	((itv) & (1 << 16))
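
/*
 * Illustrative example: itv = 0x10045 has bit 16 (the mask bit) set
 * and vector 0x45, so the timer interrupt is configured but masked;
 * itv = 0x45 is the same vector, unmasked.
 */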

static inline void vcpu_set_itv(struct kvm_vcpu *vcpu, u64 val)
{
	VCPU(vcpu, itv) = val;
	if (!ITV_IRQ_MASK(val) && vcpu->arch.timer_pending) {
		vcpu_pend_interrupt(vcpu, ITV_VECTOR(val));
		vcpu->arch.timer_pending = 0;
	}
}

static inline void vcpu_set_eoi(struct kvm_vcpu *vcpu, u64 val)
{
	int vec;

	vec = highest_inservice_irq(vcpu);
	if (vec == NULL_VECTOR)
		return;
	VMX(vcpu, insvc[vec >> 6]) &= ~(1UL << (vec & 63));
	VCPU(vcpu, eoi) = 0;
	vcpu->arch.irq_new_pending = 1;
}

/* See Table 5-8 in SDM vol2 for the definition */
int irq_masked(struct kvm_vcpu *vcpu, int h_pending, int h_inservice)
{
	union ia64_tpr vtpr;

	vtpr.val = VCPU(vcpu, tpr);

	if (h_inservice == NMI_VECTOR)
		return IRQ_MASKED_BY_INSVC;

	if (h_pending == NMI_VECTOR) {
		/* Non Maskable Interrupt */
		return IRQ_NO_MASKED;
	}

	if (h_inservice == ExtINT_VECTOR)
		return IRQ_MASKED_BY_INSVC;

	if (h_pending == ExtINT_VECTOR) {
		if (vtpr.mmi) {
			/* mask all external IRQ */
			return IRQ_MASKED_BY_VTPR;
		} else
			return IRQ_NO_MASKED;
	}

	if (is_higher_irq(h_pending, h_inservice)) {
		if (is_higher_class(h_pending, vtpr.mic + (vtpr.mmi << 4)))
			return IRQ_NO_MASKED;
		else
			return IRQ_MASKED_BY_VTPR;
	} else {
		return IRQ_MASKED_BY_INSVC;
	}
}
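
/*
 * Illustrative example: with vtpr.mic = 5 and vtpr.mmi = 0, a pending
 * vector 0x4a (interrupt class 4) is masked by the TPR, while vector
 * 0x6a (class 6) would be delivered, assuming nothing of equal or
 * higher priority is already in service.
 */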

void vcpu_pend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
{
	long spsr;
	int ret;

	local_irq_save(spsr);
	ret = test_and_set_bit(vec, &VCPU(vcpu, irr[0]));
	local_irq_restore(spsr);

	vcpu->arch.irq_new_pending = 1;
}

void vcpu_unpend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
{
	long spsr;
	int ret;

	local_irq_save(spsr);
	ret = test_and_clear_bit(vec, &VCPU(vcpu, irr[0]));
	local_irq_restore(spsr);

	vcpu->arch.irq_new_pending = 1;
}

void update_vhpi(struct kvm_vcpu *vcpu, int vec)
{
	u64 vhpi;

	if (vec == NULL_VECTOR)
		vhpi = 0;
	else if (vec == NMI_VECTOR)
		vhpi = 32;
	else if (vec == ExtINT_VECTOR)
		vhpi = 16;
	else
		vhpi = vec >> 4;

	VCPU(vcpu, vhpi) = vhpi;
	if (VCPU(vcpu, vac).a_int)
		ia64_call_vsa(PAL_VPS_SET_PENDING_INTERRUPT,
				(u64)vcpu->arch.vpd, 0, 0, 0, 0, 0, 0);
}

u64 vcpu_get_ivr(struct kvm_vcpu *vcpu)
{
	int vec, h_inservice, mask;

	vec = highest_pending_irq(vcpu);
	h_inservice = highest_inservice_irq(vcpu);
	mask = irq_masked(vcpu, vec, h_inservice);
	if (vec == NULL_VECTOR || mask == IRQ_MASKED_BY_INSVC) {
		if (VCPU(vcpu, vhpi))
			update_vhpi(vcpu, NULL_VECTOR);
		return IA64_SPURIOUS_INT_VECTOR;
	}
	if (mask == IRQ_MASKED_BY_VTPR) {
		update_vhpi(vcpu, vec);
		return IA64_SPURIOUS_INT_VECTOR;
	}
	VMX(vcpu, insvc[vec >> 6]) |= (1UL << (vec & 63));
	vcpu_unpend_interrupt(vcpu, vec);
	return (u64)vec;
}

/**************************************************************************
  Privileged operation emulation routines
 **************************************************************************/
u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr)
{
	union ia64_pta vpta;
	union ia64_rr vrr;
	u64 pval;
	u64 vhpt_offset;

	vpta.val = vcpu_get_pta(vcpu);
	vrr.val = vcpu_get_rr(vcpu, vadr);
	vhpt_offset = ((vadr >> vrr.ps) << 3) & ((1UL << (vpta.size)) - 1);
	if (vpta.vf)
		pval = ia64_call_vsa(PAL_VPS_THASH, vadr, vrr.val,
				vpta.val, 0, 0, 0, 0);
	else
		pval = (vadr & VRN_MASK) | vhpt_offset |
			(vpta.val << 3 >> (vpta.size + 3) << (vpta.size));

	return pval;
}
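
/*
 * Note on the long-format fallback above: the hash combines the region
 * bits of vadr, the page-index offset ((vadr >> ps) << 3) wrapped to
 * the VHPT size, and the VHPT base taken from pta with its low 'size'
 * bits cleared; e.g. with vpta.size = 15 the table wraps every 32KB.
 */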

u64 vcpu_ttag(struct kvm_vcpu *vcpu, u64 vadr)
{
	union ia64_rr vrr;
	union ia64_pta vpta;
	u64 pval;

	vpta.val = vcpu_get_pta(vcpu);
	vrr.val = vcpu_get_rr(vcpu, vadr);
	if (vpta.vf)
		pval = ia64_call_vsa(PAL_VPS_TTAG, vadr, vrr.val,
						0, 0, 0, 0, 0);
	else
		pval = 1;

	return pval;
}

u64 vcpu_tak(struct kvm_vcpu *vcpu, u64 vadr)
{
	struct thash_data *data;
	union ia64_pta vpta;
	u64 key;

	vpta.val = vcpu_get_pta(vcpu);
	if (vpta.vf == 0) {
		key = 1;
		return key;
	}
	data = vtlb_lookup(vcpu, vadr, D_TLB);
	if (!data || !data->p)
		key = 1;
	else
		key = data->key;

	return key;
}

void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long thash, vadr;

	vadr = vcpu_get_gr(vcpu, inst.M46.r3);
	thash = vcpu_thash(vcpu, vadr);
	vcpu_set_gr(vcpu, inst.M46.r1, thash, 0);
}

void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long tag, vadr;

	vadr = vcpu_get_gr(vcpu, inst.M46.r3);
	tag = vcpu_ttag(vcpu, vadr);
	vcpu_set_gr(vcpu, inst.M46.r1, tag, 0);
}

int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, u64 *padr)
{
	struct thash_data *data;
	union ia64_isr visr, pt_isr;
	struct kvm_pt_regs *regs;
	struct ia64_psr vpsr;

	regs = vcpu_regs(vcpu);
	pt_isr.val = VMX(vcpu, cr_isr);
	visr.val = 0;
	visr.ei = pt_isr.ei;
	visr.ir = pt_isr.ir;
	vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
	visr.na = 1;

	/* First look in the guest VHPT. */
	data = vhpt_lookup(vadr);
	if (data) {
		if (data->p == 0) {
			vcpu_set_isr(vcpu, visr.val);
			data_page_not_present(vcpu, vadr);
			return IA64_FAULT;
		} else if (data->ma == VA_MATTR_NATPAGE) {
			vcpu_set_isr(vcpu, visr.val);
			dnat_page_consumption(vcpu, vadr);
			return IA64_FAULT;
		} else {
			*padr = (data->gpaddr >> data->ps << data->ps) |
				(vadr & (PSIZE(data->ps) - 1));
			return IA64_NO_FAULT;
		}
	}

	/* Then look in the guest TLB. */
	data = vtlb_lookup(vcpu, vadr, D_TLB);
	if (data) {
		if (data->p == 0) {
			vcpu_set_isr(vcpu, visr.val);
			data_page_not_present(vcpu, vadr);
			return IA64_FAULT;
		} else if (data->ma == VA_MATTR_NATPAGE) {
			vcpu_set_isr(vcpu, visr.val);
			dnat_page_consumption(vcpu, vadr);
			return IA64_FAULT;
		} else {
			*padr = ((data->ppn >> (data->ps - 12)) << data->ps)
				| (vadr & (PSIZE(data->ps) - 1));
			return IA64_NO_FAULT;
		}
	}

	if (!vhpt_enabled(vcpu, vadr, NA_REF)) {
		if (vpsr.ic) {
			vcpu_set_isr(vcpu, visr.val);
			alt_dtlb(vcpu, vadr);
			return IA64_FAULT;
		} else {
			nested_dtlb(vcpu);
			return IA64_FAULT;
		}
	} else {
		if (vpsr.ic) {
			vcpu_set_isr(vcpu, visr.val);
			dvhpt_fault(vcpu, vadr);
			return IA64_FAULT;
		} else {
			nested_dtlb(vcpu);
			return IA64_FAULT;
		}
	}

	return IA64_NO_FAULT;
}

int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r1, r3;

	r3 = vcpu_get_gr(vcpu, inst.M46.r3);

	if (vcpu_tpa(vcpu, r3, &r1))
		return IA64_FAULT;

	vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
	return (IA64_NO_FAULT);
}

void kvm_tak(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r1, r3;

	r3 = vcpu_get_gr(vcpu, inst.M46.r3);
	r1 = vcpu_tak(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
}

/************************************
 * Insert/Purge translation register/cache
 ************************************/
void vcpu_itc_i(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
{
	thash_purge_and_insert(vcpu, pte, itir, ifa, I_TLB);
}

void vcpu_itc_d(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
{
	thash_purge_and_insert(vcpu, pte, itir, ifa, D_TLB);
}

void vcpu_itr_i(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
{
	u64 ps, va, rid;
	struct thash_data *p_itr;

	ps = itir_ps(itir);
	va = PAGEALIGN(ifa, ps);
	pte &= ~PAGE_FLAGS_RV_MASK;
	rid = vcpu_get_rr(vcpu, ifa);
	rid = rid & RR_RID_MASK;
	p_itr = (struct thash_data *)&vcpu->arch.itrs[slot];
	vcpu_set_tr(p_itr, pte, itir, va, rid);
	vcpu_quick_region_set(VMX(vcpu, itr_regions), va);
}

void vcpu_itr_d(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
{
	u64 gpfn;
	u64 ps, va, rid;
	struct thash_data *p_dtr;

	ps = itir_ps(itir);
	va = PAGEALIGN(ifa, ps);
	pte &= ~PAGE_FLAGS_RV_MASK;

	if (ps != _PAGE_SIZE_16M)
		thash_purge_entries(vcpu, va, ps);
	gpfn = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
	if (__gpfn_is_io(gpfn))
		pte |= VTLB_PTE_IO;
	rid = vcpu_get_rr(vcpu, va);
	rid = rid & RR_RID_MASK;
	p_dtr = (struct thash_data *)&vcpu->arch.dtrs[slot];
	vcpu_set_tr(p_dtr, pte, itir, va, rid);
	vcpu_quick_region_set(VMX(vcpu, dtr_regions), va);
}

void vcpu_ptr_d(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
{
	int index;
	u64 va;

	va = PAGEALIGN(ifa, ps);
	while ((index = vtr_find_overlap(vcpu, va, ps, D_TLB)) >= 0)
		vcpu->arch.dtrs[index].page_flags = 0;

	thash_purge_entries(vcpu, va, ps);
}

void vcpu_ptr_i(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
{
	int index;
	u64 va;

	va = PAGEALIGN(ifa, ps);
	while ((index = vtr_find_overlap(vcpu, va, ps, I_TLB)) >= 0)
		vcpu->arch.itrs[index].page_flags = 0;

	thash_purge_entries(vcpu, va, ps);
}

void vcpu_ptc_l(struct kvm_vcpu *vcpu, u64 va, u64 ps)
{
	va = PAGEALIGN(va, ps);
	thash_purge_entries(vcpu, va, ps);
}

void vcpu_ptc_e(struct kvm_vcpu *vcpu, u64 va)
{
	thash_purge_all(vcpu);
}

void vcpu_ptc_ga(struct kvm_vcpu *vcpu, u64 va, u64 ps)
{
	struct exit_ctl_data *p = &vcpu->arch.exit_data;
	long psr;

	local_irq_save(psr);
	p->exit_reason = EXIT_REASON_PTC_G;

	p->u.ptc_g_data.rr = vcpu_get_rr(vcpu, va);
	p->u.ptc_g_data.vaddr = va;
	p->u.ptc_g_data.ps = ps;
	vmm_transition(vcpu);
	/* Do Local Purge Here */
	vcpu_ptc_l(vcpu, va, ps);
	local_irq_restore(psr);
}

void vcpu_ptc_g(struct kvm_vcpu *vcpu, u64 va, u64 ps)
{
	vcpu_ptc_ga(vcpu, va, ps);
}

void kvm_ptc_e(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long ifa;

	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
	vcpu_ptc_e(vcpu, ifa);
}

void kvm_ptc_g(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long ifa, itir;

	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
	itir = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_ptc_g(vcpu, ifa, itir_ps(itir));
}

void kvm_ptc_ga(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long ifa, itir;

	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
	itir = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_ptc_ga(vcpu, ifa, itir_ps(itir));
}

void kvm_ptc_l(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long ifa, itir;

	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
	itir = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_ptc_l(vcpu, ifa, itir_ps(itir));
}

void kvm_ptr_d(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long ifa, itir;

	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
	itir = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_ptr_d(vcpu, ifa, itir_ps(itir));
}

void kvm_ptr_i(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long ifa, itir;

	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
	itir = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_ptr_i(vcpu, ifa, itir_ps(itir));
}

void kvm_itr_d(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long itir, ifa, pte, slot;

	slot = vcpu_get_gr(vcpu, inst.M45.r3);
	pte = vcpu_get_gr(vcpu, inst.M45.r2);
	itir = vcpu_get_itir(vcpu);
	ifa = vcpu_get_ifa(vcpu);
	vcpu_itr_d(vcpu, slot, pte, itir, ifa);
}

void kvm_itr_i(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long itir, ifa, pte, slot;

	slot = vcpu_get_gr(vcpu, inst.M45.r3);
	pte = vcpu_get_gr(vcpu, inst.M45.r2);
	itir = vcpu_get_itir(vcpu);
	ifa = vcpu_get_ifa(vcpu);
	vcpu_itr_i(vcpu, slot, pte, itir, ifa);
}

void kvm_itc_d(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long itir, ifa, pte;

	itir = vcpu_get_itir(vcpu);
	ifa = vcpu_get_ifa(vcpu);
	pte = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_itc_d(vcpu, pte, itir, ifa);
}

void kvm_itc_i(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long itir, ifa, pte;

	itir = vcpu_get_itir(vcpu);
	ifa = vcpu_get_ifa(vcpu);
	pte = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_itc_i(vcpu, pte, itir, ifa);
}

/*************************************
 * Moves to semi-privileged registers
 *************************************/
void kvm_mov_to_ar_imm(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long imm;

	if (inst.M30.s)
		imm = -inst.M30.imm;
	else
		imm = inst.M30.imm;

	vcpu_set_itc(vcpu, imm);
}

void kvm_mov_to_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r2;

	r2 = vcpu_get_gr(vcpu, inst.M29.r2);
	vcpu_set_itc(vcpu, r2);
}

void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r1;

	r1 = vcpu_get_itc(vcpu);
	vcpu_set_gr(vcpu, inst.M31.r1, r1, 0);
}

/**************************************************************************
  struct kvm_vcpu protection key register access routines
 **************************************************************************/
unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg)
{
	return ((unsigned long)ia64_get_pkr(reg));
}

void vcpu_set_pkr(struct kvm_vcpu *vcpu, unsigned long reg, unsigned long val)
{
	ia64_set_pkr(reg, val);
}

/********************************
 * Moves to privileged registers
 ********************************/
unsigned long vcpu_set_rr(struct kvm_vcpu *vcpu, unsigned long reg,
					unsigned long val)
{
	union ia64_rr oldrr, newrr;
	unsigned long rrval;
	struct exit_ctl_data *p = &vcpu->arch.exit_data;
	unsigned long psr;

	oldrr.val = vcpu_get_rr(vcpu, reg);
	newrr.val = val;
	vcpu->arch.vrr[reg >> VRN_SHIFT] = val;

	switch ((unsigned long)(reg >> VRN_SHIFT)) {
	case VRN6:
		vcpu->arch.vmm_rr = vrrtomrr(val);
		local_irq_save(psr);
		p->exit_reason = EXIT_REASON_SWITCH_RR6;
		vmm_transition(vcpu);
		local_irq_restore(psr);
		break;
	case VRN4:
		rrval = vrrtomrr(val);
		vcpu->arch.metaphysical_saved_rr4 = rrval;
		if (!is_physical_mode(vcpu))
			ia64_set_rr(reg, rrval);
		break;
	case VRN0:
		rrval = vrrtomrr(val);
		vcpu->arch.metaphysical_saved_rr0 = rrval;
		if (!is_physical_mode(vcpu))
			ia64_set_rr(reg, rrval);
		break;
	default:
		ia64_set_rr(reg, vrrtomrr(val));
		break;
	}

	return (IA64_NO_FAULT);
}

void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r2;

	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
	vcpu_set_rr(vcpu, r3, r2);
}

void kvm_mov_to_dbr(struct kvm_vcpu *vcpu, INST64 inst)
{
}

void kvm_mov_to_ibr(struct kvm_vcpu *vcpu, INST64 inst)
{
}

void kvm_mov_to_pmc(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r2;

	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
	vcpu_set_pmc(vcpu, r3, r2);
}

void kvm_mov_to_pmd(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r2;

	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
	vcpu_set_pmd(vcpu, r3, r2);
}

void kvm_mov_to_pkr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r2;

	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
	vcpu_set_pkr(vcpu, r3, r2);
}

void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r1;

	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
	r1 = vcpu_get_rr(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}

void kvm_mov_from_pkr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r1;

	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
	r1 = vcpu_get_pkr(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}

void kvm_mov_from_dbr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r1;

	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
	r1 = vcpu_get_dbr(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}

void kvm_mov_from_ibr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r1;

	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
	r1 = vcpu_get_ibr(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}

void kvm_mov_from_pmc(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r1;

	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
	r1 = vcpu_get_pmc(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}

unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg)
{
	/* FIXME: This could get called as a result of a rsvd-reg fault */
	if (reg > (ia64_get_cpuid(3) & 0xff))
		return 0;
	else
		return ia64_get_cpuid(reg);
}
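
/*
 * Background note: CPUID[3] bits 7:0 hold the index of the largest
 * implemented CPUID register, so the bounds check above simply refuses
 * to read past the end of the physical CPUID space.
 */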

void kvm_mov_from_cpuid(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r1;

	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
	r1 = vcpu_get_cpuid(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}

void vcpu_set_tpr(struct kvm_vcpu *vcpu, unsigned long val)
{
	VCPU(vcpu, tpr) = val;
	vcpu->arch.irq_check = 1;
}

unsigned long kvm_mov_to_cr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r2;

	r2 = vcpu_get_gr(vcpu, inst.M32.r2);
	VCPU(vcpu, vcr[inst.M32.cr3]) = r2;

	switch (inst.M32.cr3) {
	case 0:		/* cr.dcr */
		vcpu_set_dcr(vcpu, r2);
		break;
	case 1:		/* cr.itm */
		vcpu_set_itm(vcpu, r2);
		break;
	case 66:	/* cr.tpr */
		vcpu_set_tpr(vcpu, r2);
		break;
	case 67:	/* cr.eoi */
		vcpu_set_eoi(vcpu, r2);
		break;
	default:
		break;
	}

	return 0;
}

unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long tgt = inst.M33.r1;
	unsigned long val;

	switch (inst.M33.cr3) {
	case 65:	/* cr.ivr */
		val = vcpu_get_ivr(vcpu);
		vcpu_set_gr(vcpu, tgt, val, 0);
		break;
	case 67:	/* cr.eoi reads as 0 */
		vcpu_set_gr(vcpu, tgt, 0L, 0);
		break;
	default:
		val = VCPU(vcpu, vcr[inst.M33.cr3]);
		vcpu_set_gr(vcpu, tgt, val, 0);
		break;
	}

	return 0;
}

void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val)
{
	unsigned long mask;
	struct kvm_pt_regs *regs;
	struct ia64_psr old_psr, new_psr;

	old_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);

	regs = vcpu_regs(vcpu);
	/* We only support guests running with:
	 *  vpsr.pk = 0, vpsr.is = 0 and vpsr.vm = 0.
	 * Otherwise panic.
	 */
	if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM))
		panic_vm(vcpu, "Only support guests with vpsr.pk = 0, "
				"vpsr.is = 0 and vpsr.vm = 0\n");

	/*
	 * The IA64_PSR bits id/da/dd/ss/ed/ia become 0 after each
	 * instruction executes successfully, so they are kept in the
	 * machine PSR (mIA64_PSR) rather than tracked in vpsr.
	 */
	VCPU(vcpu, vpsr) = val
		& (~(IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |
			IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA));

	if (!old_psr.i && (val & IA64_PSR_I)) {
		/* vpsr.i transitions 0 -> 1 */
		vcpu->arch.irq_check = 1;
	}
	new_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);

	/*
	 * All vIA64_PSR bits shall go to mPSR (v->tf->tf_special.psr),
	 * except for the following bits:
	 *  ic/i/dt/si/rt/mc/it/bn/vm
	 */
	mask = IA64_PSR_IC + IA64_PSR_I + IA64_PSR_DT + IA64_PSR_SI +
		IA64_PSR_RT + IA64_PSR_MC + IA64_PSR_IT + IA64_PSR_BN +
		IA64_PSR_VM;

	regs->cr_ipsr = (regs->cr_ipsr & mask) | (val & (~mask));

	check_mm_mode_switch(vcpu, old_psr, new_psr);

	return;
}

unsigned long vcpu_cover(struct kvm_vcpu *vcpu)
{
	struct ia64_psr vpsr;
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);

	if (!vpsr.ic)
		VCPU(vcpu, ifs) = regs->cr_ifs;
	regs->cr_ifs = IA64_IFS_V;
	return (IA64_NO_FAULT);
}

/**************************************************************************
  VCPU banked general register access routines
 **************************************************************************/
#define vcpu_bsw0_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT)	\
	do {								\
		__asm__ __volatile__ (					\
				";;extr.u %0 = %3,%6,16;;\n"		\
				"dep %1 = %0, %1, 0, 16;;\n"		\
				"st8 [%4] = %1\n"			\
				"extr.u %0 = %2, 16, 16;;\n"		\
				"dep %3 = %0, %3, %6, 16;;\n"		\
				"st8 [%5] = %3\n"			\
				::"r"(i), "r"(*b1unat), "r"(*b0unat),	\
				"r"(*runat), "r"(b1unat), "r"(runat),	\
				"i"(VMM_PT_REGS_R16_SLOT) : "memory");	\
	} while (0)

void vcpu_bsw0(struct kvm_vcpu *vcpu)
{
	unsigned long i;

	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	unsigned long *r = &regs->r16;
	unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
	unsigned long *b1 = &VCPU(vcpu, vgr[0]);
	unsigned long *runat = &regs->eml_unat;
	unsigned long *b0unat = &VCPU(vcpu, vbnat);
	unsigned long *b1unat = &VCPU(vcpu, vnat);

	if (VCPU(vcpu, vpsr) & IA64_PSR_BN) {
		for (i = 0; i < 16; i++) {
			*b1++ = *r;
			*r++ = *b0++;
		}
		vcpu_bsw0_unat(i, b0unat, b1unat, runat,
				VMM_PT_REGS_R16_SLOT);
		VCPU(vcpu, vpsr) &= ~IA64_PSR_BN;
	}
}

#define vcpu_bsw1_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT)	\
	do {								\
		__asm__ __volatile__ (";;extr.u %0 = %3, %6, 16;;\n"	\
				"dep %1 = %0, %1, 16, 16;;\n"		\
				"st8 [%4] = %1\n"			\
				"extr.u %0 = %2, 0, 16;;\n"		\
				"dep %3 = %0, %3, %6, 16;;\n"		\
				"st8 [%5] = %3\n"			\
				::"r"(i), "r"(*b0unat), "r"(*b1unat),	\
				"r"(*runat), "r"(b0unat), "r"(runat),	\
				"i"(VMM_PT_REGS_R16_SLOT) : "memory");	\
	} while (0)

void vcpu_bsw1(struct kvm_vcpu *vcpu)
{
	unsigned long i;
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	unsigned long *r = &regs->r16;
	unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
	unsigned long *b1 = &VCPU(vcpu, vgr[0]);
	unsigned long *runat = &regs->eml_unat;
	unsigned long *b0unat = &VCPU(vcpu, vbnat);
	unsigned long *b1unat = &VCPU(vcpu, vnat);

	if (!(VCPU(vcpu, vpsr) & IA64_PSR_BN)) {
		for (i = 0; i < 16; i++) {
			*b0++ = *r;
			*r++ = *b1++;
		}
		vcpu_bsw1_unat(i, b0unat, b1unat, runat,
				VMM_PT_REGS_R16_SLOT);
		VCPU(vcpu, vpsr) |= IA64_PSR_BN;
	}
}

void vcpu_rfi(struct kvm_vcpu *vcpu)
{
	unsigned long ifs, psr;
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	psr = VCPU(vcpu, ipsr);
	if (psr & IA64_PSR_BN)
		vcpu_bsw1(vcpu);
	else
		vcpu_bsw0(vcpu);
	vcpu_set_psr(vcpu, psr);
	ifs = VCPU(vcpu, ifs);
	if (ifs >> 63)
		regs->cr_ifs = ifs;
	regs->cr_iip = VCPU(vcpu, iip);
}

/*
 * VPSR cannot keep track of the below bits of the guest PSR;
 * this function reconstructs the full guest PSR.
 */
unsigned long vcpu_get_psr(struct kvm_vcpu *vcpu)
{
	unsigned long mask;
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	mask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
		IA64_PSR_MFH | IA64_PSR_CPL | IA64_PSR_RI;
	return (VCPU(vcpu, vpsr) & ~mask) | (regs->cr_ipsr & mask);
}

void kvm_rsm(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long vpsr;
	unsigned long imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21)
					| inst.M44.imm;

	vpsr = vcpu_get_psr(vcpu);
	vpsr &= (~imm24);
	vcpu_set_psr(vcpu, vpsr);
}

void kvm_ssm(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long vpsr;
	unsigned long imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21)
					| inst.M44.imm;

	vpsr = vcpu_get_psr(vcpu);
	vpsr |= imm24;
	vcpu_set_psr(vcpu, vpsr);
}

/*
 * bit -- starting bit
 * len -- how many bits
 */
#define MASK(bit,len)					\
({							\
	__u64 ret;					\
							\
	__asm __volatile("dep %0=-1, r0, %1, %2"	\
				: "=r" (ret) :		\
				  "M" (bit),		\
				  "M" (len));		\
	ret;						\
})

void vcpu_set_psr_l(struct kvm_vcpu *vcpu, unsigned long val)
{
	val = (val & MASK(0, 32)) | (vcpu_get_psr(vcpu) & MASK(32, 32));
	vcpu_set_psr(vcpu, val);
}
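
/*
 * Illustrative example: MASK(0, 32) deposits 32 one-bits starting at
 * bit 0, i.e. 0x00000000ffffffff, so vcpu_set_psr_l() above replaces
 * only the lower half of the guest PSR and preserves the upper half.
 */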

void kvm_mov_to_psr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long val;

	val = vcpu_get_gr(vcpu, inst.M35.r2);
	vcpu_set_psr_l(vcpu, val);
}

void kvm_mov_from_psr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long val;

	val = vcpu_get_psr(vcpu);
	val = (val & MASK(0, 32)) | (val & MASK(35, 2));
	vcpu_set_gr(vcpu, inst.M33.r1, val, 0);
}

void vcpu_increment_iip(struct kvm_vcpu *vcpu)
{
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;

	if (ipsr->ri == 2) {
		ipsr->ri = 0;
		regs->cr_iip += 16;
	} else
		ipsr->ri++;
}

void vcpu_decrement_iip(struct kvm_vcpu *vcpu)
{
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;

	if (ipsr->ri == 0) {
		ipsr->ri = 2;
		regs->cr_iip -= 16;
	} else
		ipsr->ri--;
}
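
/*
 * Background note: an ia64 bundle is 16 bytes and holds three
 * instruction slots; psr.ri selects the slot within the bundle, so
 * stepping past slot 2 advances iip to the next bundle and resets ri.
 */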

/*
 * Emulate a privileged operation. The fault cause and the opcode of
 * the faulting instruction are taken from the per-vcpu VMX state.
 */
void kvm_emulate(struct kvm_vcpu *vcpu, struct kvm_pt_regs *regs)
{
	unsigned long status, cause, opcode;
	INST64 inst;

	status = IA64_NO_FAULT;
	cause = VMX(vcpu, cause);
	opcode = VMX(vcpu, opcode);
	inst.inst = opcode;
	/*
	 * Switch to the actual virtual rid in rr0 and rr4,
	 * which is required by some tlb related instructions.
	 */
	prepare_if_physical_mode(vcpu);

	switch (cause) {
	case EVENT_RSM:
		kvm_rsm(vcpu, inst);
		break;
	case EVENT_SSM:
		kvm_ssm(vcpu, inst);
		break;
	case EVENT_MOV_TO_PSR:
		kvm_mov_to_psr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_PSR:
		kvm_mov_from_psr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_CR:
		kvm_mov_from_cr(vcpu, inst);
		break;
	case EVENT_MOV_TO_CR:
		kvm_mov_to_cr(vcpu, inst);
		break;
	case EVENT_RFI:
		vcpu_rfi(vcpu);
		break;
	case EVENT_ITR_D:
		kvm_itr_d(vcpu, inst);
		break;
	case EVENT_ITR_I:
		kvm_itr_i(vcpu, inst);
		break;
	case EVENT_PTR_D:
		kvm_ptr_d(vcpu, inst);
		break;
	case EVENT_PTR_I:
		kvm_ptr_i(vcpu, inst);
		break;
	case EVENT_ITC_D:
		kvm_itc_d(vcpu, inst);
		break;
	case EVENT_ITC_I:
		kvm_itc_i(vcpu, inst);
		break;
	case EVENT_PTC_L:
		kvm_ptc_l(vcpu, inst);
		break;
	case EVENT_PTC_G:
		kvm_ptc_g(vcpu, inst);
		break;
	case EVENT_PTC_GA:
		kvm_ptc_ga(vcpu, inst);
		break;
	case EVENT_PTC_E:
		kvm_ptc_e(vcpu, inst);
		break;
	case EVENT_MOV_TO_RR:
		kvm_mov_to_rr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_RR:
		kvm_mov_from_rr(vcpu, inst);
		break;
	case EVENT_THASH:
		kvm_thash(vcpu, inst);
		break;
	case EVENT_TTAG:
		kvm_ttag(vcpu, inst);
		break;
	case EVENT_TPA:
		status = kvm_tpa(vcpu, inst);
		break;
	case EVENT_TAK:
		kvm_tak(vcpu, inst);
		break;
	case EVENT_MOV_TO_AR_IMM:
		kvm_mov_to_ar_imm(vcpu, inst);
		break;
	case EVENT_MOV_TO_AR:
		kvm_mov_to_ar_reg(vcpu, inst);
		break;
	case EVENT_MOV_FROM_AR:
		kvm_mov_from_ar_reg(vcpu, inst);
		break;
	case EVENT_MOV_TO_DBR:
		kvm_mov_to_dbr(vcpu, inst);
		break;
	case EVENT_MOV_TO_IBR:
		kvm_mov_to_ibr(vcpu, inst);
		break;
	case EVENT_MOV_TO_PMC:
		kvm_mov_to_pmc(vcpu, inst);
		break;
	case EVENT_MOV_TO_PMD:
		kvm_mov_to_pmd(vcpu, inst);
		break;
	case EVENT_MOV_TO_PKR:
		kvm_mov_to_pkr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_DBR:
		kvm_mov_from_dbr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_IBR:
		kvm_mov_from_ibr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_PMC:
		kvm_mov_from_pmc(vcpu, inst);
		break;
	case EVENT_MOV_FROM_PKR:
		kvm_mov_from_pkr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_CPUID:
		kvm_mov_from_cpuid(vcpu, inst);
		break;
	default:
		status = IA64_FAULT;
		break;
	}

	/* Assume all status is NO_FAULT ? */
	if (status == IA64_NO_FAULT && cause != EVENT_RFI)
		vcpu_increment_iip(vcpu);

	recover_if_physical_mode(vcpu);
}

void init_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	vcpu->arch.mode_flags = GUEST_IN_PHY;
	VMX(vcpu, vrr[0]) = 0x38;
	VMX(vcpu, vrr[1]) = 0x38;
	VMX(vcpu, vrr[2]) = 0x38;
	VMX(vcpu, vrr[3]) = 0x38;
	VMX(vcpu, vrr[4]) = 0x38;
	VMX(vcpu, vrr[5]) = 0x38;
	VMX(vcpu, vrr[6]) = 0x38;
	VMX(vcpu, vrr[7]) = 0x38;
	VCPU(vcpu, vpsr) = IA64_PSR_BN;
	VCPU(vcpu, dcr) = 0;
	/* pta.size must not be 0.  The minimum is 15 (32k) */
	VCPU(vcpu, pta) = 15 << 2;
	VCPU(vcpu, itv) = 0x10000;
	VCPU(vcpu, itm) = 0;
	VMX(vcpu, last_itc) = 0;

	VCPU(vcpu, lid) = VCPU_LID(vcpu);
	VCPU(vcpu, ivr) = 0;
	VCPU(vcpu, tpr) = 0x10000;
	VCPU(vcpu, eoi) = 0;
	VCPU(vcpu, irr[0]) = 0;
	VCPU(vcpu, irr[1]) = 0;
	VCPU(vcpu, irr[2]) = 0;
	VCPU(vcpu, irr[3]) = 0;
	VCPU(vcpu, pmv) = 0x10000;
	VCPU(vcpu, cmcv) = 0x10000;
	VCPU(vcpu, lrr0) = 0x10000;	/* default reset value? */
	VCPU(vcpu, lrr1) = 0x10000;	/* default reset value? */
	update_vhpi(vcpu, NULL_VECTOR);
	VLSAPIC_XTP(vcpu) = 0x80;	/* disabled */

	for (i = 0; i < 4; i++)
		VLSAPIC_INSVC(vcpu, i) = 0;
}
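
/*
 * Note on the reset values above: rr = 0x38 encodes ve = 0 and
 * ps = 14 (2^14 = 16KB pages) with rid 0; itv/pmv/cmcv/lrr* = 0x10000
 * set only bit 16, i.e. the interrupt is masked with vector 0.
 */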

void kvm_init_all_rr(struct kvm_vcpu *vcpu)
{
	unsigned long psr;

	local_irq_save(psr);

	/* WARNING: do not allow co-existence of both virtual mode and
	 * physical mode in the same region */

	vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(VMX(vcpu, vrr[VRN0]));
	vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(VMX(vcpu, vrr[VRN4]));

	if (is_physical_mode(vcpu)) {
		if (vcpu->arch.mode_flags & GUEST_PHY_EMUL)
			panic_vm(vcpu, "Machine Status conflicts!\n");

		ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0);
		ia64_dv_serialize_data();
		ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rr4);
		ia64_dv_serialize_data();
	} else {
		ia64_set_rr((VRN0 << VRN_SHIFT),
				vcpu->arch.metaphysical_saved_rr0);
		ia64_dv_serialize_data();
		ia64_set_rr((VRN4 << VRN_SHIFT),
				vcpu->arch.metaphysical_saved_rr4);
		ia64_dv_serialize_data();
	}
	ia64_set_rr((VRN1 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN1])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN2 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN2])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN3 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN3])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN5 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN5])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN7 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN7])));
	ia64_dv_serialize_data();
	ia64_srlz_d();
	ia64_set_psr(psr);
}

	ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)v->arch.vpd,
			0, 0, 0, 0, 0, 0);

static void kvm_show_registers(struct kvm_pt_regs *regs)
{
	unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;

	struct kvm_vcpu *vcpu = current_vcpu;

	printk("vcpu 0x%p vcpu %d\n",
			vcpu, vcpu->vcpu_id);

	printk("psr : %016lx ifs : %016lx ip  : [<%016lx>]\n",
			regs->cr_ipsr, regs->cr_ifs, ip);

	printk("unat: %016lx pfs : %016lx rsc : %016lx\n",
			regs->ar_unat, regs->ar_pfs, regs->ar_rsc);
	printk("rnat: %016lx bspstore: %016lx pr  : %016lx\n",
			regs->ar_rnat, regs->ar_bspstore, regs->pr);
	printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n",
			regs->loadrs, regs->ar_ccv, regs->ar_fpsr);
	printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd);
	printk("b0  : %016lx b6  : %016lx b7  : %016lx\n", regs->b0,
			regs->b6, regs->b7);
	printk("f6  : %05lx%016lx f7  : %05lx%016lx\n",
			regs->f6.u.bits[1], regs->f6.u.bits[0],
			regs->f7.u.bits[1], regs->f7.u.bits[0]);
	printk("f8  : %05lx%016lx f9  : %05lx%016lx\n",
			regs->f8.u.bits[1], regs->f8.u.bits[0],
			regs->f9.u.bits[1], regs->f9.u.bits[0]);
	printk("f10 : %05lx%016lx f11 : %05lx%016lx\n",
			regs->f10.u.bits[1], regs->f10.u.bits[0],
			regs->f11.u.bits[1], regs->f11.u.bits[0]);

	printk("r1  : %016lx r2  : %016lx r3  : %016lx\n", regs->r1,
			regs->r2, regs->r3);
	printk("r8  : %016lx r9  : %016lx r10 : %016lx\n", regs->r8,
			regs->r9, regs->r10);
	printk("r11 : %016lx r12 : %016lx r13 : %016lx\n", regs->r11,
			regs->r12, regs->r13);
	printk("r14 : %016lx r15 : %016lx r16 : %016lx\n", regs->r14,
			regs->r15, regs->r16);
	printk("r17 : %016lx r18 : %016lx r19 : %016lx\n", regs->r17,
			regs->r18, regs->r19);
	printk("r20 : %016lx r21 : %016lx r22 : %016lx\n", regs->r20,
			regs->r21, regs->r22);
	printk("r23 : %016lx r24 : %016lx r25 : %016lx\n", regs->r23,
			regs->r24, regs->r25);
	printk("r26 : %016lx r27 : %016lx r28 : %016lx\n", regs->r26,
			regs->r27, regs->r28);
	printk("r29 : %016lx r30 : %016lx r31 : %016lx\n", regs->r29,
			regs->r30, regs->r31);
}

void panic_vm(struct kvm_vcpu *v, const char *fmt, ...)
{
	va_list args;
	char buf[256];

	struct kvm_pt_regs *regs = vcpu_regs(v);
	struct exit_ctl_data *p = &v->arch.exit_data;

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	printk(buf);
	kvm_show_registers(regs);
	p->exit_reason = EXIT_REASON_VM_PANIC;
	vmm_transition(v);
	/* Never returns */
	while (1)
		;
}