1 /*
2  * kvm_vcpu.c: handling all virtual cpu related things.
3  * Copyright (c) 2005, Intel Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16  * Place - Suite 330, Boston, MA 02111-1307 USA.
17  *
18  *  Shaofan Li (Susue Li) <susie.li@intel.com>
19  *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
20  *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
21  *  Xiantao Zhang <xiantao.zhang@intel.com>
22  */
23
24 #include <linux/kvm_host.h>
25 #include <linux/types.h>
26
27 #include <asm/processor.h>
28 #include <asm/ia64regs.h>
29 #include <asm/gcc_intrin.h>
30 #include <asm/kregs.h>
31 #include <asm/pgtable.h>
32 #include <asm/tlb.h>
33
34 #include "asm-offsets.h"
35 #include "vcpu.h"
36
37 /*
38  * Special notes:
39  * - Index by it/dt/rt sequence
40  * - Only existing mode transitions are allowed in this table
41  * - RSE is placed at lazy mode when emulating guest partial mode
42  * - If gva happens to fall in rr0 or rr4, the only allowed case is
43  *   identity mapping (gva = gpa), or panic! (How?)
44  */
45 int mm_switch_table[8][8] = {
46         /*  2004/09/12(Kevin): Allow switch to self */
47         /*
48          *  (it,dt,rt): (0,0,0) -> (1,1,1)
49          *  This kind of transition usually occurs in the very early
50          *  stage of Linux boot up procedure. Another case is in efi
51          *  and pal calls. (see "arch/ia64/kernel/head.S")
52          *
53          *  (it,dt,rt): (0,0,0) -> (0,1,1)
54          *  This kind of transition is found when OSYa exits efi boot
55          *  service. Since gva = gpa in this case (same region),
56          *  data accesses can be satisfied even though the itlb entry
57          *  for physical emulation is hit.
58          */
59         {SW_SELF, 0,  0,  SW_NOP, 0,  0,  0,  SW_P2V},
60         {0,  0,  0,  0,  0,  0,  0,  0},
61         {0,  0,  0,  0,  0,  0,  0,  0},
62         /*
63          *  (it,dt,rt): (0,1,1) -> (1,1,1)
64          *  This kind of transition is found in OSYa.
65          *
66          *  (it,dt,rt): (0,1,1) -> (0,0,0)
67          *  This kind of transition is found in OSYa.
68          */
69         {SW_NOP, 0,  0,  SW_SELF, 0,  0,  0,  SW_P2V},
70         /* (1,0,0)->(1,1,1) */
71         {0,  0,  0,  0,  0,  0,  0,  SW_P2V},
72         /*
73          *  (it,dt,rt): (1,0,1) -> (1,1,1)
74          *  This kind of transition usually occurs when Linux returns
75          *  from the low level TLB miss handlers.
76          *  (see "arch/ia64/kernel/ivt.S")
77          */
78         {0,  0,  0,  0,  0,  SW_SELF, 0,  SW_P2V},
79         {0,  0,  0,  0,  0,  0,  0,  0},
80         /*
81          *  (it,dt,rt): (1,1,1) -> (1,0,1)
82          *  This kind of transition usually occurs in Linux low level
83          *  TLB miss handler. (see "arch/ia64/kernel/ivt.S")
84          *
85          *  (it,dt,rt): (1,1,1) -> (0,0,0)
86          *  This kind of transition usually occurs in pal and efi calls,
87          *  which requires running in physical mode.
88          *  (see "arch/ia64/kernel/head.S")
89          *  (1,1,1)->(1,0,0)
90          */
91
92         {SW_V2P, 0,  0,  0,  SW_V2P, SW_V2P, 0,  SW_SELF},
93 };
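
/*
 * Illustrative sketch (editor's note, not from the original source):
 * assuming MODE_IND() packs the (it, dt, rt) bits into a 3-bit index,
 * a lookup such as
 *
 *	act = mm_switch_table[MODE_IND(old_psr)][MODE_IND(new_psr)];
 *
 * resolves (it,dt,rt) = (0,0,0) -> (1,1,1) to row 0, column 7, i.e.
 * SW_P2V (enter virtual mode), and (1,1,1) -> (1,0,1) to row 7,
 * column 5, i.e. SW_V2P (enter physical mode).
 */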
94
95 void physical_mode_init(struct kvm_vcpu  *vcpu)
96 {
97         vcpu->arch.mode_flags = GUEST_IN_PHY;
98 }
99
100 void switch_to_physical_rid(struct kvm_vcpu *vcpu)
101 {
102         unsigned long psr;
103
104         /* Switch rr[0] and rr[4] to the RIDs used for physical mode emulation */
105         psr = ia64_clear_ic();
106         ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_rr0);
107         ia64_srlz_d();
108         ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_rr4);
109         ia64_srlz_d();
110
111         ia64_set_psr(psr);
112         return;
113 }
114
115 void switch_to_virtual_rid(struct kvm_vcpu *vcpu)
116 {
117         unsigned long psr;
118
119         psr = ia64_clear_ic();
120         ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
121         ia64_srlz_d();
122         ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4);
123         ia64_srlz_d();
124         ia64_set_psr(psr);
125         return;
126 }
127
128 static int mm_switch_action(struct ia64_psr opsr, struct ia64_psr npsr)
129 {
130         return mm_switch_table[MODE_IND(opsr)][MODE_IND(npsr)];
131 }
132
133 void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
134                                         struct ia64_psr new_psr)
135 {
136         int act;
137         act = mm_switch_action(old_psr, new_psr);
138         switch (act) {
139         case SW_V2P:
140                 /*printk("V -> P mode transition: (0x%lx -> 0x%lx)\n",
141                 old_psr.val, new_psr.val);*/
142                 switch_to_physical_rid(vcpu);
143                 /*
144                  * Set RSE to enforced lazy mode, to prevent active RSE
145                  * save/restore while in guest physical mode.
146                  */
147                 vcpu->arch.mode_flags |= GUEST_IN_PHY;
148                 break;
149         case SW_P2V:
150                 switch_to_virtual_rid(vcpu);
151                 /*
152                  * Recover the old mode, which was saved when entering
153                  * guest physical mode.
154                  */
155                 vcpu->arch.mode_flags &= ~GUEST_IN_PHY;
156                 break;
157         case SW_SELF:
158                 break;
159         case SW_NOP:
160                 break;
161         default:
162                 /* Sanity check */
163                 break;
164         }
165         return;
166 }
167
184 void check_mm_mode_switch(struct kvm_vcpu *vcpu,  struct ia64_psr old_psr,
185                                         struct ia64_psr new_psr)
186 {
187
188         if ((old_psr.dt != new_psr.dt)
189                         || (old_psr.it != new_psr.it)
190                         || (old_psr.rt != new_psr.rt))
191                 switch_mm_mode(vcpu, old_psr, new_psr);
192
193         return;
194 }
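
/*
 * Usage sketch (editor's note): check_mm_mode_switch() is the driver for
 * the mode transition table above; a guest PSR write that flips any of
 * it/dt/rt (see vcpu_set_psr() later in this file) funnels through here,
 * roughly as:
 *
 *	old_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
 *	... update VCPU(vcpu, vpsr) ...
 *	new_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
 *	check_mm_mode_switch(vcpu, old_psr, new_psr);
 */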
195
196
197 /*
198  * In physical mode, tc/tr insertions for regions 0 and 4 use
199  * RID[0] and RID[4], which are reserved for physical mode emulation.
200  * However, those inserted tc/tr entries want the RID for virtual
201  * mode, so the original virtual RID needs to be restored before
202  * the insert.
203  *
204  * Operations which require such a switch include:
205  *  - insertions (itc.*, itr.*)
206  *  - purges (ptc.* and ptr.*)
207  *  - tpa
208  *  - tak
209  *  - thash?, ttag?
210  * All of the above need the actual virtual RID for the destination entry.
211  */
212
213 void prepare_if_physical_mode(struct kvm_vcpu *vcpu)
214 {
215         if (is_physical_mode(vcpu)) {
216                 vcpu->arch.mode_flags |= GUEST_PHY_EMUL;
217                 switch_to_virtual_rid(vcpu);
218         }
219         return;
220 }
221
222 /* Recover always follows prepare */
223 void recover_if_physical_mode(struct kvm_vcpu *vcpu)
224 {
225         if (is_physical_mode(vcpu))
226                 switch_to_physical_rid(vcpu);
227         vcpu->arch.mode_flags &= ~GUEST_PHY_EMUL;
228         return;
229 }
230
231 #define RPT(x)  ((u16) &((struct kvm_pt_regs *)0)->x)
232
233 static u16 gr_info[32] = {
234         0,      /* r0 is read-only : WE SHOULD NEVER GET THIS */
235         RPT(r1), RPT(r2), RPT(r3),
236         RPT(r4), RPT(r5), RPT(r6), RPT(r7),
237         RPT(r8), RPT(r9), RPT(r10), RPT(r11),
238         RPT(r12), RPT(r13), RPT(r14), RPT(r15),
239         RPT(r16), RPT(r17), RPT(r18), RPT(r19),
240         RPT(r20), RPT(r21), RPT(r22), RPT(r23),
241         RPT(r24), RPT(r25), RPT(r26), RPT(r27),
242         RPT(r28), RPT(r29), RPT(r30), RPT(r31)
243 };
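
/*
 * Editor's note: RPT(x) above is the classic offsetof() idiom, so
 * gr_info[n] holds the byte offset of rN inside struct kvm_pt_regs.
 * getreg()/setreg() below use it roughly as:
 *
 *	addr = (unsigned long)regs + gr_info[regnum];
 *	*val = *(unsigned long *)addr;
 */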
244
245 #define IA64_FIRST_STACKED_GR   32
246 #define IA64_FIRST_ROTATING_FR  32
247
248 static inline unsigned long
249 rotate_reg(unsigned long sor, unsigned long rrb, unsigned long reg)
250 {
251         reg += rrb;
252         if (reg >= sor)
253                 reg -= sor;
254         return reg;
255 }
256
257 /*
258  * Return the (rotated) index for floating point register
259  * REGNUM (REGNUM must be in the range 32-127;
260  * the result is in the range 0-95).
261  */
262 static inline unsigned long fph_index(struct kvm_pt_regs *regs,
263                                                 long regnum)
264 {
265         unsigned long rrb_fr = (regs->cr_ifs >> 25) & 0x7f;
266         return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
267 }
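
/*
 * Worked example (editor's note): with cr.ifs.rrb.fr = 1, a reference to
 * f127 gives rotate_reg(96, 1, 127 - 32) = 95 + 1 = 96, which wraps to 0;
 * after the caller adds IA64_FIRST_ROTATING_FR back, the access lands
 * on f32.
 */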
268
269 /*
270  * Skip num_regs register slots (num_regs may be negative) starting
271  * at addr, taking the intervening RNAT collection slots into account.
272  */
273 static inline unsigned long *kvm_rse_skip_regs(unsigned long *addr,
274                                                         long num_regs)
275 {
276         long delta = ia64_rse_slot_num(addr) + num_regs;
277         int i = 0;
278
279         if (num_regs < 0)
280                 delta -= 0x3e;
281         if (delta < 0) {
282                 while (delta <= -0x3f) {
283                         i--;
284                         delta += 0x3f;
285                 }
286         } else {
287                 while (delta >= 0x3f) {
288                         i++;
289                         delta -= 0x3f;
290                 }
291         }
292
293         return addr + num_regs + i;
294 }
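
/*
 * Worked example (editor's note): every 64th RSE slot is an RNAT
 * collection slot, so each naturally aligned group of 64 slots holds
 * only 0x3f (63) registers.  Skipping back 7 registers from a slot
 * number of 5 gives delta = 5 - 7 - 0x3e = -64, so i ends up as -1 and
 * the function returns addr - 8: seven register slots plus the one
 * RNAT slot crossed on the way.
 */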
295
296 static void get_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
297                                         unsigned long *val, int *nat)
298 {
299         unsigned long *bsp, *addr, *rnat_addr, *bspstore;
300         unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
301         unsigned long nat_mask;
302         unsigned long old_rsc, new_rsc;
303         long sof = (regs->cr_ifs) & 0x7f;
304         long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
305         long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
306         long ridx = r1 - 32;
307
308         if (ridx < sor)
309                 ridx = rotate_reg(sor, rrb_gr, ridx);
310
311         old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
312         new_rsc = old_rsc&(~(0x3));
313         ia64_setreg(_IA64_REG_AR_RSC, new_rsc);
314
315         bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
316         bsp = kbs + (regs->loadrs >> 19);
317
318         addr = kvm_rse_skip_regs(bsp, -sof + ridx);
319         nat_mask = 1UL << ia64_rse_slot_num(addr);
320         rnat_addr = ia64_rse_rnat_addr(addr);
321
322         if (addr >= bspstore) {
323                 ia64_flushrs();
324                 ia64_mf();
325                 bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
326         }
327         *val = *addr;
328         if (nat) {
329                 if (bspstore < rnat_addr)
330                         *nat = (int)!!(ia64_getreg(_IA64_REG_AR_RNAT)
331                                                         & nat_mask);
332                 else
333                         *nat = (int)!!((*rnat_addr) & nat_mask);
334         }
335         ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
336 }
337
338 void set_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
339                                 unsigned long val, unsigned long nat)
340 {
341         unsigned long *bsp, *bspstore, *addr, *rnat_addr;
342         unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
343         unsigned long nat_mask;
344         unsigned long old_rsc, new_rsc, psr;
345         unsigned long rnat;
346         long sof = (regs->cr_ifs) & 0x7f;
347         long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
348         long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
349         long ridx = r1 - 32;
350
351         if (ridx < sor)
352                 ridx = rotate_reg(sor, rrb_gr, ridx);
353
354         old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
355         /* put RSC to lazy mode, and set loadrs 0 */
356         new_rsc = old_rsc & (~0x3fff0003);
357         ia64_setreg(_IA64_REG_AR_RSC, new_rsc);
358         bsp = kbs + (regs->loadrs >> 19); /* 16 + 3 */
359
360         addr = kvm_rse_skip_regs(bsp, -sof + ridx);
361         nat_mask = 1UL << ia64_rse_slot_num(addr);
362         rnat_addr = ia64_rse_rnat_addr(addr);
363
364         local_irq_save(psr);
365         bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
366         if (addr >= bspstore) {
367
368                 ia64_flushrs();
369                 ia64_mf();
370                 *addr = val;
371                 bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
372                 rnat = ia64_getreg(_IA64_REG_AR_RNAT);
373                 if (bspstore < rnat_addr)
374                         rnat = rnat & (~nat_mask);
375                 else
376                         *rnat_addr = (*rnat_addr)&(~nat_mask);
377
378                 ia64_mf();
379                 ia64_loadrs();
380                 ia64_setreg(_IA64_REG_AR_RNAT, rnat);
381         } else {
382                 rnat = ia64_getreg(_IA64_REG_AR_RNAT);
383                 *addr = val;
384                 if (bspstore < rnat_addr)
385                         rnat = rnat&(~nat_mask);
386                 else
387                         *rnat_addr = (*rnat_addr) & (~nat_mask);
388
389                 ia64_setreg(_IA64_REG_AR_BSPSTORE, bspstore);
390                 ia64_setreg(_IA64_REG_AR_RNAT, rnat);
391         }
392         local_irq_restore(psr);
393         ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
394 }
395
396 void getreg(unsigned long regnum, unsigned long *val,
397                                 int *nat, struct kvm_pt_regs *regs)
398 {
399         unsigned long addr, *unat;
400         if (regnum >= IA64_FIRST_STACKED_GR) {
401                 get_rse_reg(regs, regnum, val, nat);
402                 return;
403         }
404
405         /*
406          * Now look at registers in [0-31] range and init correct UNAT
407          */
408         addr = (unsigned long)regs;
409         unat = &regs->eml_unat;
410
411         addr += gr_info[regnum];
412
413         *val  = *(unsigned long *)addr;
414         /*
415          * do it only when requested
416          */
417         if (nat)
418                 *nat  = (*unat >> ((addr >> 3) & 0x3f)) & 0x1UL;
419 }
420
421 void setreg(unsigned long regnum, unsigned long val,
422                         int nat, struct kvm_pt_regs *regs)
423 {
424         unsigned long addr;
425         unsigned long bitmask;
426         unsigned long *unat;
427
428         /*
429          * First takes care of stacked registers
430          */
431         if (regnum >= IA64_FIRST_STACKED_GR) {
432                 set_rse_reg(regs, regnum, val, nat);
433                 return;
434         }
435
436         /*
437          * Now look at registers in [0-31] range and init correct UNAT
438          */
439         addr = (unsigned long)regs;
440         unat = &regs->eml_unat;
441         /*
442          * add offset from base of struct
443          * and do it !
444          */
445         addr += gr_info[regnum];
446
447         *(unsigned long *)addr = val;
448
449         /*
450          * We need to update the corresponding UNAT bit to fully emulate the load:
451          * UNAT bit_pos = GR[r3]{8:3}, from EAS-2.4
452          */
453         bitmask   = 1UL << ((addr >> 3) & 0x3f);
454         if (nat)
455                 *unat |= bitmask;
456         else
457                 *unat &= ~bitmask;
458
459 }
460
461 u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg)
462 {
463         struct kvm_pt_regs *regs = vcpu_regs(vcpu);
464         u64 val;
465
466         if (!reg)
467                 return 0;
468         getreg(reg, &val, 0, regs);
469         return val;
470 }
471
472 void vcpu_set_gr(struct kvm_vcpu *vcpu, u64 reg, u64 value, int nat)
473 {
474         struct kvm_pt_regs *regs = vcpu_regs(vcpu);
475         long sof = (regs->cr_ifs) & 0x7f;
476
477         if (!reg)
478                 return;
479         if (reg >= sof + 32)
480                 return;
481         setreg(reg, value, nat, regs);  /* FIXME: handle NATs later*/
482 }
483
484 void getfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
485                                 struct kvm_pt_regs *regs)
486 {
487         /* Take floating register rotation into consideration*/
488         if (regnum >= IA64_FIRST_ROTATING_FR)
489                 regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);
490 #define CASE_FIXED_FP(reg)                      \
491         case  (reg) :                           \
492                 ia64_stf_spill(fpval, reg);     \
493         break
494
495         switch (regnum) {
496                 CASE_FIXED_FP(0);
497                 CASE_FIXED_FP(1);
498                 CASE_FIXED_FP(2);
499                 CASE_FIXED_FP(3);
500                 CASE_FIXED_FP(4);
501                 CASE_FIXED_FP(5);
502
503                 CASE_FIXED_FP(6);
504                 CASE_FIXED_FP(7);
505                 CASE_FIXED_FP(8);
506                 CASE_FIXED_FP(9);
507                 CASE_FIXED_FP(10);
508                 CASE_FIXED_FP(11);
509
510                 CASE_FIXED_FP(12);
511                 CASE_FIXED_FP(13);
512                 CASE_FIXED_FP(14);
513                 CASE_FIXED_FP(15);
514                 CASE_FIXED_FP(16);
515                 CASE_FIXED_FP(17);
516                 CASE_FIXED_FP(18);
517                 CASE_FIXED_FP(19);
518                 CASE_FIXED_FP(20);
519                 CASE_FIXED_FP(21);
520                 CASE_FIXED_FP(22);
521                 CASE_FIXED_FP(23);
522                 CASE_FIXED_FP(24);
523                 CASE_FIXED_FP(25);
524                 CASE_FIXED_FP(26);
525                 CASE_FIXED_FP(27);
526                 CASE_FIXED_FP(28);
527                 CASE_FIXED_FP(29);
528                 CASE_FIXED_FP(30);
529                 CASE_FIXED_FP(31);
530                 CASE_FIXED_FP(32);
531                 CASE_FIXED_FP(33);
532                 CASE_FIXED_FP(34);
533                 CASE_FIXED_FP(35);
534                 CASE_FIXED_FP(36);
535                 CASE_FIXED_FP(37);
536                 CASE_FIXED_FP(38);
537                 CASE_FIXED_FP(39);
538                 CASE_FIXED_FP(40);
539                 CASE_FIXED_FP(41);
540                 CASE_FIXED_FP(42);
541                 CASE_FIXED_FP(43);
542                 CASE_FIXED_FP(44);
543                 CASE_FIXED_FP(45);
544                 CASE_FIXED_FP(46);
545                 CASE_FIXED_FP(47);
546                 CASE_FIXED_FP(48);
547                 CASE_FIXED_FP(49);
548                 CASE_FIXED_FP(50);
549                 CASE_FIXED_FP(51);
550                 CASE_FIXED_FP(52);
551                 CASE_FIXED_FP(53);
552                 CASE_FIXED_FP(54);
553                 CASE_FIXED_FP(55);
554                 CASE_FIXED_FP(56);
555                 CASE_FIXED_FP(57);
556                 CASE_FIXED_FP(58);
557                 CASE_FIXED_FP(59);
558                 CASE_FIXED_FP(60);
559                 CASE_FIXED_FP(61);
560                 CASE_FIXED_FP(62);
561                 CASE_FIXED_FP(63);
562                 CASE_FIXED_FP(64);
563                 CASE_FIXED_FP(65);
564                 CASE_FIXED_FP(66);
565                 CASE_FIXED_FP(67);
566                 CASE_FIXED_FP(68);
567                 CASE_FIXED_FP(69);
568                 CASE_FIXED_FP(70);
569                 CASE_FIXED_FP(71);
570                 CASE_FIXED_FP(72);
571                 CASE_FIXED_FP(73);
572                 CASE_FIXED_FP(74);
573                 CASE_FIXED_FP(75);
574                 CASE_FIXED_FP(76);
575                 CASE_FIXED_FP(77);
576                 CASE_FIXED_FP(78);
577                 CASE_FIXED_FP(79);
578                 CASE_FIXED_FP(80);
579                 CASE_FIXED_FP(81);
580                 CASE_FIXED_FP(82);
581                 CASE_FIXED_FP(83);
582                 CASE_FIXED_FP(84);
583                 CASE_FIXED_FP(85);
584                 CASE_FIXED_FP(86);
585                 CASE_FIXED_FP(87);
586                 CASE_FIXED_FP(88);
587                 CASE_FIXED_FP(89);
588                 CASE_FIXED_FP(90);
589                 CASE_FIXED_FP(91);
590                 CASE_FIXED_FP(92);
591                 CASE_FIXED_FP(93);
592                 CASE_FIXED_FP(94);
593                 CASE_FIXED_FP(95);
594                 CASE_FIXED_FP(96);
595                 CASE_FIXED_FP(97);
596                 CASE_FIXED_FP(98);
597                 CASE_FIXED_FP(99);
598                 CASE_FIXED_FP(100);
599                 CASE_FIXED_FP(101);
600                 CASE_FIXED_FP(102);
601                 CASE_FIXED_FP(103);
602                 CASE_FIXED_FP(104);
603                 CASE_FIXED_FP(105);
604                 CASE_FIXED_FP(106);
605                 CASE_FIXED_FP(107);
606                 CASE_FIXED_FP(108);
607                 CASE_FIXED_FP(109);
608                 CASE_FIXED_FP(110);
609                 CASE_FIXED_FP(111);
610                 CASE_FIXED_FP(112);
611                 CASE_FIXED_FP(113);
612                 CASE_FIXED_FP(114);
613                 CASE_FIXED_FP(115);
614                 CASE_FIXED_FP(116);
615                 CASE_FIXED_FP(117);
616                 CASE_FIXED_FP(118);
617                 CASE_FIXED_FP(119);
618                 CASE_FIXED_FP(120);
619                 CASE_FIXED_FP(121);
620                 CASE_FIXED_FP(122);
621                 CASE_FIXED_FP(123);
622                 CASE_FIXED_FP(124);
623                 CASE_FIXED_FP(125);
624                 CASE_FIXED_FP(126);
625                 CASE_FIXED_FP(127);
626         }
627 #undef CASE_FIXED_FP
628 }
629
630 void setfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
631                                         struct kvm_pt_regs *regs)
632 {
633         /* Take floating register rotation into consideration*/
634         if (regnum >= IA64_FIRST_ROTATING_FR)
635                 regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);
636
637 #define CASE_FIXED_FP(reg)                      \
638         case (reg) :                            \
639                 ia64_ldf_fill(reg, fpval);      \
640         break
641
642         switch (regnum) {
643                 CASE_FIXED_FP(2);
644                 CASE_FIXED_FP(3);
645                 CASE_FIXED_FP(4);
646                 CASE_FIXED_FP(5);
647
648                 CASE_FIXED_FP(6);
649                 CASE_FIXED_FP(7);
650                 CASE_FIXED_FP(8);
651                 CASE_FIXED_FP(9);
652                 CASE_FIXED_FP(10);
653                 CASE_FIXED_FP(11);
654
655                 CASE_FIXED_FP(12);
656                 CASE_FIXED_FP(13);
657                 CASE_FIXED_FP(14);
658                 CASE_FIXED_FP(15);
659                 CASE_FIXED_FP(16);
660                 CASE_FIXED_FP(17);
661                 CASE_FIXED_FP(18);
662                 CASE_FIXED_FP(19);
663                 CASE_FIXED_FP(20);
664                 CASE_FIXED_FP(21);
665                 CASE_FIXED_FP(22);
666                 CASE_FIXED_FP(23);
667                 CASE_FIXED_FP(24);
668                 CASE_FIXED_FP(25);
669                 CASE_FIXED_FP(26);
670                 CASE_FIXED_FP(27);
671                 CASE_FIXED_FP(28);
672                 CASE_FIXED_FP(29);
673                 CASE_FIXED_FP(30);
674                 CASE_FIXED_FP(31);
675                 CASE_FIXED_FP(32);
676                 CASE_FIXED_FP(33);
677                 CASE_FIXED_FP(34);
678                 CASE_FIXED_FP(35);
679                 CASE_FIXED_FP(36);
680                 CASE_FIXED_FP(37);
681                 CASE_FIXED_FP(38);
682                 CASE_FIXED_FP(39);
683                 CASE_FIXED_FP(40);
684                 CASE_FIXED_FP(41);
685                 CASE_FIXED_FP(42);
686                 CASE_FIXED_FP(43);
687                 CASE_FIXED_FP(44);
688                 CASE_FIXED_FP(45);
689                 CASE_FIXED_FP(46);
690                 CASE_FIXED_FP(47);
691                 CASE_FIXED_FP(48);
692                 CASE_FIXED_FP(49);
693                 CASE_FIXED_FP(50);
694                 CASE_FIXED_FP(51);
695                 CASE_FIXED_FP(52);
696                 CASE_FIXED_FP(53);
697                 CASE_FIXED_FP(54);
698                 CASE_FIXED_FP(55);
699                 CASE_FIXED_FP(56);
700                 CASE_FIXED_FP(57);
701                 CASE_FIXED_FP(58);
702                 CASE_FIXED_FP(59);
703                 CASE_FIXED_FP(60);
704                 CASE_FIXED_FP(61);
705                 CASE_FIXED_FP(62);
706                 CASE_FIXED_FP(63);
707                 CASE_FIXED_FP(64);
708                 CASE_FIXED_FP(65);
709                 CASE_FIXED_FP(66);
710                 CASE_FIXED_FP(67);
711                 CASE_FIXED_FP(68);
712                 CASE_FIXED_FP(69);
713                 CASE_FIXED_FP(70);
714                 CASE_FIXED_FP(71);
715                 CASE_FIXED_FP(72);
716                 CASE_FIXED_FP(73);
717                 CASE_FIXED_FP(74);
718                 CASE_FIXED_FP(75);
719                 CASE_FIXED_FP(76);
720                 CASE_FIXED_FP(77);
721                 CASE_FIXED_FP(78);
722                 CASE_FIXED_FP(79);
723                 CASE_FIXED_FP(80);
724                 CASE_FIXED_FP(81);
725                 CASE_FIXED_FP(82);
726                 CASE_FIXED_FP(83);
727                 CASE_FIXED_FP(84);
728                 CASE_FIXED_FP(85);
729                 CASE_FIXED_FP(86);
730                 CASE_FIXED_FP(87);
731                 CASE_FIXED_FP(88);
732                 CASE_FIXED_FP(89);
733                 CASE_FIXED_FP(90);
734                 CASE_FIXED_FP(91);
735                 CASE_FIXED_FP(92);
736                 CASE_FIXED_FP(93);
737                 CASE_FIXED_FP(94);
738                 CASE_FIXED_FP(95);
739                 CASE_FIXED_FP(96);
740                 CASE_FIXED_FP(97);
741                 CASE_FIXED_FP(98);
742                 CASE_FIXED_FP(99);
743                 CASE_FIXED_FP(100);
744                 CASE_FIXED_FP(101);
745                 CASE_FIXED_FP(102);
746                 CASE_FIXED_FP(103);
747                 CASE_FIXED_FP(104);
748                 CASE_FIXED_FP(105);
749                 CASE_FIXED_FP(106);
750                 CASE_FIXED_FP(107);
751                 CASE_FIXED_FP(108);
752                 CASE_FIXED_FP(109);
753                 CASE_FIXED_FP(110);
754                 CASE_FIXED_FP(111);
755                 CASE_FIXED_FP(112);
756                 CASE_FIXED_FP(113);
757                 CASE_FIXED_FP(114);
758                 CASE_FIXED_FP(115);
759                 CASE_FIXED_FP(116);
760                 CASE_FIXED_FP(117);
761                 CASE_FIXED_FP(118);
762                 CASE_FIXED_FP(119);
763                 CASE_FIXED_FP(120);
764                 CASE_FIXED_FP(121);
765                 CASE_FIXED_FP(122);
766                 CASE_FIXED_FP(123);
767                 CASE_FIXED_FP(124);
768                 CASE_FIXED_FP(125);
769                 CASE_FIXED_FP(126);
770                 CASE_FIXED_FP(127);
771         }
772 }
773
774 void vcpu_get_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
775                                                 struct ia64_fpreg *val)
776 {
777         struct kvm_pt_regs *regs = vcpu_regs(vcpu);
778
779         getfpreg(reg, val, regs);   /* FIXME: handle NATs later*/
780 }
781
782 void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
783                                                 struct ia64_fpreg *val)
784 {
785         struct kvm_pt_regs *regs = vcpu_regs(vcpu);
786
787         if (reg > 1)
788                 setfpreg(reg, val, regs);   /* FIXME: handle NATs later*/
789 }
790
791 /************************************************************************
792  * lsapic timer
793  ***********************************************************************/
794 u64 vcpu_get_itc(struct kvm_vcpu *vcpu)
795 {
796         unsigned long guest_itc;
797         guest_itc = VMX(vcpu, itc_offset) + ia64_getreg(_IA64_REG_AR_ITC);
798
799         if (guest_itc >= VMX(vcpu, last_itc)) {
800                 VMX(vcpu, last_itc) = guest_itc;
801                 return  guest_itc;
802         } else
803                 return VMX(vcpu, last_itc);
804 }
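
/*
 * Editor's note: the guest ITC is modelled as the host ar.itc plus a
 * per-vcpu offset, clamped so the value handed back never goes
 * backwards, roughly:
 *
 *	guest_itc = VMX(vcpu, itc_offset) + ia64_getreg(_IA64_REG_AR_ITC);
 *	if (guest_itc < VMX(vcpu, last_itc))
 *		guest_itc = VMX(vcpu, last_itc);  (keep it monotonic)
 */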
805
806 static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val);
807 static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
808 {
809         struct kvm_vcpu *v;
810         int i;
811         long itc_offset = val - ia64_getreg(_IA64_REG_AR_ITC);
812         unsigned long vitv = VCPU(vcpu, itv);
813
814         if (vcpu->vcpu_id == 0) {
815                 for (i = 0; i < KVM_MAX_VCPUS; i++) {
816                         v = (struct kvm_vcpu *)((char *)vcpu +
817                                         sizeof(struct kvm_vcpu_data) * i);
818                         VMX(v, itc_offset) = itc_offset;
819                         VMX(v, last_itc) = 0;
820                 }
821         }
822         VMX(vcpu, last_itc) = 0;
823         if (VCPU(vcpu, itm) <= val) {
824                 VMX(vcpu, itc_check) = 0;
825                 vcpu_unpend_interrupt(vcpu, vitv);
826         } else {
827                 VMX(vcpu, itc_check) = 1;
828                 vcpu_set_itm(vcpu, VCPU(vcpu, itm));
829         }
830
831 }
832
833 static inline u64 vcpu_get_itm(struct kvm_vcpu *vcpu)
834 {
835         return ((u64)VCPU(vcpu, itm));
836 }
837
838 static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val)
839 {
840         unsigned long vitv = VCPU(vcpu, itv);
841         VCPU(vcpu, itm) = val;
842
843         if (val > vcpu_get_itc(vcpu)) {
844                 VMX(vcpu, itc_check) = 1;
845                 vcpu_unpend_interrupt(vcpu, vitv);
846                 VMX(vcpu, timer_pending) = 0;
847         } else
848                 VMX(vcpu, itc_check) = 0;
849 }
850
851 #define  ITV_VECTOR(itv)    ((itv) & 0xff)
852 #define  ITV_IRQ_MASK(itv)  ((itv) & (1 << 16))
853
854 static inline void vcpu_set_itv(struct kvm_vcpu *vcpu, u64 val)
855 {
856         VCPU(vcpu, itv) = val;
857         if (!ITV_IRQ_MASK(val) && vcpu->arch.timer_pending) {
858                 vcpu_pend_interrupt(vcpu, ITV_VECTOR(val));
859                 vcpu->arch.timer_pending = 0;
860         }
861 }
862
863 static inline void vcpu_set_eoi(struct kvm_vcpu *vcpu, u64 val)
864 {
865         int vec;
866
867         vec = highest_inservice_irq(vcpu);
868         if (vec == NULL_VECTOR)
869                 return;
870         VMX(vcpu, insvc[vec >> 6]) &= ~(1UL << (vec & 63));
871         VCPU(vcpu, eoi) = 0;
872         vcpu->arch.irq_new_pending = 1;
873
874 }
875
876 /* See Table 5-8 in SDM vol2 for the definition */
877 int irq_masked(struct kvm_vcpu *vcpu, int h_pending, int h_inservice)
878 {
879         union ia64_tpr vtpr;
880
881         vtpr.val = VCPU(vcpu, tpr);
882
883         if (h_inservice == NMI_VECTOR)
884                 return IRQ_MASKED_BY_INSVC;
885
886         if (h_pending == NMI_VECTOR) {
887                 /* Non Maskable Interrupt */
888                 return IRQ_NO_MASKED;
889         }
890
891         if (h_inservice == ExtINT_VECTOR)
892                 return IRQ_MASKED_BY_INSVC;
893
894         if (h_pending == ExtINT_VECTOR) {
895                 if (vtpr.mmi) {
896                         /* mask all external IRQ */
897                         return IRQ_MASKED_BY_VTPR;
898                 } else
899                         return IRQ_NO_MASKED;
900         }
901
902         if (is_higher_irq(h_pending, h_inservice)) {
903                 if (is_higher_class(h_pending, vtpr.mic + (vtpr.mmi << 4)))
904                         return IRQ_NO_MASKED;
905                 else
906                         return IRQ_MASKED_BY_VTPR;
907         } else {
908                 return IRQ_MASKED_BY_INSVC;
909         }
910 }
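
/*
 * Editor's sketch of the precedence above: an in-service NMI masks
 * everything, a pending NMI is never masked, and ExtINT is gated by an
 * in-service ExtINT or by vtpr.mmi.  For ordinary vectors, assuming
 * is_higher_class() compares the vector's priority class (vec >> 4)
 * against the vtpr.mic/mmi threshold, a pending vector 0x4e (class 4)
 * with vtpr.mic = 5 would come back as IRQ_MASKED_BY_VTPR.
 */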
911
912 void vcpu_pend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
913 {
914         long spsr;
915         int ret;
916
917         local_irq_save(spsr);
918         ret = test_and_set_bit(vec, &VCPU(vcpu, irr[0]));
919         local_irq_restore(spsr);
920
921         vcpu->arch.irq_new_pending = 1;
922 }
923
924 void vcpu_unpend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
925 {
926         long spsr;
927         int ret;
928
929         local_irq_save(spsr);
930         ret = test_and_clear_bit(vec, &VCPU(vcpu, irr[0]));
931         local_irq_restore(spsr);
932         if (ret) {
933                 vcpu->arch.irq_new_pending = 1;
934                 wmb();
935         }
936 }
937
938 void update_vhpi(struct kvm_vcpu *vcpu, int vec)
939 {
940         u64 vhpi;
941
942         if (vec == NULL_VECTOR)
943                 vhpi = 0;
944         else if (vec == NMI_VECTOR)
945                 vhpi = 32;
946         else if (vec == ExtINT_VECTOR)
947                 vhpi = 16;
948         else
949                 vhpi = vec >> 4;
950
951         VCPU(vcpu, vhpi) = vhpi;
952         if (VCPU(vcpu, vac).a_int)
953                 ia64_call_vsa(PAL_VPS_SET_PENDING_INTERRUPT,
954                                 (u64)vcpu->arch.vpd, 0, 0, 0, 0, 0, 0);
955 }
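
/*
 * Editor's note on the encoding above: for ordinary vectors the value
 * written to VCPU(vcpu, vhpi) is the priority class (vec >> 4), with
 * 32, 16 and 0 used for NMI, ExtINT and "nothing pending" respectively;
 * e.g. vector 0xd1 yields vhpi = 13.
 */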
956
957 u64 vcpu_get_ivr(struct kvm_vcpu *vcpu)
958 {
959         int vec, h_inservice, mask;
960
961         vec = highest_pending_irq(vcpu);
962         h_inservice = highest_inservice_irq(vcpu);
963         mask = irq_masked(vcpu, vec, h_inservice);
964         if (vec == NULL_VECTOR || mask == IRQ_MASKED_BY_INSVC) {
965                 if (VCPU(vcpu, vhpi))
966                         update_vhpi(vcpu, NULL_VECTOR);
967                 return IA64_SPURIOUS_INT_VECTOR;
968         }
969         if (mask == IRQ_MASKED_BY_VTPR) {
970                 update_vhpi(vcpu, vec);
971                 return IA64_SPURIOUS_INT_VECTOR;
972         }
973         VMX(vcpu, insvc[vec >> 6]) |= (1UL << (vec & 63));
974         vcpu_unpend_interrupt(vcpu, vec);
975         return  (u64)vec;
976 }
977
978 /**************************************************************************
979   Privileged operation emulation routines
980  **************************************************************************/
981 u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr)
982 {
983         union ia64_pta vpta;
984         union ia64_rr vrr;
985         u64 pval;
986         u64 vhpt_offset;
987
988         vpta.val = vcpu_get_pta(vcpu);
989         vrr.val = vcpu_get_rr(vcpu, vadr);
990         vhpt_offset = ((vadr >> vrr.ps) << 3) & ((1UL << (vpta.size)) - 1);
991         if (vpta.vf) {
992                 pval = ia64_call_vsa(PAL_VPS_THASH, vadr, vrr.val,
993                                 vpta.val, 0, 0, 0, 0);
994         } else {
995                 pval = (vadr & VRN_MASK) | vhpt_offset |
996                         (vpta.val << 3 >> (vpta.size + 3) << (vpta.size));
997         }
998         return  pval;
999 }
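
/*
 * Worked example (editor's note) for the non-vf branch above (assumed to
 * be the short-format VHPT case): with vrr.ps = 16 (64KB pages) and
 * vpta.size = 15, vadr = 0x2000000000123456 gives
 * vhpt_offset = ((vadr >> 16) << 3) & 0x7fff = 0x90, which is then ORed
 * with the region bits of vadr and the VHPT base taken from vpta.
 */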
1000
1001 u64 vcpu_ttag(struct kvm_vcpu *vcpu, u64 vadr)
1002 {
1003         union ia64_rr vrr;
1004         union ia64_pta vpta;
1005         u64 pval;
1006
1007         vpta.val = vcpu_get_pta(vcpu);
1008         vrr.val = vcpu_get_rr(vcpu, vadr);
1009         if (vpta.vf) {
1010                 pval = ia64_call_vsa(PAL_VPS_TTAG, vadr, vrr.val,
1011                                                 0, 0, 0, 0, 0);
1012         } else
1013                 pval = 1;
1014
1015         return  pval;
1016 }
1017
1018 u64 vcpu_tak(struct kvm_vcpu *vcpu, u64 vadr)
1019 {
1020         struct thash_data *data;
1021         union ia64_pta vpta;
1022         u64 key;
1023
1024         vpta.val = vcpu_get_pta(vcpu);
1025         if (vpta.vf == 0) {
1026                 key = 1;
1027                 return key;
1028         }
1029         data = vtlb_lookup(vcpu, vadr, D_TLB);
1030         if (!data || !data->p)
1031                 key = 1;
1032         else
1033                 key = data->key;
1034
1035         return key;
1036 }
1037
1038 void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
1039 {
1040         unsigned long thash, vadr;
1041
1042         vadr = vcpu_get_gr(vcpu, inst.M46.r3);
1043         thash = vcpu_thash(vcpu, vadr);
1044         vcpu_set_gr(vcpu, inst.M46.r1, thash, 0);
1045 }
1046
1047 void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst)
1048 {
1049         unsigned long tag, vadr;
1050
1051         vadr = vcpu_get_gr(vcpu, inst.M46.r3);
1052         tag = vcpu_ttag(vcpu, vadr);
1053         vcpu_set_gr(vcpu, inst.M46.r1, tag, 0);
1054 }
1055
1056 int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, u64 *padr)
1057 {
1058         struct thash_data *data;
1059         union ia64_isr visr, pt_isr;
1060         struct kvm_pt_regs *regs;
1061         struct ia64_psr vpsr;
1062
1063         regs = vcpu_regs(vcpu);
1064         pt_isr.val = VMX(vcpu, cr_isr);
1065         visr.val = 0;
1066         visr.ei = pt_isr.ei;
1067         visr.ir = pt_isr.ir;
1068         vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
1069         visr.na = 1;
1070
1071         data = vhpt_lookup(vadr);
1072         if (data) {
1073                 if (data->p == 0) {
1074                         vcpu_set_isr(vcpu, visr.val);
1075                         data_page_not_present(vcpu, vadr);
1076                         return IA64_FAULT;
1077                 } else if (data->ma == VA_MATTR_NATPAGE) {
1078                         vcpu_set_isr(vcpu, visr.val);
1079                         dnat_page_consumption(vcpu, vadr);
1080                         return IA64_FAULT;
1081                 } else {
1082                         *padr = (data->gpaddr >> data->ps << data->ps) |
1083                                 (vadr & (PSIZE(data->ps) - 1));
1084                         return IA64_NO_FAULT;
1085                 }
1086         }
1087
1088         data = vtlb_lookup(vcpu, vadr, D_TLB);
1089         if (data) {
1090                 if (data->p == 0) {
1091                         vcpu_set_isr(vcpu, visr.val);
1092                         data_page_not_present(vcpu, vadr);
1093                         return IA64_FAULT;
1094                 } else if (data->ma == VA_MATTR_NATPAGE) {
1095                         vcpu_set_isr(vcpu, visr.val);
1096                         dnat_page_consumption(vcpu, vadr);
1097                         return IA64_FAULT;
1098                 } else {
1099                         *padr = ((data->ppn >> (data->ps - 12)) << data->ps)
1100                                 | (vadr & (PSIZE(data->ps) - 1));
1101                         return IA64_NO_FAULT;
1102                 }
1103         }
1104         if (!vhpt_enabled(vcpu, vadr, NA_REF)) {
1105                 if (vpsr.ic) {
1106                         vcpu_set_isr(vcpu, visr.val);
1107                         alt_dtlb(vcpu, vadr);
1108                         return IA64_FAULT;
1109                 } else {
1110                         nested_dtlb(vcpu);
1111                         return IA64_FAULT;
1112                 }
1113         } else {
1114                 if (vpsr.ic) {
1115                         vcpu_set_isr(vcpu, visr.val);
1116                         dvhpt_fault(vcpu, vadr);
1117                         return IA64_FAULT;
1118                 } else {
1119                         nested_dtlb(vcpu);
1120                         return IA64_FAULT;
1121                 }
1122         }
1123
1124         return IA64_NO_FAULT;
1125 }
1126
1127 int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst)
1128 {
1129         unsigned long r1, r3;
1130
1131         r3 = vcpu_get_gr(vcpu, inst.M46.r3);
1132
1133         if (vcpu_tpa(vcpu, r3, &r1))
1134                 return IA64_FAULT;
1135
1136         vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
1137         return(IA64_NO_FAULT);
1138 }
1139
1140 void kvm_tak(struct kvm_vcpu *vcpu, INST64 inst)
1141 {
1142         unsigned long r1, r3;
1143
1144         r3 = vcpu_get_gr(vcpu, inst.M46.r3);
1145         r1 = vcpu_tak(vcpu, r3);
1146         vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
1147 }
1148
1149 /************************************
1150  * Insert/Purge translation register/cache
1151  ************************************/
1152 void vcpu_itc_i(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
1153 {
1154         thash_purge_and_insert(vcpu, pte, itir, ifa, I_TLB);
1155 }
1156
1157 void vcpu_itc_d(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
1158 {
1159         thash_purge_and_insert(vcpu, pte, itir, ifa, D_TLB);
1160 }
1161
1162 void vcpu_itr_i(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
1163 {
1164         u64 ps, va, rid;
1165         struct thash_data *p_itr;
1166
1167         ps = itir_ps(itir);
1168         va = PAGEALIGN(ifa, ps);
1169         pte &= ~PAGE_FLAGS_RV_MASK;
1170         rid = vcpu_get_rr(vcpu, ifa);
1171         rid = rid & RR_RID_MASK;
1172         p_itr = (struct thash_data *)&vcpu->arch.itrs[slot];
1173         vcpu_set_tr(p_itr, pte, itir, va, rid);
1174         vcpu_quick_region_set(VMX(vcpu, itr_regions), va);
1175 }
1176
1177
1178 void vcpu_itr_d(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
1179 {
1180         u64 gpfn;
1181         u64 ps, va, rid;
1182         struct thash_data *p_dtr;
1183
1184         ps = itir_ps(itir);
1185         va = PAGEALIGN(ifa, ps);
1186         pte &= ~PAGE_FLAGS_RV_MASK;
1187
1188         if (ps != _PAGE_SIZE_16M)
1189                 thash_purge_entries(vcpu, va, ps);
1190         gpfn = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
1191         if (__gpfn_is_io(gpfn))
1192                 pte |= VTLB_PTE_IO;
1193         rid = vcpu_get_rr(vcpu, va);
1194         rid = rid & RR_RID_MASK;
1195         p_dtr = (struct thash_data *)&vcpu->arch.dtrs[slot];
1196         vcpu_set_tr(p_dtr, pte, itir, va, rid);
1198         vcpu_quick_region_set(VMX(vcpu, dtr_regions), va);
1199 }
1200
1201 void vcpu_ptr_d(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
1202 {
1203         int index;
1204         u64 va;
1205
1206         va = PAGEALIGN(ifa, ps);
1207         while ((index = vtr_find_overlap(vcpu, va, ps, D_TLB)) >= 0)
1208                 vcpu->arch.dtrs[index].page_flags = 0;
1209
1210         thash_purge_entries(vcpu, va, ps);
1211 }
1212
1213 void vcpu_ptr_i(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
1214 {
1215         int index;
1216         u64 va;
1217
1218         va = PAGEALIGN(ifa, ps);
1219         while ((index = vtr_find_overlap(vcpu, va, ps, I_TLB)) >= 0)
1220                 vcpu->arch.itrs[index].page_flags = 0;
1221
1222         thash_purge_entries(vcpu, va, ps);
1223 }
1224
1225 void vcpu_ptc_l(struct kvm_vcpu *vcpu, u64 va, u64 ps)
1226 {
1227         va = PAGEALIGN(va, ps);
1228         thash_purge_entries(vcpu, va, ps);
1229 }
1230
1231 void vcpu_ptc_e(struct kvm_vcpu *vcpu, u64 va)
1232 {
1233         thash_purge_all(vcpu);
1234 }
1235
1236 void vcpu_ptc_ga(struct kvm_vcpu *vcpu, u64 va, u64 ps)
1237 {
1238         struct exit_ctl_data *p = &vcpu->arch.exit_data;
1239         long psr;
1240         local_irq_save(psr);
1241         p->exit_reason = EXIT_REASON_PTC_G;
1242
1243         p->u.ptc_g_data.rr = vcpu_get_rr(vcpu, va);
1244         p->u.ptc_g_data.vaddr = va;
1245         p->u.ptc_g_data.ps = ps;
1246         vmm_transition(vcpu);
1247         /* Do Local Purge Here*/
1248         vcpu_ptc_l(vcpu, va, ps);
1249         local_irq_restore(psr);
1250 }
1251
1252
1253 void vcpu_ptc_g(struct kvm_vcpu *vcpu, u64 va, u64 ps)
1254 {
1255         vcpu_ptc_ga(vcpu, va, ps);
1256 }
1257
1258 void kvm_ptc_e(struct kvm_vcpu *vcpu, INST64 inst)
1259 {
1260         unsigned long ifa;
1261
1262         ifa = vcpu_get_gr(vcpu, inst.M45.r3);
1263         vcpu_ptc_e(vcpu, ifa);
1264 }
1265
1266 void kvm_ptc_g(struct kvm_vcpu *vcpu, INST64 inst)
1267 {
1268         unsigned long ifa, itir;
1269
1270         ifa = vcpu_get_gr(vcpu, inst.M45.r3);
1271         itir = vcpu_get_gr(vcpu, inst.M45.r2);
1272         vcpu_ptc_g(vcpu, ifa, itir_ps(itir));
1273 }
1274
1275 void kvm_ptc_ga(struct kvm_vcpu *vcpu, INST64 inst)
1276 {
1277         unsigned long ifa, itir;
1278
1279         ifa = vcpu_get_gr(vcpu, inst.M45.r3);
1280         itir = vcpu_get_gr(vcpu, inst.M45.r2);
1281         vcpu_ptc_ga(vcpu, ifa, itir_ps(itir));
1282 }
1283
1284 void kvm_ptc_l(struct kvm_vcpu *vcpu, INST64 inst)
1285 {
1286         unsigned long ifa, itir;
1287
1288         ifa = vcpu_get_gr(vcpu, inst.M45.r3);
1289         itir = vcpu_get_gr(vcpu, inst.M45.r2);
1290         vcpu_ptc_l(vcpu, ifa, itir_ps(itir));
1291 }
1292
1293 void kvm_ptr_d(struct kvm_vcpu *vcpu, INST64 inst)
1294 {
1295         unsigned long ifa, itir;
1296
1297         ifa = vcpu_get_gr(vcpu, inst.M45.r3);
1298         itir = vcpu_get_gr(vcpu, inst.M45.r2);
1299         vcpu_ptr_d(vcpu, ifa, itir_ps(itir));
1300 }
1301
1302 void kvm_ptr_i(struct kvm_vcpu *vcpu, INST64 inst)
1303 {
1304         unsigned long ifa, itir;
1305
1306         ifa = vcpu_get_gr(vcpu, inst.M45.r3);
1307         itir = vcpu_get_gr(vcpu, inst.M45.r2);
1308         vcpu_ptr_i(vcpu, ifa, itir_ps(itir));
1309 }
1310
1311 void kvm_itr_d(struct kvm_vcpu *vcpu, INST64 inst)
1312 {
1313         unsigned long itir, ifa, pte, slot;
1314
1315         slot = vcpu_get_gr(vcpu, inst.M45.r3);
1316         pte = vcpu_get_gr(vcpu, inst.M45.r2);
1317         itir = vcpu_get_itir(vcpu);
1318         ifa = vcpu_get_ifa(vcpu);
1319         vcpu_itr_d(vcpu, slot, pte, itir, ifa);
1320 }
1321
1322
1323
1324 void kvm_itr_i(struct kvm_vcpu *vcpu, INST64 inst)
1325 {
1326         unsigned long itir, ifa, pte, slot;
1327
1328         slot = vcpu_get_gr(vcpu, inst.M45.r3);
1329         pte = vcpu_get_gr(vcpu, inst.M45.r2);
1330         itir = vcpu_get_itir(vcpu);
1331         ifa = vcpu_get_ifa(vcpu);
1332         vcpu_itr_i(vcpu, slot, pte, itir, ifa);
1333 }
1334
1335 void kvm_itc_d(struct kvm_vcpu *vcpu, INST64 inst)
1336 {
1337         unsigned long itir, ifa, pte;
1338
1339         itir = vcpu_get_itir(vcpu);
1340         ifa = vcpu_get_ifa(vcpu);
1341         pte = vcpu_get_gr(vcpu, inst.M45.r2);
1342         vcpu_itc_d(vcpu, pte, itir, ifa);
1343 }
1344
1345 void kvm_itc_i(struct kvm_vcpu *vcpu, INST64 inst)
1346 {
1347         unsigned long itir, ifa, pte;
1348
1349         itir = vcpu_get_itir(vcpu);
1350         ifa = vcpu_get_ifa(vcpu);
1351         pte = vcpu_get_gr(vcpu, inst.M45.r2);
1352         vcpu_itc_i(vcpu, pte, itir, ifa);
1353 }
1354
1355 /*************************************
1356  * Moves to semi-privileged registers
1357  *************************************/
1358
1359 void kvm_mov_to_ar_imm(struct kvm_vcpu *vcpu, INST64 inst)
1360 {
1361         unsigned long imm;
1362
1363         if (inst.M30.s)
1364                 imm = -inst.M30.imm;
1365         else
1366                 imm = inst.M30.imm;
1367
1368         vcpu_set_itc(vcpu, imm);
1369 }
1370
1371 void kvm_mov_to_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
1372 {
1373         unsigned long r2;
1374
1375         r2 = vcpu_get_gr(vcpu, inst.M29.r2);
1376         vcpu_set_itc(vcpu, r2);
1377 }
1378
1379 void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
1380 {
1381         unsigned long r1;
1382
1383         r1 = vcpu_get_itc(vcpu);
1384         vcpu_set_gr(vcpu, inst.M31.r1, r1, 0);
1385 }
1386
1387 /**************************************************************************
1388   struct kvm_vcpu protection key register access routines
1389  **************************************************************************/
1390
1391 unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg)
1392 {
1393         return ((unsigned long)ia64_get_pkr(reg));
1394 }
1395
1396 void vcpu_set_pkr(struct kvm_vcpu *vcpu, unsigned long reg, unsigned long val)
1397 {
1398         ia64_set_pkr(reg, val);
1399 }
1400
1401 /********************************
1402  * Moves to privileged registers
1403  ********************************/
1404 unsigned long vcpu_set_rr(struct kvm_vcpu *vcpu, unsigned long reg,
1405                                         unsigned long val)
1406 {
1407         union ia64_rr oldrr, newrr;
1408         unsigned long rrval;
1409         struct exit_ctl_data *p = &vcpu->arch.exit_data;
1410         unsigned long psr;
1411
1412         oldrr.val = vcpu_get_rr(vcpu, reg);
1413         newrr.val = val;
1414         vcpu->arch.vrr[reg >> VRN_SHIFT] = val;
1415
1416         switch ((unsigned long)(reg >> VRN_SHIFT)) {
1417         case VRN6:
1418                 vcpu->arch.vmm_rr = vrrtomrr(val);
1419                 local_irq_save(psr);
1420                 p->exit_reason = EXIT_REASON_SWITCH_RR6;
1421                 vmm_transition(vcpu);
1422                 local_irq_restore(psr);
1423                 break;
1424         case VRN4:
1425                 rrval = vrrtomrr(val);
1426                 vcpu->arch.metaphysical_saved_rr4 = rrval;
1427                 if (!is_physical_mode(vcpu))
1428                         ia64_set_rr(reg, rrval);
1429                 break;
1430         case VRN0:
1431                 rrval = vrrtomrr(val);
1432                 vcpu->arch.metaphysical_saved_rr0 = rrval;
1433                 if (!is_physical_mode(vcpu))
1434                         ia64_set_rr(reg, rrval);
1435                 break;
1436         default:
1437                 ia64_set_rr(reg, vrrtomrr(val));
1438                 break;
1439         }
1440
1441         return (IA64_NO_FAULT);
1442 }
1443
1444 void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst)
1445 {
1446         unsigned long r3, r2;
1447
1448         r3 = vcpu_get_gr(vcpu, inst.M42.r3);
1449         r2 = vcpu_get_gr(vcpu, inst.M42.r2);
1450         vcpu_set_rr(vcpu, r3, r2);
1451 }
1452
1453 void kvm_mov_to_dbr(struct kvm_vcpu *vcpu, INST64 inst)
1454 {
1455 }
1456
1457 void kvm_mov_to_ibr(struct kvm_vcpu *vcpu, INST64 inst)
1458 {
1459 }
1460
1461 void kvm_mov_to_pmc(struct kvm_vcpu *vcpu, INST64 inst)
1462 {
1463         unsigned long r3, r2;
1464
1465         r3 = vcpu_get_gr(vcpu, inst.M42.r3);
1466         r2 = vcpu_get_gr(vcpu, inst.M42.r2);
1467         vcpu_set_pmc(vcpu, r3, r2);
1468 }
1469
1470 void kvm_mov_to_pmd(struct kvm_vcpu *vcpu, INST64 inst)
1471 {
1472         unsigned long r3, r2;
1473
1474         r3 = vcpu_get_gr(vcpu, inst.M42.r3);
1475         r2 = vcpu_get_gr(vcpu, inst.M42.r2);
1476         vcpu_set_pmd(vcpu, r3, r2);
1477 }
1478
1479 void kvm_mov_to_pkr(struct kvm_vcpu *vcpu, INST64 inst)
1480 {
1481         u64 r3, r2;
1482
1483         r3 = vcpu_get_gr(vcpu, inst.M42.r3);
1484         r2 = vcpu_get_gr(vcpu, inst.M42.r2);
1485         vcpu_set_pkr(vcpu, r3, r2);
1486 }
1487
1488 void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst)
1489 {
1490         unsigned long r3, r1;
1491
1492         r3 = vcpu_get_gr(vcpu, inst.M43.r3);
1493         r1 = vcpu_get_rr(vcpu, r3);
1494         vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
1495 }
1496
1497 void kvm_mov_from_pkr(struct kvm_vcpu *vcpu, INST64 inst)
1498 {
1499         unsigned long r3, r1;
1500
1501         r3 = vcpu_get_gr(vcpu, inst.M43.r3);
1502         r1 = vcpu_get_pkr(vcpu, r3);
1503         vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
1504 }
1505
1506 void kvm_mov_from_dbr(struct kvm_vcpu *vcpu, INST64 inst)
1507 {
1508         unsigned long r3, r1;
1509
1510         r3 = vcpu_get_gr(vcpu, inst.M43.r3);
1511         r1 = vcpu_get_dbr(vcpu, r3);
1512         vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
1513 }
1514
1515 void kvm_mov_from_ibr(struct kvm_vcpu *vcpu, INST64 inst)
1516 {
1517         unsigned long r3, r1;
1518
1519         r3 = vcpu_get_gr(vcpu, inst.M43.r3);
1520         r1 = vcpu_get_ibr(vcpu, r3);
1521         vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
1522 }
1523
1524 void kvm_mov_from_pmc(struct kvm_vcpu *vcpu, INST64 inst)
1525 {
1526         unsigned long r3, r1;
1527
1528         r3 = vcpu_get_gr(vcpu, inst.M43.r3);
1529         r1 = vcpu_get_pmc(vcpu, r3);
1530         vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
1531 }
1532
1533 unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg)
1534 {
1535         /* FIXME: This could get called as a result of a rsvd-reg fault */
1536         if (reg > (ia64_get_cpuid(3) & 0xff))
1537                 return 0;
1538         else
1539                 return ia64_get_cpuid(reg);
1540 }
1541
1542 void kvm_mov_from_cpuid(struct kvm_vcpu *vcpu, INST64 inst)
1543 {
1544         unsigned long r3, r1;
1545
1546         r3 = vcpu_get_gr(vcpu, inst.M43.r3);
1547         r1 = vcpu_get_cpuid(vcpu, r3);
1548         vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
1549 }
1550
1551 void vcpu_set_tpr(struct kvm_vcpu *vcpu, unsigned long val)
1552 {
1553         VCPU(vcpu, tpr) = val;
1554         vcpu->arch.irq_check = 1;
1555 }
1556
1557 unsigned long kvm_mov_to_cr(struct kvm_vcpu *vcpu, INST64 inst)
1558 {
1559         unsigned long r2;
1560
1561         r2 = vcpu_get_gr(vcpu, inst.M32.r2);
1562         VCPU(vcpu, vcr[inst.M32.cr3]) = r2;
1563
1564         switch (inst.M32.cr3) {
1565         case 0:
1566                 vcpu_set_dcr(vcpu, r2);
1567                 break;
1568         case 1:
1569                 vcpu_set_itm(vcpu, r2);
1570                 break;
1571         case 66:
1572                 vcpu_set_tpr(vcpu, r2);
1573                 break;
1574         case 67:
1575                 vcpu_set_eoi(vcpu, r2);
1576                 break;
1577         default:
1578                 break;
1579         }
1580
1581         return 0;
1582 }
1583
1584 unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
1585 {
1586         unsigned long tgt = inst.M33.r1;
1587         unsigned long val;
1588
1589         switch (inst.M33.cr3) {
1590         case 65:
1591                 val = vcpu_get_ivr(vcpu);
1592                 vcpu_set_gr(vcpu, tgt, val, 0);
1593                 break;
1594
1595         case 67:
1596                 vcpu_set_gr(vcpu, tgt, 0L, 0);
1597                 break;
1598         default:
1599                 val = VCPU(vcpu, vcr[inst.M33.cr3]);
1600                 vcpu_set_gr(vcpu, tgt, val, 0);
1601                 break;
1602         }
1603
1604         return 0;
1605 }
1606
1607 void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val)
1608 {
1609
1610         unsigned long mask;
1611         struct kvm_pt_regs *regs;
1612         struct ia64_psr old_psr, new_psr;
1613
1614         old_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
1615
1616         regs = vcpu_regs(vcpu);
1617         /* We only support guests with:
1618          *  vpsr.pk = 0
1619          *  vpsr.is = 0
1620          * Otherwise panic.
1621          */
1622         if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM))
1623                 panic_vm(vcpu, "Only support guests with vpsr.pk = 0 "
1624                                 "& vpsr.is = 0\n");
1625
1626         /*
1627          * For those IA64_PSR bits: id/da/dd/ss/ed/ia
1628          * Since these bits will become 0 after successful execution of
1629          * each instruction, we will set them to mIA64_PSR.
1630          */
1631         VCPU(vcpu, vpsr) = val
1632                 & (~(IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |
1633                         IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA));
1634
1635         if (!old_psr.i && (val & IA64_PSR_I)) {
1636                 /* vpsr.i 0->1 */
1637                 vcpu->arch.irq_check = 1;
1638         }
1639         new_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
1640
1641         /*
1642          * All vIA64_PSR bits shall go to mPSR (v->tf->tf_special.psr),
1643          * except for the following bits:
1644          *  ic/i/dt/si/rt/mc/it/bn/vm
1645          */
1646         mask =  IA64_PSR_IC | IA64_PSR_I | IA64_PSR_DT | IA64_PSR_SI |
1647                 IA64_PSR_RT | IA64_PSR_MC | IA64_PSR_IT | IA64_PSR_BN |
1648                 IA64_PSR_VM;
1649
1650         regs->cr_ipsr = (regs->cr_ipsr & mask) | (val & (~mask));
1651
1652         check_mm_mode_switch(vcpu, old_psr, new_psr);
1653
1654         return;
1655 }
1656
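/*
 * Emulate "cover".  With interruption collection disabled, the current frame
 * marker is preserved in the virtual IFS before cr.ifs is replaced with a
 * valid, empty frame (IA64_IFS_V).
 */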
1657 unsigned long vcpu_cover(struct kvm_vcpu *vcpu)
1658 {
1659         struct ia64_psr vpsr;
1660
1661         struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1662         vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
1663
1664         if (!vpsr.ic)
1665                 VCPU(vcpu, ifs) = regs->cr_ifs;
1666         regs->cr_ifs = IA64_IFS_V;
1667         return (IA64_NO_FAULT);
1668 }
1669
1670
1671
1672 /**************************************************************************
1673   VCPU banked general register access routines
1674  **************************************************************************/
1675 #define vcpu_bsw0_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT)  \
1676         do {                                                            \
1677                 __asm__ __volatile__ (                                  \
1678                                 ";;extr.u %0 = %3,%6,16;;\n"            \
1679                                 "dep %1 = %0, %1, 0, 16;;\n"            \
1680                                 "st8 [%4] = %1\n"                       \
1681                                 "extr.u %0 = %2, 16, 16;;\n"            \
1682                                 "dep %3 = %0, %3, %6, 16;;\n"           \
1683                                 "st8 [%5] = %3\n"                       \
1684                                 ::"r"(i), "r"(*b1unat), "r"(*b0unat),   \
1685                                 "r"(*runat), "r"(b1unat), "r"(runat),   \
1686                                 "i"(VMM_PT_REGS_R16_SLOT) : "memory");  \
1687         } while (0)
1688
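/*
 * Emulate "bsw.0": switch the guest to register bank 0.  If bank 1 is live,
 * r16-r31 are saved into the virtual bank-1 file and replaced with the
 * virtual bank-0 registers, the matching NaT bits are swapped by
 * vcpu_bsw0_unat(), and vpsr.bn is cleared.
 */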
1689 void vcpu_bsw0(struct kvm_vcpu *vcpu)
1690 {
1691         unsigned long i;
1692
1693         struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1694         unsigned long *r = &regs->r16;
1695         unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
1696         unsigned long *b1 = &VCPU(vcpu, vgr[0]);
1697         unsigned long *runat = &regs->eml_unat;
1698         unsigned long *b0unat = &VCPU(vcpu, vbnat);
1699         unsigned long *b1unat = &VCPU(vcpu, vnat);
1700
1701
1702         if (VCPU(vcpu, vpsr) & IA64_PSR_BN) {
1703                 for (i = 0; i < 16; i++) {
1704                         *b1++ = *r;
1705                         *r++ = *b0++;
1706                 }
1707                 vcpu_bsw0_unat(i, b0unat, b1unat, runat,
1708                                 VMM_PT_REGS_R16_SLOT);
1709                 VCPU(vcpu, vpsr) &= ~IA64_PSR_BN;
1710         }
1711 }
1712
1713 #define vcpu_bsw1_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT)  \
1714         do {                                                            \
1715                 __asm__ __volatile__ (";;extr.u %0 = %3, %6, 16;;\n"    \
1716                                 "dep %1 = %0, %1, 16, 16;;\n"           \
1717                                 "st8 [%4] = %1\n"                       \
1718                                 "extr.u %0 = %2, 0, 16;;\n"             \
1719                                 "dep %3 = %0, %3, %6, 16;;\n"           \
1720                                 "st8 [%5] = %3\n"                       \
1721                                 ::"r"(i), "r"(*b0unat), "r"(*b1unat),   \
1722                                 "r"(*runat), "r"(b0unat), "r"(runat),   \
1723                                 "i"(VMM_PT_REGS_R16_SLOT) : "memory");  \
1724         } while (0)
1725
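/*
 * Emulate "bsw.1": the mirror of vcpu_bsw0().  Bank-0 registers are saved,
 * the virtual bank-1 registers become live, and vpsr.bn is set.
 */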
1726 void vcpu_bsw1(struct kvm_vcpu *vcpu)
1727 {
1728         unsigned long i;
1729         struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1730         unsigned long *r = &regs->r16;
1731         unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
1732         unsigned long *b1 = &VCPU(vcpu, vgr[0]);
1733         unsigned long *runat = &regs->eml_unat;
1734         unsigned long *b0unat = &VCPU(vcpu, vbnat);
1735         unsigned long *b1unat = &VCPU(vcpu, vnat);
1736
1737         if (!(VCPU(vcpu, vpsr) & IA64_PSR_BN)) {
1738                 for (i = 0; i < 16; i++) {
1739                         *b0++ = *r;
1740                         *r++ = *b1++;
1741                 }
1742                 vcpu_bsw1_unat(i, b0unat, b1unat, runat,
1743                                 VMM_PT_REGS_R16_SLOT);
1744                 VCPU(vcpu, vpsr) |= IA64_PSR_BN;
1745         }
1746 }
1747
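/*
 * Emulate "rfi": select the register bank requested by the saved IPSR.bn,
 * install the saved IPSR as the guest PSR, restore cr.ifs if the saved IFS
 * is valid (bit 63 set), and resume execution at the virtual IIP.
 */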
1748 void vcpu_rfi(struct kvm_vcpu *vcpu)
1749 {
1750         unsigned long ifs, psr;
1751         struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1752
1753         psr = VCPU(vcpu, ipsr);
1754         if (psr & IA64_PSR_BN)
1755                 vcpu_bsw1(vcpu);
1756         else
1757                 vcpu_bsw0(vcpu);
1758         vcpu_set_psr(vcpu, psr);
1759         ifs = VCPU(vcpu, ifs);
1760         if (ifs >> 63)
1761                 regs->cr_ifs = ifs;
1762         regs->cr_iip = VCPU(vcpu, iip);
1763 }
1764
1765 /*
1766  * The VPSR cannot track the guest PSR bits masked below; this function
1767  * reconstructs the full guest PSR by merging them in from the machine PSR.
1768  */
1769
1770 unsigned long vcpu_get_psr(struct kvm_vcpu *vcpu)
1771 {
1772         unsigned long mask;
1773         struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1774
1775         mask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
1776                 IA64_PSR_MFH | IA64_PSR_CPL | IA64_PSR_RI;
1777         return (VCPU(vcpu, vpsr) & ~mask) | (regs->cr_ipsr & mask);
1778 }
1779
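/*
 * Emulate "rsm imm24": reassemble the 24-bit immediate from the i, i2 and
 * imm fields of the M44 encoding and clear those bits in the guest PSR.
 */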
1780 void kvm_rsm(struct kvm_vcpu *vcpu, INST64 inst)
1781 {
1782         unsigned long vpsr;
1783         unsigned long imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21)
1784                                         | inst.M44.imm;
1785
1786         vpsr = vcpu_get_psr(vcpu);
1787         vpsr &= (~imm24);
1788         vcpu_set_psr(vcpu, vpsr);
1789 }
1790
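/* Emulate "ssm imm24": the bit-setting counterpart of kvm_rsm(). */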
1791 void kvm_ssm(struct kvm_vcpu *vcpu, INST64 inst)
1792 {
1793         unsigned long vpsr;
1794         unsigned long imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21)
1795                                 | inst.M44.imm;
1796
1797         vpsr = vcpu_get_psr(vcpu);
1798         vpsr |= imm24;
1799         vcpu_set_psr(vcpu, vpsr);
1800 }
1801
1802 /* Generate Mask
1803  * Parameter:
1804  *  bit -- starting bit
1805  *  len -- how many bits
1806  */
1807 #define MASK(bit,len)                                   \
1808 ({                                                      \
1809                 __u64   ret;                            \
1810                                                         \
1811                 __asm__ __volatile__("dep %0=-1, r0, %1, %2"\
1812                                 : "=r" (ret):           \
1813                   "M" (bit),                            \
1814                   "M" (len));                           \
1815                 ret;                                    \
1816 })
1817
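/*
 * MASK(bit, len) yields a mask of 'len' ones starting at bit 'bit', e.g.
 * MASK(0, 32) == 0x00000000ffffffff and MASK(32, 32) == 0xffffffff00000000.
 * vcpu_set_psr_l() below therefore emulates "mov psr.l = rX": only PSR{31:0}
 * is replaced, while the upper half of the guest PSR is preserved.
 */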
1818 void vcpu_set_psr_l(struct kvm_vcpu *vcpu, unsigned long val)
1819 {
1820         val = (val & MASK(0, 32)) | (vcpu_get_psr(vcpu) & MASK(32, 32));
1821         vcpu_set_psr(vcpu, val);
1822 }
1823
1824 void kvm_mov_to_psr(struct kvm_vcpu *vcpu, INST64 inst)
1825 {
1826         unsigned long val;
1827
1828         val = vcpu_get_gr(vcpu, inst.M35.r2);
1829         vcpu_set_psr_l(vcpu, val);
1830 }
1831
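/*
 * Emulate "mov rX = psr".  Only PSR{31:0} and PSR{36:35} are handed back to
 * the guest, which appears to match the architecturally readable subset of
 * the PSR for this instruction.
 */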
1832 void kvm_mov_from_psr(struct kvm_vcpu *vcpu, INST64 inst)
1833 {
1834         unsigned long val;
1835
1836         val = vcpu_get_psr(vcpu);
1837         val = (val & MASK(0, 32)) | (val & MASK(35, 2));
1838         vcpu_set_gr(vcpu, inst.M33.r1, val, 0);
1839 }
1840
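/*
 * Advance the guest IP by one instruction slot.  An IA-64 bundle is 16 bytes
 * and holds three slots, tracked in ipsr.ri; wrapping past slot 2 moves
 * cr.iip to the next bundle.  vcpu_decrement_iip() below is the exact inverse.
 */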
1841 void vcpu_increment_iip(struct kvm_vcpu *vcpu)
1842 {
1843         struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1844         struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
1845         if (ipsr->ri == 2) {
1846                 ipsr->ri = 0;
1847                 regs->cr_iip += 16;
1848         } else
1849                 ipsr->ri++;
1850 }
1851
1852 void vcpu_decrement_iip(struct kvm_vcpu *vcpu)
1853 {
1854         struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1855         struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
1856
1857         if (ipsr->ri == 0) {
1858                 ipsr->ri = 2;
1859                 regs->cr_iip -= 16;
1860         } else
1861                 ipsr->ri--;
1862 }
1863
1864 /*
1865  * Emulate a privileged operation that raised a virtualization fault.
1866  *
1867  * @vcpu: the virtual cpu
1868  * @regs: the saved guest registers
1869  * The cause and the faulting opcode are taken from VMX(vcpu, cause/opcode).
1870  */
1871
1872 void kvm_emulate(struct kvm_vcpu *vcpu, struct kvm_pt_regs *regs)
1873 {
1874         unsigned long status, cause, opcode ;
1875         INST64 inst;
1876
1877         status = IA64_NO_FAULT;
1878         cause = VMX(vcpu, cause);
1879         opcode = VMX(vcpu, opcode);
1880         inst.inst = opcode;
1881         /*
1882          * Switch to actual virtual rid in rr0 and rr4,
1883          * which is required by some tlb related instructions.
1884          */
1885         prepare_if_physical_mode(vcpu);
1886
1887         switch (cause) {
1888         case EVENT_RSM:
1889                 kvm_rsm(vcpu, inst);
1890                 break;
1891         case EVENT_SSM:
1892                 kvm_ssm(vcpu, inst);
1893                 break;
1894         case EVENT_MOV_TO_PSR:
1895                 kvm_mov_to_psr(vcpu, inst);
1896                 break;
1897         case EVENT_MOV_FROM_PSR:
1898                 kvm_mov_from_psr(vcpu, inst);
1899                 break;
1900         case EVENT_MOV_FROM_CR:
1901                 kvm_mov_from_cr(vcpu, inst);
1902                 break;
1903         case EVENT_MOV_TO_CR:
1904                 kvm_mov_to_cr(vcpu, inst);
1905                 break;
1906         case EVENT_BSW_0:
1907                 vcpu_bsw0(vcpu);
1908                 break;
1909         case EVENT_BSW_1:
1910                 vcpu_bsw1(vcpu);
1911                 break;
1912         case EVENT_COVER:
1913                 vcpu_cover(vcpu);
1914                 break;
1915         case EVENT_RFI:
1916                 vcpu_rfi(vcpu);
1917                 break;
1918         case EVENT_ITR_D:
1919                 kvm_itr_d(vcpu, inst);
1920                 break;
1921         case EVENT_ITR_I:
1922                 kvm_itr_i(vcpu, inst);
1923                 break;
1924         case EVENT_PTR_D:
1925                 kvm_ptr_d(vcpu, inst);
1926                 break;
1927         case EVENT_PTR_I:
1928                 kvm_ptr_i(vcpu, inst);
1929                 break;
1930         case EVENT_ITC_D:
1931                 kvm_itc_d(vcpu, inst);
1932                 break;
1933         case EVENT_ITC_I:
1934                 kvm_itc_i(vcpu, inst);
1935                 break;
1936         case EVENT_PTC_L:
1937                 kvm_ptc_l(vcpu, inst);
1938                 break;
1939         case EVENT_PTC_G:
1940                 kvm_ptc_g(vcpu, inst);
1941                 break;
1942         case EVENT_PTC_GA:
1943                 kvm_ptc_ga(vcpu, inst);
1944                 break;
1945         case EVENT_PTC_E:
1946                 kvm_ptc_e(vcpu, inst);
1947                 break;
1948         case EVENT_MOV_TO_RR:
1949                 kvm_mov_to_rr(vcpu, inst);
1950                 break;
1951         case EVENT_MOV_FROM_RR:
1952                 kvm_mov_from_rr(vcpu, inst);
1953                 break;
1954         case EVENT_THASH:
1955                 kvm_thash(vcpu, inst);
1956                 break;
1957         case EVENT_TTAG:
1958                 kvm_ttag(vcpu, inst);
1959                 break;
1960         case EVENT_TPA:
1961                 status = kvm_tpa(vcpu, inst);
1962                 break;
1963         case EVENT_TAK:
1964                 kvm_tak(vcpu, inst);
1965                 break;
1966         case EVENT_MOV_TO_AR_IMM:
1967                 kvm_mov_to_ar_imm(vcpu, inst);
1968                 break;
1969         case EVENT_MOV_TO_AR:
1970                 kvm_mov_to_ar_reg(vcpu, inst);
1971                 break;
1972         case EVENT_MOV_FROM_AR:
1973                 kvm_mov_from_ar_reg(vcpu, inst);
1974                 break;
1975         case EVENT_MOV_TO_DBR:
1976                 kvm_mov_to_dbr(vcpu, inst);
1977                 break;
1978         case EVENT_MOV_TO_IBR:
1979                 kvm_mov_to_ibr(vcpu, inst);
1980                 break;
1981         case EVENT_MOV_TO_PMC:
1982                 kvm_mov_to_pmc(vcpu, inst);
1983                 break;
1984         case EVENT_MOV_TO_PMD:
1985                 kvm_mov_to_pmd(vcpu, inst);
1986                 break;
1987         case EVENT_MOV_TO_PKR:
1988                 kvm_mov_to_pkr(vcpu, inst);
1989                 break;
1990         case EVENT_MOV_FROM_DBR:
1991                 kvm_mov_from_dbr(vcpu, inst);
1992                 break;
1993         case EVENT_MOV_FROM_IBR:
1994                 kvm_mov_from_ibr(vcpu, inst);
1995                 break;
1996         case EVENT_MOV_FROM_PMC:
1997                 kvm_mov_from_pmc(vcpu, inst);
1998                 break;
1999         case EVENT_MOV_FROM_PKR:
2000                 kvm_mov_from_pkr(vcpu, inst);
2001                 break;
2002         case EVENT_MOV_FROM_CPUID:
2003                 kvm_mov_from_cpuid(vcpu, inst);
2004                 break;
2005         case EVENT_VMSW:
2006                 status = IA64_FAULT;
2007                 break;
2008         default:
2009                 break;
2010         }
2011         /* Assume any case not handled above left status at IA64_NO_FAULT. */
2012         if (status == IA64_NO_FAULT && cause != EVENT_RFI)
2013                 vcpu_increment_iip(vcpu);
2014
2015         recover_if_physical_mode(vcpu);
2016 }
2017
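/*
 * Reset the vcpu to its initial state: metaphysical (physical) addressing
 * mode, register bank 1 selected, and every virtual region register set to
 * 0x38 (rid 0 with, presumably, a 16KB preferred page size).  Local interrupt
 * sources stay masked: itv/pmv/cmcv/lrr0/lrr1 and tpr are written with
 * 0x10000 (mask bit set), and the XTP byte marks external interrupts disabled.
 */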
2018 void init_vcpu(struct kvm_vcpu *vcpu)
2019 {
2020         int i;
2021
2022         vcpu->arch.mode_flags = GUEST_IN_PHY;
2023         VMX(vcpu, vrr[0]) = 0x38;
2024         VMX(vcpu, vrr[1]) = 0x38;
2025         VMX(vcpu, vrr[2]) = 0x38;
2026         VMX(vcpu, vrr[3]) = 0x38;
2027         VMX(vcpu, vrr[4]) = 0x38;
2028         VMX(vcpu, vrr[5]) = 0x38;
2029         VMX(vcpu, vrr[6]) = 0x38;
2030         VMX(vcpu, vrr[7]) = 0x38;
2031         VCPU(vcpu, vpsr) = IA64_PSR_BN;
2032         VCPU(vcpu, dcr) = 0;
2033         /* pta.size must not be 0.  The minimum is 15 (32k) */
2034         VCPU(vcpu, pta) = 15 << 2;
2035         VCPU(vcpu, itv) = 0x10000;
2036         VCPU(vcpu, itm) = 0;
2037         VMX(vcpu, last_itc) = 0;
2038
2039         VCPU(vcpu, lid) = VCPU_LID(vcpu);
2040         VCPU(vcpu, ivr) = 0;
2041         VCPU(vcpu, tpr) = 0x10000;
2042         VCPU(vcpu, eoi) = 0;
2043         VCPU(vcpu, irr[0]) = 0;
2044         VCPU(vcpu, irr[1]) = 0;
2045         VCPU(vcpu, irr[2]) = 0;
2046         VCPU(vcpu, irr[3]) = 0;
2047         VCPU(vcpu, pmv) = 0x10000;
2048         VCPU(vcpu, cmcv) = 0x10000;
2049         VCPU(vcpu, lrr0) = 0x10000;   /* default reset value? */
2050         VCPU(vcpu, lrr1) = 0x10000;   /* default reset value? */
2051         update_vhpi(vcpu, NULL_VECTOR);
2052         VLSAPIC_XTP(vcpu) = 0x80;       /* disabled */
2053
2054         for (i = 0; i < 4; i++)
2055                 VLSAPIC_INSVC(vcpu, i) = 0;
2056 }
2057
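/*
 * Load the machine region registers from the guest's virtual rr values,
 * converted by vrrtomrr().  While the guest is in physical mode, rr0 and rr4
 * are loaded from the metaphysical values instead; rr1/rr2/rr3/rr5/rr7 are
 * always taken from the virtual values, and rr6 is left untouched.
 */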
2058 void kvm_init_all_rr(struct kvm_vcpu *vcpu)
2059 {
2060         unsigned long psr;
2061
2062         local_irq_save(psr);
2063
2064         /* WARNING: virtual mode and physical mode must not coexist in
2065          * the same region.
2066          */
2067
2068         vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(VMX(vcpu, vrr[VRN0]));
2069         vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(VMX(vcpu, vrr[VRN4]));
2070
2071         if (is_physical_mode(vcpu)) {
2072                 if (vcpu->arch.mode_flags & GUEST_PHY_EMUL)
2073                         panic_vm(vcpu, "Machine Status conflicts!\n");
2074
2075                 ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0);
2076                 ia64_dv_serialize_data();
2077                 ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rr4);
2078                 ia64_dv_serialize_data();
2079         } else {
2080                 ia64_set_rr((VRN0 << VRN_SHIFT),
2081                                 vcpu->arch.metaphysical_saved_rr0);
2082                 ia64_dv_serialize_data();
2083                 ia64_set_rr((VRN4 << VRN_SHIFT),
2084                                 vcpu->arch.metaphysical_saved_rr4);
2085                 ia64_dv_serialize_data();
2086         }
2087         ia64_set_rr((VRN1 << VRN_SHIFT),
2088                         vrrtomrr(VMX(vcpu, vrr[VRN1])));
2089         ia64_dv_serialize_data();
2090         ia64_set_rr((VRN2 << VRN_SHIFT),
2091                         vrrtomrr(VMX(vcpu, vrr[VRN2])));
2092         ia64_dv_serialize_data();
2093         ia64_set_rr((VRN3 << VRN_SHIFT),
2094                         vrrtomrr(VMX(vcpu, vrr[VRN3])));
2095         ia64_dv_serialize_data();
2096         ia64_set_rr((VRN5 << VRN_SHIFT),
2097                         vrrtomrr(VMX(vcpu, vrr[VRN5])));
2098         ia64_dv_serialize_data();
2099         ia64_set_rr((VRN7 << VRN_SHIFT),
2100                         vrrtomrr(VMX(vcpu, vrr[VRN7])));
2101         ia64_dv_serialize_data();
2102         ia64_srlz_d();
2103         ia64_set_psr(psr);
2104 }
2105
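/*
 * VMM-side bring-up of the current vcpu: restore the VPD state through
 * PAL_VPS_RESTORE, initialize the virtual TLB and VHPT, reset the vcpu and
 * the region registers, then branch to the reset entry point.
 */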
2106 int vmm_entry(void)
2107 {
2108         struct kvm_vcpu *v;
2109         v = current_vcpu;
2110
2111         ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)v->arch.vpd,
2112                                                 0, 0, 0, 0, 0, 0);
2113         kvm_init_vtlb(v);
2114         kvm_init_vhpt(v);
2115         init_vcpu(v);
2116         kvm_init_all_rr(v);
2117         vmm_reset_entry();
2118
2119         return 0;
2120 }
2121
2122 static void kvm_show_registers(struct kvm_pt_regs *regs)
2123 {
2124         unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;
2125
2126         struct kvm_vcpu *vcpu = current_vcpu;
2127         if (vcpu != NULL)
2128                 printk("vcpu 0x%p vcpu_id %d\n",
2129                        vcpu, vcpu->vcpu_id);
2130
2131         printk("psr : %016lx ifs : %016lx ip  : [<%016lx>]\n",
2132                regs->cr_ipsr, regs->cr_ifs, ip);
2133
2134         printk("unat: %016lx pfs : %016lx rsc : %016lx\n",
2135                regs->ar_unat, regs->ar_pfs, regs->ar_rsc);
2136         printk("rnat: %016lx bspstore: %016lx pr  : %016lx\n",
2137                regs->ar_rnat, regs->ar_bspstore, regs->pr);
2138         printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n",
2139                regs->loadrs, regs->ar_ccv, regs->ar_fpsr);
2140         printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd);
2141         printk("b0  : %016lx b6  : %016lx b7  : %016lx\n", regs->b0,
2142                                                         regs->b6, regs->b7);
2143         printk("f6  : %05lx%016lx f7  : %05lx%016lx\n",
2144                regs->f6.u.bits[1], regs->f6.u.bits[0],
2145                regs->f7.u.bits[1], regs->f7.u.bits[0]);
2146         printk("f8  : %05lx%016lx f9  : %05lx%016lx\n",
2147                regs->f8.u.bits[1], regs->f8.u.bits[0],
2148                regs->f9.u.bits[1], regs->f9.u.bits[0]);
2149         printk("f10 : %05lx%016lx f11 : %05lx%016lx\n",
2150                regs->f10.u.bits[1], regs->f10.u.bits[0],
2151                regs->f11.u.bits[1], regs->f11.u.bits[0]);
2152
2153         printk("r1  : %016lx r2  : %016lx r3  : %016lx\n", regs->r1,
2154                                                         regs->r2, regs->r3);
2155         printk("r8  : %016lx r9  : %016lx r10 : %016lx\n", regs->r8,
2156                                                         regs->r9, regs->r10);
2157         printk("r11 : %016lx r12 : %016lx r13 : %016lx\n", regs->r11,
2158                                                         regs->r12, regs->r13);
2159         printk("r14 : %016lx r15 : %016lx r16 : %016lx\n", regs->r14,
2160                                                         regs->r15, regs->r16);
2161         printk("r17 : %016lx r18 : %016lx r19 : %016lx\n", regs->r17,
2162                                                         regs->r18, regs->r19);
2163         printk("r20 : %016lx r21 : %016lx r22 : %016lx\n", regs->r20,
2164                                                         regs->r21, regs->r22);
2165         printk("r23 : %016lx r24 : %016lx r25 : %016lx\n", regs->r23,
2166                                                         regs->r24, regs->r25);
2167         printk("r26 : %016lx r27 : %016lx r28 : %016lx\n", regs->r26,
2168                                                         regs->r27, regs->r28);
2169         printk("r29 : %016lx r30 : %016lx r31 : %016lx\n", regs->r29,
2170                                                         regs->r30, regs->r31);
2171
2172 }
2173
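/*
 * Fatal guest/VMM error: print the formatted message and the saved register
 * state, report EXIT_REASON_VM_PANIC to the host through vmm_transition(),
 * and never return.
 */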
2174 void panic_vm(struct kvm_vcpu *v, const char *fmt, ...)
2175 {
2176         va_list args;
2177         char buf[256];
2178
2179         struct kvm_pt_regs *regs = vcpu_regs(v);
2180         struct exit_ctl_data *p = &v->arch.exit_data;
2181         va_start(args, fmt);
2182         vsnprintf(buf, sizeof(buf), fmt, args);
2183         va_end(args);
2184         printk("%s", buf);
2185         kvm_show_registers(regs);
2186         p->exit_reason = EXIT_REASON_VM_PANIC;
2187         vmm_transition(v);
2188         /* Never returns */
2189         while (1);
2190 }