1 /*
 * kvm_vcpu.c: handling all virtual cpu related things.
3  * Copyright (c) 2005, Intel Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16  * Place - Suite 330, Boston, MA 02111-1307 USA.
17  *
18  *  Shaofan Li (Susue Li) <susie.li@intel.com>
19  *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
20  *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
21  *  Xiantao Zhang <xiantao.zhang@intel.com>
22  */
23
24 #include <linux/kvm_host.h>
25 #include <linux/types.h>
26
27 #include <asm/processor.h>
28 #include <asm/ia64regs.h>
29 #include <asm/gcc_intrin.h>
30 #include <asm/kregs.h>
31 #include <asm/pgtable.h>
32 #include <asm/tlb.h>
33
34 #include "asm-offsets.h"
35 #include "vcpu.h"
36
37 /*
38  * Special notes:
39  * - Index by it/dt/rt sequence
40  * - Only existing mode transitions are allowed in this table
 * - RSE is placed in lazy mode when emulating guest partial mode
42  * - If gva happens to be rr0 and rr4, only allowed case is identity
43  *   mapping (gva=gpa), or panic! (How?)
44  */
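/*
 * Each dimension of the table below is indexed by the 3-bit (it,dt,rt)
 * combination that MODE_IND() builds from the old and new psr, so entry
 * [old][new] names the action taken for that mode transition:
 * SW_V2P (virtual -> physical), SW_P2V (physical -> virtual),
 * SW_SELF (no mode change) or SW_NOP (nothing to do).
 */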
45 int mm_switch_table[8][8] = {
46         /*  2004/09/12(Kevin): Allow switch to self */
47         /*
48          *  (it,dt,rt): (0,0,0) -> (1,1,1)
49          *  This kind of transition usually occurs in the very early
50          *  stage of Linux boot up procedure. Another case is in efi
51          *  and pal calls. (see "arch/ia64/kernel/head.S")
52          *
53          *  (it,dt,rt): (0,0,0) -> (0,1,1)
54          *  This kind of transition is found when OSYa exits efi boot
         *  service. Since gva = gpa in this case (same region), data
         *  accesses can be satisfied even though the itlb entry that is
         *  hit is the one installed for physical-mode emulation.
58          */
59         {SW_SELF, 0,  0,  SW_NOP, 0,  0,  0,  SW_P2V},
60         {0,  0,  0,  0,  0,  0,  0,  0},
61         {0,  0,  0,  0,  0,  0,  0,  0},
62         /*
63          *  (it,dt,rt): (0,1,1) -> (1,1,1)
64          *  This kind of transition is found in OSYa.
65          *
66          *  (it,dt,rt): (0,1,1) -> (0,0,0)
67          *  This kind of transition is found in OSYa
68          */
69         {SW_NOP, 0,  0,  SW_SELF, 0,  0,  0,  SW_P2V},
70         /* (1,0,0)->(1,1,1) */
71         {0,  0,  0,  0,  0,  0,  0,  SW_P2V},
72         /*
73          *  (it,dt,rt): (1,0,1) -> (1,1,1)
74          *  This kind of transition usually occurs when Linux returns
75          *  from the low level TLB miss handlers.
76          *  (see "arch/ia64/kernel/ivt.S")
77          */
78         {0,  0,  0,  0,  0,  SW_SELF, 0,  SW_P2V},
79         {0,  0,  0,  0,  0,  0,  0,  0},
80         /*
81          *  (it,dt,rt): (1,1,1) -> (1,0,1)
82          *  This kind of transition usually occurs in Linux low level
83          *  TLB miss handler. (see "arch/ia64/kernel/ivt.S")
84          *
85          *  (it,dt,rt): (1,1,1) -> (0,0,0)
86          *  This kind of transition usually occurs in pal and efi calls,
87          *  which requires running in physical mode.
88          *  (see "arch/ia64/kernel/head.S")
89          *  (1,1,1)->(1,0,0)
90          */
91
92         {SW_V2P, 0,  0,  0,  SW_V2P, SW_V2P, 0,  SW_SELF},
93 };
94
95 void physical_mode_init(struct kvm_vcpu  *vcpu)
96 {
97         vcpu->arch.mode_flags = GUEST_IN_PHY;
98 }
99
100 void switch_to_physical_rid(struct kvm_vcpu *vcpu)
101 {
102         unsigned long psr;
103
104         /* Save original virtual mode rr[0] and rr[4] */
105         psr = ia64_clear_ic();
106         ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_rr0);
107         ia64_srlz_d();
108         ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_rr4);
109         ia64_srlz_d();
110
111         ia64_set_psr(psr);
112         return;
113 }
114
115 void switch_to_virtual_rid(struct kvm_vcpu *vcpu)
116 {
117         unsigned long psr;
118
119         psr = ia64_clear_ic();
120         ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
121         ia64_srlz_d();
122         ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4);
123         ia64_srlz_d();
124         ia64_set_psr(psr);
125         return;
126 }
127
128 static int mm_switch_action(struct ia64_psr opsr, struct ia64_psr npsr)
129 {
130         return mm_switch_table[MODE_IND(opsr)][MODE_IND(npsr)];
131 }
132
133 void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
134                                         struct ia64_psr new_psr)
135 {
136         int act;
137         act = mm_switch_action(old_psr, new_psr);
138         switch (act) {
139         case SW_V2P:
140                 /*printk("V -> P mode transition: (0x%lx -> 0x%lx)\n",
141                 old_psr.val, new_psr.val);*/
142                 switch_to_physical_rid(vcpu);
143                 /*
                 * Set rse to enforced lazy, to prevent active rse
                 * save/restore while in guest physical mode.
146                  */
147                 vcpu->arch.mode_flags |= GUEST_IN_PHY;
148                 break;
149         case SW_P2V:
150                 switch_to_virtual_rid(vcpu);
151                 /*
152                  * recover old mode which is saved when entering
153                  * guest physical mode
154                  */
155                 vcpu->arch.mode_flags &= ~GUEST_IN_PHY;
156                 break;
157         case SW_SELF:
158                 break;
159         case SW_NOP:
160                 break;
161         default:
162                 /* Sanity check */
163                 break;
164         }
165         return;
166 }
167
184 void check_mm_mode_switch(struct kvm_vcpu *vcpu,  struct ia64_psr old_psr,
185                                         struct ia64_psr new_psr)
186 {
187
188         if ((old_psr.dt != new_psr.dt)
189                         || (old_psr.it != new_psr.it)
190                         || (old_psr.rt != new_psr.rt))
191                 switch_mm_mode(vcpu, old_psr, new_psr);
192
193         return;
194 }
195
196
/*
 * In physical mode, tc/tr inserts for regions 0 and 4 use RID[0] and
 * RID[4], which are reserved for physical mode emulation.  However,
 * what those inserted tc/tr entries need is the rid of virtual mode,
 * so the original virtual rid has to be restored before the insert.
 *
 * Operations which require such a switch include:
 *  - insertions (itc.*, itr.*)
 *  - purges (ptc.* and ptr.*)
 *  - tpa
 *  - tak
 *  - thash?, ttag?
 * All of the above need the actual virtual rid for the destination entry.
 */
212
213 void prepare_if_physical_mode(struct kvm_vcpu *vcpu)
214 {
215         if (is_physical_mode(vcpu)) {
216                 vcpu->arch.mode_flags |= GUEST_PHY_EMUL;
217                 switch_to_virtual_rid(vcpu);
218         }
219         return;
220 }
221
222 /* Recover always follows prepare */
223 void recover_if_physical_mode(struct kvm_vcpu *vcpu)
224 {
225         if (is_physical_mode(vcpu))
226                 switch_to_physical_rid(vcpu);
227         vcpu->arch.mode_flags &= ~GUEST_PHY_EMUL;
228         return;
229 }
230
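/*
 * RPT(x) is effectively offsetof(struct kvm_pt_regs, x): gr_info[]
 * below maps a static general register number to the offset of its
 * save slot inside struct kvm_pt_regs, so getreg()/setreg() can reach
 * it as (unsigned long)regs + gr_info[regnum].
 */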
231 #define RPT(x)  ((u16) &((struct kvm_pt_regs *)0)->x)
232
233 static u16 gr_info[32] = {
234         0,      /* r0 is read-only : WE SHOULD NEVER GET THIS */
235         RPT(r1), RPT(r2), RPT(r3),
236         RPT(r4), RPT(r5), RPT(r6), RPT(r7),
237         RPT(r8), RPT(r9), RPT(r10), RPT(r11),
238         RPT(r12), RPT(r13), RPT(r14), RPT(r15),
239         RPT(r16), RPT(r17), RPT(r18), RPT(r19),
240         RPT(r20), RPT(r21), RPT(r22), RPT(r23),
241         RPT(r24), RPT(r25), RPT(r26), RPT(r27),
242         RPT(r28), RPT(r29), RPT(r30), RPT(r31)
243 };
244
245 #define IA64_FIRST_STACKED_GR   32
246 #define IA64_FIRST_ROTATING_FR  32
247
248 static inline unsigned long
249 rotate_reg(unsigned long sor, unsigned long rrb, unsigned long reg)
250 {
251         reg += rrb;
252         if (reg >= sor)
253                 reg -= sor;
254         return reg;
255 }
256
/*
 * Return the (rotated) index of the floating point register whose
 * architectural number is REGNUM (REGNUM must be in the range 32-127;
 * the result is in the range 0-95).
 */
262 static inline unsigned long fph_index(struct kvm_pt_regs *regs,
263                                                 long regnum)
264 {
265         unsigned long rrb_fr = (regs->cr_ifs >> 25) & 0x7f;
266         return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
267 }
268
/*
 * Given a backing-store address and a (possibly negative) number of
 * registers, return the address that many register slots away, taking
 * into account the RNaT collection slot that occupies every 64th dword
 * of the RSE backing store (e.g. ar.bsp is bspstore skipped forward by
 * the number of dirty registers).
 */
273 static inline unsigned long *kvm_rse_skip_regs(unsigned long *addr,
274                                                         long num_regs)
275 {
276         long delta = ia64_rse_slot_num(addr) + num_regs;
277         int i = 0;
278
279         if (num_regs < 0)
280                 delta -= 0x3e;
281         if (delta < 0) {
282                 while (delta <= -0x3f) {
283                         i--;
284                         delta += 0x3f;
285                 }
286         } else {
287                 while (delta >= 0x3f) {
288                         i++;
289                         delta -= 0x3f;
290                 }
291         }
292
293         return addr + num_regs + i;
294 }
295
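/*
 * get_rse_reg() reads stacked register r32+ridx of the interrupted
 * context from the VMM register backing store.  RSC is switched to
 * enforced-lazy mode, the slot address is derived from the backing
 * store base plus the dirty-partition size (regs->loadrs >> 19, i.e.
 * the loadrs byte count held at bit 16 converted to dwords), and the
 * NaT bit is taken either from ar.rnat or from the in-memory RNaT
 * collection, depending on which side of ar.bspstore the slot lies.
 */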
296 static void get_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
297                                         unsigned long *val, int *nat)
298 {
299         unsigned long *bsp, *addr, *rnat_addr, *bspstore;
300         unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
301         unsigned long nat_mask;
302         unsigned long old_rsc, new_rsc;
303         long sof = (regs->cr_ifs) & 0x7f;
304         long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
305         long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
306         long ridx = r1 - 32;
307
308         if (ridx < sor)
309                 ridx = rotate_reg(sor, rrb_gr, ridx);
310
311         old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
312         new_rsc = old_rsc&(~(0x3));
313         ia64_setreg(_IA64_REG_AR_RSC, new_rsc);
314
315         bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
316         bsp = kbs + (regs->loadrs >> 19);
317
318         addr = kvm_rse_skip_regs(bsp, -sof + ridx);
319         nat_mask = 1UL << ia64_rse_slot_num(addr);
320         rnat_addr = ia64_rse_rnat_addr(addr);
321
322         if (addr >= bspstore) {
323                 ia64_flushrs();
324                 ia64_mf();
325                 bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
326         }
327         *val = *addr;
        if (nat) {
                if (bspstore < rnat_addr)
                        *nat = (int)!!(ia64_getreg(_IA64_REG_AR_RNAT)
                                                        & nat_mask);
                else
                        *nat = (int)!!((*rnat_addr) & nat_mask);
        }
        /* Restore the caller's RSC mode even when no NaT bit was requested. */
        ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
}
337
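/*
 * set_rse_reg() is the store-side counterpart of get_rse_reg(): it
 * writes the value into the corresponding backing-store slot, flushing
 * the dirty partition first when the slot has not yet been spilled
 * (addr >= bspstore).  Note that both paths only clear the matching
 * RNaT bit; the nat argument is not propagated.
 */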
338 void set_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
339                                 unsigned long val, unsigned long nat)
340 {
341         unsigned long *bsp, *bspstore, *addr, *rnat_addr;
342         unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
343         unsigned long nat_mask;
344         unsigned long old_rsc, new_rsc, psr;
345         unsigned long rnat;
346         long sof = (regs->cr_ifs) & 0x7f;
347         long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
348         long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
349         long ridx = r1 - 32;
350
351         if (ridx < sor)
352                 ridx = rotate_reg(sor, rrb_gr, ridx);
353
354         old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
355         /* put RSC to lazy mode, and set loadrs 0 */
356         new_rsc = old_rsc & (~0x3fff0003);
357         ia64_setreg(_IA64_REG_AR_RSC, new_rsc);
358         bsp = kbs + (regs->loadrs >> 19); /* 16 + 3 */
359
360         addr = kvm_rse_skip_regs(bsp, -sof + ridx);
361         nat_mask = 1UL << ia64_rse_slot_num(addr);
362         rnat_addr = ia64_rse_rnat_addr(addr);
363
364         local_irq_save(psr);
365         bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
366         if (addr >= bspstore) {
367
368                 ia64_flushrs();
369                 ia64_mf();
370                 *addr = val;
371                 bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
372                 rnat = ia64_getreg(_IA64_REG_AR_RNAT);
373                 if (bspstore < rnat_addr)
374                         rnat = rnat & (~nat_mask);
375                 else
376                         *rnat_addr = (*rnat_addr)&(~nat_mask);
377
378                 ia64_mf();
379                 ia64_loadrs();
380                 ia64_setreg(_IA64_REG_AR_RNAT, rnat);
381         } else {
382                 rnat = ia64_getreg(_IA64_REG_AR_RNAT);
383                 *addr = val;
384                 if (bspstore < rnat_addr)
385                         rnat = rnat&(~nat_mask);
386                 else
387                         *rnat_addr = (*rnat_addr) & (~nat_mask);
388
389                 ia64_setreg(_IA64_REG_AR_BSPSTORE, bspstore);
390                 ia64_setreg(_IA64_REG_AR_RNAT, rnat);
391         }
392         local_irq_restore(psr);
393         ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
394 }
395
396 void getreg(unsigned long regnum, unsigned long *val,
397                                 int *nat, struct kvm_pt_regs *regs)
398 {
399         unsigned long addr, *unat;
400         if (regnum >= IA64_FIRST_STACKED_GR) {
401                 get_rse_reg(regs, regnum, val, nat);
402                 return;
403         }
404
405         /*
406          * Now look at registers in [0-31] range and init correct UNAT
407          */
408         addr = (unsigned long)regs;
        unat = &regs->eml_unat;
410
411         addr += gr_info[regnum];
412
413         *val  = *(unsigned long *)addr;
414         /*
415          * do it only when requested
416          */
417         if (nat)
418                 *nat  = (*unat >> ((addr >> 3) & 0x3f)) & 0x1UL;
419 }
420
421 void setreg(unsigned long regnum, unsigned long val,
422                         int nat, struct kvm_pt_regs *regs)
423 {
424         unsigned long addr;
425         unsigned long bitmask;
426         unsigned long *unat;
427
428         /*
429          * First takes care of stacked registers
430          */
431         if (regnum >= IA64_FIRST_STACKED_GR) {
432                 set_rse_reg(regs, regnum, val, nat);
433                 return;
434         }
435
436         /*
437          * Now look at registers in [0-31] range and init correct UNAT
438          */
439         addr = (unsigned long)regs;
440         unat = &regs->eml_unat;
441         /*
442          * add offset from base of struct
443          * and do it !
444          */
445         addr += gr_info[regnum];
446
447         *(unsigned long *)addr = val;
448
        /*
         * We need to set or clear the corresponding UNAT bit to fully
         * emulate the register write: UNAT bit_pos = GR[r3]{8:3} from EAS-2.4
         */
        bitmask = 1UL << ((addr >> 3) & 0x3f);
        if (nat)
                *unat |= bitmask;
        else
                *unat &= ~bitmask;
458
459 }
460
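/*
 * vcpu_get_gr()/vcpu_set_gr() give the guest's view of the general
 * registers: r0 always reads as 0 and is never written, and writes to
 * registers beyond the current frame (reg >= sof + 32) are silently
 * dropped.
 */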
461 u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg)
462 {
463         struct kvm_pt_regs *regs = vcpu_regs(vcpu);
464         u64 val;
465
466         if (!reg)
467                 return 0;
468         getreg(reg, &val, 0, regs);
469         return val;
470 }
471
472 void vcpu_set_gr(struct kvm_vcpu *vcpu, u64 reg, u64 value, int nat)
473 {
474         struct kvm_pt_regs *regs = vcpu_regs(vcpu);
475         long sof = (regs->cr_ifs) & 0x7f;
476
477         if (!reg)
478                 return;
479         if (reg >= sof + 32)
480                 return;
481         setreg(reg, value, nat, regs);  /* FIXME: handle NATs later*/
482 }
483
484 void getfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
485                                 struct kvm_pt_regs *regs)
486 {
487         /* Take floating register rotation into consideration*/
488         if (regnum >= IA64_FIRST_ROTATING_FR)
489                 regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);
490 #define CASE_FIXED_FP(reg)                      \
491         case  (reg) :                           \
492                 ia64_stf_spill(fpval, reg);     \
493         break
494
495         switch (regnum) {
496                 CASE_FIXED_FP(0);
497                 CASE_FIXED_FP(1);
498                 CASE_FIXED_FP(2);
499                 CASE_FIXED_FP(3);
500                 CASE_FIXED_FP(4);
501                 CASE_FIXED_FP(5);
502
503                 CASE_FIXED_FP(6);
504                 CASE_FIXED_FP(7);
505                 CASE_FIXED_FP(8);
506                 CASE_FIXED_FP(9);
507                 CASE_FIXED_FP(10);
508                 CASE_FIXED_FP(11);
509
510                 CASE_FIXED_FP(12);
511                 CASE_FIXED_FP(13);
512                 CASE_FIXED_FP(14);
513                 CASE_FIXED_FP(15);
514                 CASE_FIXED_FP(16);
515                 CASE_FIXED_FP(17);
516                 CASE_FIXED_FP(18);
517                 CASE_FIXED_FP(19);
518                 CASE_FIXED_FP(20);
519                 CASE_FIXED_FP(21);
520                 CASE_FIXED_FP(22);
521                 CASE_FIXED_FP(23);
522                 CASE_FIXED_FP(24);
523                 CASE_FIXED_FP(25);
524                 CASE_FIXED_FP(26);
525                 CASE_FIXED_FP(27);
526                 CASE_FIXED_FP(28);
527                 CASE_FIXED_FP(29);
528                 CASE_FIXED_FP(30);
529                 CASE_FIXED_FP(31);
530                 CASE_FIXED_FP(32);
531                 CASE_FIXED_FP(33);
532                 CASE_FIXED_FP(34);
533                 CASE_FIXED_FP(35);
534                 CASE_FIXED_FP(36);
535                 CASE_FIXED_FP(37);
536                 CASE_FIXED_FP(38);
537                 CASE_FIXED_FP(39);
538                 CASE_FIXED_FP(40);
539                 CASE_FIXED_FP(41);
540                 CASE_FIXED_FP(42);
541                 CASE_FIXED_FP(43);
542                 CASE_FIXED_FP(44);
543                 CASE_FIXED_FP(45);
544                 CASE_FIXED_FP(46);
545                 CASE_FIXED_FP(47);
546                 CASE_FIXED_FP(48);
547                 CASE_FIXED_FP(49);
548                 CASE_FIXED_FP(50);
549                 CASE_FIXED_FP(51);
550                 CASE_FIXED_FP(52);
551                 CASE_FIXED_FP(53);
552                 CASE_FIXED_FP(54);
553                 CASE_FIXED_FP(55);
554                 CASE_FIXED_FP(56);
555                 CASE_FIXED_FP(57);
556                 CASE_FIXED_FP(58);
557                 CASE_FIXED_FP(59);
558                 CASE_FIXED_FP(60);
559                 CASE_FIXED_FP(61);
560                 CASE_FIXED_FP(62);
561                 CASE_FIXED_FP(63);
562                 CASE_FIXED_FP(64);
563                 CASE_FIXED_FP(65);
564                 CASE_FIXED_FP(66);
565                 CASE_FIXED_FP(67);
566                 CASE_FIXED_FP(68);
567                 CASE_FIXED_FP(69);
568                 CASE_FIXED_FP(70);
569                 CASE_FIXED_FP(71);
570                 CASE_FIXED_FP(72);
571                 CASE_FIXED_FP(73);
572                 CASE_FIXED_FP(74);
573                 CASE_FIXED_FP(75);
574                 CASE_FIXED_FP(76);
575                 CASE_FIXED_FP(77);
576                 CASE_FIXED_FP(78);
577                 CASE_FIXED_FP(79);
578                 CASE_FIXED_FP(80);
579                 CASE_FIXED_FP(81);
580                 CASE_FIXED_FP(82);
581                 CASE_FIXED_FP(83);
582                 CASE_FIXED_FP(84);
583                 CASE_FIXED_FP(85);
584                 CASE_FIXED_FP(86);
585                 CASE_FIXED_FP(87);
586                 CASE_FIXED_FP(88);
587                 CASE_FIXED_FP(89);
588                 CASE_FIXED_FP(90);
589                 CASE_FIXED_FP(91);
590                 CASE_FIXED_FP(92);
591                 CASE_FIXED_FP(93);
592                 CASE_FIXED_FP(94);
593                 CASE_FIXED_FP(95);
594                 CASE_FIXED_FP(96);
595                 CASE_FIXED_FP(97);
596                 CASE_FIXED_FP(98);
597                 CASE_FIXED_FP(99);
598                 CASE_FIXED_FP(100);
599                 CASE_FIXED_FP(101);
600                 CASE_FIXED_FP(102);
601                 CASE_FIXED_FP(103);
602                 CASE_FIXED_FP(104);
603                 CASE_FIXED_FP(105);
604                 CASE_FIXED_FP(106);
605                 CASE_FIXED_FP(107);
606                 CASE_FIXED_FP(108);
607                 CASE_FIXED_FP(109);
608                 CASE_FIXED_FP(110);
609                 CASE_FIXED_FP(111);
610                 CASE_FIXED_FP(112);
611                 CASE_FIXED_FP(113);
612                 CASE_FIXED_FP(114);
613                 CASE_FIXED_FP(115);
614                 CASE_FIXED_FP(116);
615                 CASE_FIXED_FP(117);
616                 CASE_FIXED_FP(118);
617                 CASE_FIXED_FP(119);
618                 CASE_FIXED_FP(120);
619                 CASE_FIXED_FP(121);
620                 CASE_FIXED_FP(122);
621                 CASE_FIXED_FP(123);
622                 CASE_FIXED_FP(124);
623                 CASE_FIXED_FP(125);
624                 CASE_FIXED_FP(126);
625                 CASE_FIXED_FP(127);
626         }
627 #undef CASE_FIXED_FP
628 }
629
630 void setfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
631                                         struct kvm_pt_regs *regs)
632 {
633         /* Take floating register rotation into consideration*/
634         if (regnum >= IA64_FIRST_ROTATING_FR)
635                 regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);
636
637 #define CASE_FIXED_FP(reg)                      \
638         case (reg) :                            \
639                 ia64_ldf_fill(reg, fpval);      \
640         break
641
642         switch (regnum) {
643                 CASE_FIXED_FP(2);
644                 CASE_FIXED_FP(3);
645                 CASE_FIXED_FP(4);
646                 CASE_FIXED_FP(5);
647
648                 CASE_FIXED_FP(6);
649                 CASE_FIXED_FP(7);
650                 CASE_FIXED_FP(8);
651                 CASE_FIXED_FP(9);
652                 CASE_FIXED_FP(10);
653                 CASE_FIXED_FP(11);
654
655                 CASE_FIXED_FP(12);
656                 CASE_FIXED_FP(13);
657                 CASE_FIXED_FP(14);
658                 CASE_FIXED_FP(15);
659                 CASE_FIXED_FP(16);
660                 CASE_FIXED_FP(17);
661                 CASE_FIXED_FP(18);
662                 CASE_FIXED_FP(19);
663                 CASE_FIXED_FP(20);
664                 CASE_FIXED_FP(21);
665                 CASE_FIXED_FP(22);
666                 CASE_FIXED_FP(23);
667                 CASE_FIXED_FP(24);
668                 CASE_FIXED_FP(25);
669                 CASE_FIXED_FP(26);
670                 CASE_FIXED_FP(27);
671                 CASE_FIXED_FP(28);
672                 CASE_FIXED_FP(29);
673                 CASE_FIXED_FP(30);
674                 CASE_FIXED_FP(31);
675                 CASE_FIXED_FP(32);
676                 CASE_FIXED_FP(33);
677                 CASE_FIXED_FP(34);
678                 CASE_FIXED_FP(35);
679                 CASE_FIXED_FP(36);
680                 CASE_FIXED_FP(37);
681                 CASE_FIXED_FP(38);
682                 CASE_FIXED_FP(39);
683                 CASE_FIXED_FP(40);
684                 CASE_FIXED_FP(41);
685                 CASE_FIXED_FP(42);
686                 CASE_FIXED_FP(43);
687                 CASE_FIXED_FP(44);
688                 CASE_FIXED_FP(45);
689                 CASE_FIXED_FP(46);
690                 CASE_FIXED_FP(47);
691                 CASE_FIXED_FP(48);
692                 CASE_FIXED_FP(49);
693                 CASE_FIXED_FP(50);
694                 CASE_FIXED_FP(51);
695                 CASE_FIXED_FP(52);
696                 CASE_FIXED_FP(53);
697                 CASE_FIXED_FP(54);
698                 CASE_FIXED_FP(55);
699                 CASE_FIXED_FP(56);
700                 CASE_FIXED_FP(57);
701                 CASE_FIXED_FP(58);
702                 CASE_FIXED_FP(59);
703                 CASE_FIXED_FP(60);
704                 CASE_FIXED_FP(61);
705                 CASE_FIXED_FP(62);
706                 CASE_FIXED_FP(63);
707                 CASE_FIXED_FP(64);
708                 CASE_FIXED_FP(65);
709                 CASE_FIXED_FP(66);
710                 CASE_FIXED_FP(67);
711                 CASE_FIXED_FP(68);
712                 CASE_FIXED_FP(69);
713                 CASE_FIXED_FP(70);
714                 CASE_FIXED_FP(71);
715                 CASE_FIXED_FP(72);
716                 CASE_FIXED_FP(73);
717                 CASE_FIXED_FP(74);
718                 CASE_FIXED_FP(75);
719                 CASE_FIXED_FP(76);
720                 CASE_FIXED_FP(77);
721                 CASE_FIXED_FP(78);
722                 CASE_FIXED_FP(79);
723                 CASE_FIXED_FP(80);
724                 CASE_FIXED_FP(81);
725                 CASE_FIXED_FP(82);
726                 CASE_FIXED_FP(83);
727                 CASE_FIXED_FP(84);
728                 CASE_FIXED_FP(85);
729                 CASE_FIXED_FP(86);
730                 CASE_FIXED_FP(87);
731                 CASE_FIXED_FP(88);
732                 CASE_FIXED_FP(89);
733                 CASE_FIXED_FP(90);
734                 CASE_FIXED_FP(91);
735                 CASE_FIXED_FP(92);
736                 CASE_FIXED_FP(93);
737                 CASE_FIXED_FP(94);
738                 CASE_FIXED_FP(95);
739                 CASE_FIXED_FP(96);
740                 CASE_FIXED_FP(97);
741                 CASE_FIXED_FP(98);
742                 CASE_FIXED_FP(99);
743                 CASE_FIXED_FP(100);
744                 CASE_FIXED_FP(101);
745                 CASE_FIXED_FP(102);
746                 CASE_FIXED_FP(103);
747                 CASE_FIXED_FP(104);
748                 CASE_FIXED_FP(105);
749                 CASE_FIXED_FP(106);
750                 CASE_FIXED_FP(107);
751                 CASE_FIXED_FP(108);
752                 CASE_FIXED_FP(109);
753                 CASE_FIXED_FP(110);
754                 CASE_FIXED_FP(111);
755                 CASE_FIXED_FP(112);
756                 CASE_FIXED_FP(113);
757                 CASE_FIXED_FP(114);
758                 CASE_FIXED_FP(115);
759                 CASE_FIXED_FP(116);
760                 CASE_FIXED_FP(117);
761                 CASE_FIXED_FP(118);
762                 CASE_FIXED_FP(119);
763                 CASE_FIXED_FP(120);
764                 CASE_FIXED_FP(121);
765                 CASE_FIXED_FP(122);
766                 CASE_FIXED_FP(123);
767                 CASE_FIXED_FP(124);
768                 CASE_FIXED_FP(125);
769                 CASE_FIXED_FP(126);
770                 CASE_FIXED_FP(127);
771         }
772 }
773
774 void vcpu_get_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
775                                                 struct ia64_fpreg *val)
776 {
777         struct kvm_pt_regs *regs = vcpu_regs(vcpu);
778
779         getfpreg(reg, val, regs);   /* FIXME: handle NATs later*/
780 }
781
782 void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
783                                                 struct ia64_fpreg *val)
784 {
785         struct kvm_pt_regs *regs = vcpu_regs(vcpu);
786
787         if (reg > 1)
788                 setfpreg(reg, val, regs);   /* FIXME: handle NATs later*/
789 }
790
791 /************************************************************************
792  * lsapic timer
793  ***********************************************************************/
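/*
 * The guest ITC is modelled as the host ar.itc plus a per-vcpu
 * itc_offset; last_itc remembers the largest value ever returned so
 * that the guest never observes its ITC going backwards.
 */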
794 u64 vcpu_get_itc(struct kvm_vcpu *vcpu)
795 {
796         unsigned long guest_itc;
797         guest_itc = VMX(vcpu, itc_offset) + ia64_getreg(_IA64_REG_AR_ITC);
798
799         if (guest_itc >= VMX(vcpu, last_itc)) {
800                 VMX(vcpu, last_itc) = guest_itc;
801                 return  guest_itc;
802         } else
803                 return VMX(vcpu, last_itc);
804 }
805
806 static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val);
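/*
 * Writing the guest ITC recomputes itc_offset against the host ar.itc.
 * When vcpu 0 does it, the new offset is propagated to every online
 * vcpu (the vcpu structures are laid out back to back, one
 * sizeof(struct kvm_vcpu_data) apart), and the pending timer state is
 * re-evaluated against the current itm.
 */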
807 static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
808 {
809         struct kvm_vcpu *v;
810         struct kvm *kvm;
811         int i;
812         long itc_offset = val - ia64_getreg(_IA64_REG_AR_ITC);
813         unsigned long vitv = VCPU(vcpu, itv);
814
815         kvm = (struct kvm *)KVM_VM_BASE;
816
817         if (vcpu->vcpu_id == 0) {
818                 for (i = 0; i < kvm->arch.online_vcpus; i++) {
819                         v = (struct kvm_vcpu *)((char *)vcpu +
820                                         sizeof(struct kvm_vcpu_data) * i);
821                         VMX(v, itc_offset) = itc_offset;
822                         VMX(v, last_itc) = 0;
823                 }
824         }
825         VMX(vcpu, last_itc) = 0;
826         if (VCPU(vcpu, itm) <= val) {
827                 VMX(vcpu, itc_check) = 0;
828                 vcpu_unpend_interrupt(vcpu, vitv);
829         } else {
830                 VMX(vcpu, itc_check) = 1;
831                 vcpu_set_itm(vcpu, VCPU(vcpu, itm));
832         }
833
834 }
835
836 static inline u64 vcpu_get_itm(struct kvm_vcpu *vcpu)
837 {
838         return ((u64)VCPU(vcpu, itm));
839 }
840
841 static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val)
842 {
843         unsigned long vitv = VCPU(vcpu, itv);
844         VCPU(vcpu, itm) = val;
845
846         if (val > vcpu_get_itc(vcpu)) {
847                 VMX(vcpu, itc_check) = 1;
848                 vcpu_unpend_interrupt(vcpu, vitv);
849                 VMX(vcpu, timer_pending) = 0;
850         } else
851                 VMX(vcpu, itc_check) = 0;
852 }
853
#define  ITV_VECTOR(itv)    ((itv) & 0xff)
#define  ITV_IRQ_MASK(itv)  ((itv) & (1 << 16))
856
857 static inline void vcpu_set_itv(struct kvm_vcpu *vcpu, u64 val)
858 {
859         VCPU(vcpu, itv) = val;
860         if (!ITV_IRQ_MASK(val) && vcpu->arch.timer_pending) {
861                 vcpu_pend_interrupt(vcpu, ITV_VECTOR(val));
862                 vcpu->arch.timer_pending = 0;
863         }
864 }
865
866 static inline void vcpu_set_eoi(struct kvm_vcpu *vcpu, u64 val)
867 {
868         int vec;
869
870         vec = highest_inservice_irq(vcpu);
871         if (vec == NULL_VECTOR)
872                 return;
873         VMX(vcpu, insvc[vec >> 6]) &= ~(1UL << (vec & 63));
874         VCPU(vcpu, eoi) = 0;
875         vcpu->arch.irq_new_pending = 1;
876
877 }
878
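/*
 * irq_masked() decides whether the highest pending vector may be
 * delivered, given the highest in-service vector and the guest TPR:
 * NMI is only blocked by an in-service NMI, ExtINT is blocked by
 * tpr.mmi (or a higher in-service vector), and ordinary vectors must
 * both outrank the in-service vector and pass the tpr.mic/mmi
 * priority-class check.
 */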
879 /* See Table 5-8 in SDM vol2 for the definition */
880 int irq_masked(struct kvm_vcpu *vcpu, int h_pending, int h_inservice)
881 {
882         union ia64_tpr vtpr;
883
884         vtpr.val = VCPU(vcpu, tpr);
885
886         if (h_inservice == NMI_VECTOR)
887                 return IRQ_MASKED_BY_INSVC;
888
889         if (h_pending == NMI_VECTOR) {
890                 /* Non Maskable Interrupt */
891                 return IRQ_NO_MASKED;
892         }
893
894         if (h_inservice == ExtINT_VECTOR)
895                 return IRQ_MASKED_BY_INSVC;
896
897         if (h_pending == ExtINT_VECTOR) {
898                 if (vtpr.mmi) {
899                         /* mask all external IRQ */
900                         return IRQ_MASKED_BY_VTPR;
901                 } else
902                         return IRQ_NO_MASKED;
903         }
904
905         if (is_higher_irq(h_pending, h_inservice)) {
906                 if (is_higher_class(h_pending, vtpr.mic + (vtpr.mmi << 4)))
907                         return IRQ_NO_MASKED;
908                 else
909                         return IRQ_MASKED_BY_VTPR;
910         } else {
911                 return IRQ_MASKED_BY_INSVC;
912         }
913 }
914
915 void vcpu_pend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
916 {
917         long spsr;
918         int ret;
919
920         local_irq_save(spsr);
921         ret = test_and_set_bit(vec, &VCPU(vcpu, irr[0]));
922         local_irq_restore(spsr);
923
924         vcpu->arch.irq_new_pending = 1;
925 }
926
927 void vcpu_unpend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
928 {
929         long spsr;
930         int ret;
931
932         local_irq_save(spsr);
933         ret = test_and_clear_bit(vec, &VCPU(vcpu, irr[0]));
934         local_irq_restore(spsr);
935         if (ret) {
936                 vcpu->arch.irq_new_pending = 1;
937                 wmb();
938         }
939 }
940
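/*
 * update_vhpi() publishes the highest pending vector to the VPD as a
 * "virtual highest priority interrupt" value (0 = none, 32 = NMI,
 * 16 = ExtINT, otherwise vector / 16) and, when the VPD's a_int
 * assistance bit is set, hands it to the PAL virtualization service
 * via PAL_VPS_SET_PENDING_INTERRUPT.
 */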
941 void update_vhpi(struct kvm_vcpu *vcpu, int vec)
942 {
943         u64 vhpi;
944
945         if (vec == NULL_VECTOR)
946                 vhpi = 0;
947         else if (vec == NMI_VECTOR)
948                 vhpi = 32;
949         else if (vec == ExtINT_VECTOR)
950                 vhpi = 16;
951         else
952                 vhpi = vec >> 4;
953
954         VCPU(vcpu, vhpi) = vhpi;
955         if (VCPU(vcpu, vac).a_int)
956                 ia64_call_vsa(PAL_VPS_SET_PENDING_INTERRUPT,
957                                 (u64)vcpu->arch.vpd, 0, 0, 0, 0, 0, 0);
958 }
959
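/*
 * vcpu_get_ivr() emulates a guest read of cr.ivr: the highest pending
 * vector is checked against the in-service vectors and the TPR; a
 * missing or masked vector returns the spurious vector, otherwise the
 * vector is moved from the IRR into the in-service set and returned.
 */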
960 u64 vcpu_get_ivr(struct kvm_vcpu *vcpu)
961 {
962         int vec, h_inservice, mask;
963
964         vec = highest_pending_irq(vcpu);
965         h_inservice = highest_inservice_irq(vcpu);
966         mask = irq_masked(vcpu, vec, h_inservice);
967         if (vec == NULL_VECTOR || mask == IRQ_MASKED_BY_INSVC) {
968                 if (VCPU(vcpu, vhpi))
969                         update_vhpi(vcpu, NULL_VECTOR);
970                 return IA64_SPURIOUS_INT_VECTOR;
971         }
972         if (mask == IRQ_MASKED_BY_VTPR) {
973                 update_vhpi(vcpu, vec);
974                 return IA64_SPURIOUS_INT_VECTOR;
975         }
976         VMX(vcpu, insvc[vec >> 6]) |= (1UL << (vec & 63));
977         vcpu_unpend_interrupt(vcpu, vec);
978         return  (u64)vec;
979 }
980
981 /**************************************************************************
982   Privileged operation emulation routines
983  **************************************************************************/
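/*
 * vcpu_thash() emulates the guest thash instruction: with the
 * long-format VHPT (pta.vf set) the hash is computed by the PAL
 * virtualization service, otherwise the short-format hash combines the
 * region bits of the address, the VHPT base taken from pta, and the
 * per-page offset ((vadr >> rr.ps) << 3) masked to the 2^pta.size
 * table size.
 */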
984 u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr)
985 {
986         union ia64_pta vpta;
987         union ia64_rr vrr;
988         u64 pval;
989         u64 vhpt_offset;
990
991         vpta.val = vcpu_get_pta(vcpu);
992         vrr.val = vcpu_get_rr(vcpu, vadr);
993         vhpt_offset = ((vadr >> vrr.ps) << 3) & ((1UL << (vpta.size)) - 1);
994         if (vpta.vf) {
995                 pval = ia64_call_vsa(PAL_VPS_THASH, vadr, vrr.val,
996                                 vpta.val, 0, 0, 0, 0);
997         } else {
998                 pval = (vadr & VRN_MASK) | vhpt_offset |
999                         (vpta.val << 3 >> (vpta.size + 3) << (vpta.size));
1000         }
1001         return  pval;
1002 }
1003
1004 u64 vcpu_ttag(struct kvm_vcpu *vcpu, u64 vadr)
1005 {
1006         union ia64_rr vrr;
1007         union ia64_pta vpta;
1008         u64 pval;
1009
1010         vpta.val = vcpu_get_pta(vcpu);
1011         vrr.val = vcpu_get_rr(vcpu, vadr);
1012         if (vpta.vf) {
1013                 pval = ia64_call_vsa(PAL_VPS_TTAG, vadr, vrr.val,
1014                                                 0, 0, 0, 0, 0);
1015         } else
1016                 pval = 1;
1017
1018         return  pval;
1019 }
1020
1021 u64 vcpu_tak(struct kvm_vcpu *vcpu, u64 vadr)
1022 {
1023         struct thash_data *data;
1024         union ia64_pta vpta;
1025         u64 key;
1026
1027         vpta.val = vcpu_get_pta(vcpu);
1028         if (vpta.vf == 0) {
1029                 key = 1;
1030                 return key;
1031         }
1032         data = vtlb_lookup(vcpu, vadr, D_TLB);
1033         if (!data || !data->p)
1034                 key = 1;
1035         else
1036                 key = data->key;
1037
1038         return key;
1039 }
1040
1041 void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
1042 {
1043         unsigned long thash, vadr;
1044
1045         vadr = vcpu_get_gr(vcpu, inst.M46.r3);
1046         thash = vcpu_thash(vcpu, vadr);
1047         vcpu_set_gr(vcpu, inst.M46.r1, thash, 0);
1048 }
1049
1050 void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst)
1051 {
1052         unsigned long tag, vadr;
1053
1054         vadr = vcpu_get_gr(vcpu, inst.M46.r3);
1055         tag = vcpu_ttag(vcpu, vadr);
1056         vcpu_set_gr(vcpu, inst.M46.r1, tag, 0);
1057 }
1058
1059 int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, u64 *padr)
1060 {
1061         struct thash_data *data;
1062         union ia64_isr visr, pt_isr;
1063         struct kvm_pt_regs *regs;
1064         struct ia64_psr vpsr;
1065
1066         regs = vcpu_regs(vcpu);
1067         pt_isr.val = VMX(vcpu, cr_isr);
1068         visr.val = 0;
1069         visr.ei = pt_isr.ei;
1070         visr.ir = pt_isr.ir;
1071         vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
1072         visr.na = 1;
1073
1074         data = vhpt_lookup(vadr);
1075         if (data) {
1076                 if (data->p == 0) {
1077                         vcpu_set_isr(vcpu, visr.val);
1078                         data_page_not_present(vcpu, vadr);
1079                         return IA64_FAULT;
1080                 } else if (data->ma == VA_MATTR_NATPAGE) {
1081                         vcpu_set_isr(vcpu, visr.val);
1082                         dnat_page_consumption(vcpu, vadr);
1083                         return IA64_FAULT;
1084                 } else {
1085                         *padr = (data->gpaddr >> data->ps << data->ps) |
1086                                 (vadr & (PSIZE(data->ps) - 1));
1087                         return IA64_NO_FAULT;
1088                 }
1089         }
1090
1091         data = vtlb_lookup(vcpu, vadr, D_TLB);
1092         if (data) {
1093                 if (data->p == 0) {
1094                         vcpu_set_isr(vcpu, visr.val);
1095                         data_page_not_present(vcpu, vadr);
1096                         return IA64_FAULT;
1097                 } else if (data->ma == VA_MATTR_NATPAGE) {
1098                         vcpu_set_isr(vcpu, visr.val);
1099                         dnat_page_consumption(vcpu, vadr);
1100                         return IA64_FAULT;
                } else {
1102                         *padr = ((data->ppn >> (data->ps - 12)) << data->ps)
1103                                 | (vadr & (PSIZE(data->ps) - 1));
1104                         return IA64_NO_FAULT;
1105                 }
1106         }
1107         if (!vhpt_enabled(vcpu, vadr, NA_REF)) {
1108                 if (vpsr.ic) {
1109                         vcpu_set_isr(vcpu, visr.val);
1110                         alt_dtlb(vcpu, vadr);
1111                         return IA64_FAULT;
1112                 } else {
1113                         nested_dtlb(vcpu);
1114                         return IA64_FAULT;
1115                 }
1116         } else {
1117                 if (vpsr.ic) {
1118                         vcpu_set_isr(vcpu, visr.val);
1119                         dvhpt_fault(vcpu, vadr);
1120                         return IA64_FAULT;
                } else {
1122                         nested_dtlb(vcpu);
1123                         return IA64_FAULT;
1124                 }
1125         }
1126
1127         return IA64_NO_FAULT;
1128 }
1129
1130 int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst)
1131 {
1132         unsigned long r1, r3;
1133
1134         r3 = vcpu_get_gr(vcpu, inst.M46.r3);
1135
1136         if (vcpu_tpa(vcpu, r3, &r1))
1137                 return IA64_FAULT;
1138
1139         vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
1140         return(IA64_NO_FAULT);
1141 }
1142
1143 void kvm_tak(struct kvm_vcpu *vcpu, INST64 inst)
1144 {
1145         unsigned long r1, r3;
1146
1147         r3 = vcpu_get_gr(vcpu, inst.M46.r3);
1148         r1 = vcpu_tak(vcpu, r3);
1149         vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
1150 }
1151
1152 /************************************
1153  * Insert/Purge translation register/cache
1154  ************************************/
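/*
 * The vcpu_itc_* helpers insert guest translations into the shadow
 * VHPT/TLB via thash_purge_and_insert(), while vcpu_itr_* additionally
 * record the entry in the per-vcpu itrs[]/dtrs[] arrays and flag the
 * region in itr_regions/dtr_regions, presumably so later lookups can
 * quickly skip regions that hold no translation registers.
 */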
1155 void vcpu_itc_i(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
1156 {
1157         thash_purge_and_insert(vcpu, pte, itir, ifa, I_TLB);
1158 }
1159
1160 void vcpu_itc_d(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
1161 {
1162         thash_purge_and_insert(vcpu, pte, itir, ifa, D_TLB);
1163 }
1164
1165 void vcpu_itr_i(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
1166 {
1167         u64 ps, va, rid;
1168         struct thash_data *p_itr;
1169
1170         ps = itir_ps(itir);
1171         va = PAGEALIGN(ifa, ps);
1172         pte &= ~PAGE_FLAGS_RV_MASK;
1173         rid = vcpu_get_rr(vcpu, ifa);
1174         rid = rid & RR_RID_MASK;
1175         p_itr = (struct thash_data *)&vcpu->arch.itrs[slot];
1176         vcpu_set_tr(p_itr, pte, itir, va, rid);
1177         vcpu_quick_region_set(VMX(vcpu, itr_regions), va);
1178 }
1179
1180
1181 void vcpu_itr_d(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
1182 {
1183         u64 gpfn;
1184         u64 ps, va, rid;
1185         struct thash_data *p_dtr;
1186
1187         ps = itir_ps(itir);
1188         va = PAGEALIGN(ifa, ps);
1189         pte &= ~PAGE_FLAGS_RV_MASK;
1190
1191         if (ps != _PAGE_SIZE_16M)
1192                 thash_purge_entries(vcpu, va, ps);
1193         gpfn = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
1194         if (__gpfn_is_io(gpfn))
1195                 pte |= VTLB_PTE_IO;
1196         rid = vcpu_get_rr(vcpu, va);
1197         rid = rid & RR_RID_MASK;
        p_dtr = (struct thash_data *)&vcpu->arch.dtrs[slot];
        vcpu_set_tr(p_dtr, pte, itir, va, rid);
1201         vcpu_quick_region_set(VMX(vcpu, dtr_regions), va);
1202 }
1203
1204 void vcpu_ptr_d(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
1205 {
1206         int index;
1207         u64 va;
1208
1209         va = PAGEALIGN(ifa, ps);
1210         while ((index = vtr_find_overlap(vcpu, va, ps, D_TLB)) >= 0)
1211                 vcpu->arch.dtrs[index].page_flags = 0;
1212
1213         thash_purge_entries(vcpu, va, ps);
1214 }
1215
1216 void vcpu_ptr_i(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
1217 {
1218         int index;
1219         u64 va;
1220
1221         va = PAGEALIGN(ifa, ps);
1222         while ((index = vtr_find_overlap(vcpu, va, ps, I_TLB)) >= 0)
1223                 vcpu->arch.itrs[index].page_flags = 0;
1224
1225         thash_purge_entries(vcpu, va, ps);
1226 }
1227
1228 void vcpu_ptc_l(struct kvm_vcpu *vcpu, u64 va, u64 ps)
1229 {
1230         va = PAGEALIGN(va, ps);
1231         thash_purge_entries(vcpu, va, ps);
1232 }
1233
1234 void vcpu_ptc_e(struct kvm_vcpu *vcpu, u64 va)
1235 {
1236         thash_purge_all(vcpu);
1237 }
1238
1239 void vcpu_ptc_ga(struct kvm_vcpu *vcpu, u64 va, u64 ps)
1240 {
1241         struct exit_ctl_data *p = &vcpu->arch.exit_data;
1242         long psr;
1243         local_irq_save(psr);
1244         p->exit_reason = EXIT_REASON_PTC_G;
1245
1246         p->u.ptc_g_data.rr = vcpu_get_rr(vcpu, va);
1247         p->u.ptc_g_data.vaddr = va;
1248         p->u.ptc_g_data.ps = ps;
1249         vmm_transition(vcpu);
1250         /* Do Local Purge Here*/
1251         vcpu_ptc_l(vcpu, va, ps);
1252         local_irq_restore(psr);
1253 }
1254
1255
1256 void vcpu_ptc_g(struct kvm_vcpu *vcpu, u64 va, u64 ps)
1257 {
1258         vcpu_ptc_ga(vcpu, va, ps);
1259 }
1260
1261 void kvm_ptc_e(struct kvm_vcpu *vcpu, INST64 inst)
1262 {
1263         unsigned long ifa;
1264
1265         ifa = vcpu_get_gr(vcpu, inst.M45.r3);
1266         vcpu_ptc_e(vcpu, ifa);
1267 }
1268
1269 void kvm_ptc_g(struct kvm_vcpu *vcpu, INST64 inst)
1270 {
1271         unsigned long ifa, itir;
1272
1273         ifa = vcpu_get_gr(vcpu, inst.M45.r3);
1274         itir = vcpu_get_gr(vcpu, inst.M45.r2);
1275         vcpu_ptc_g(vcpu, ifa, itir_ps(itir));
1276 }
1277
1278 void kvm_ptc_ga(struct kvm_vcpu *vcpu, INST64 inst)
1279 {
1280         unsigned long ifa, itir;
1281
1282         ifa = vcpu_get_gr(vcpu, inst.M45.r3);
1283         itir = vcpu_get_gr(vcpu, inst.M45.r2);
1284         vcpu_ptc_ga(vcpu, ifa, itir_ps(itir));
1285 }
1286
1287 void kvm_ptc_l(struct kvm_vcpu *vcpu, INST64 inst)
1288 {
1289         unsigned long ifa, itir;
1290
1291         ifa = vcpu_get_gr(vcpu, inst.M45.r3);
1292         itir = vcpu_get_gr(vcpu, inst.M45.r2);
1293         vcpu_ptc_l(vcpu, ifa, itir_ps(itir));
1294 }
1295
1296 void kvm_ptr_d(struct kvm_vcpu *vcpu, INST64 inst)
1297 {
1298         unsigned long ifa, itir;
1299
1300         ifa = vcpu_get_gr(vcpu, inst.M45.r3);
1301         itir = vcpu_get_gr(vcpu, inst.M45.r2);
1302         vcpu_ptr_d(vcpu, ifa, itir_ps(itir));
1303 }
1304
1305 void kvm_ptr_i(struct kvm_vcpu *vcpu, INST64 inst)
1306 {
1307         unsigned long ifa, itir;
1308
1309         ifa = vcpu_get_gr(vcpu, inst.M45.r3);
1310         itir = vcpu_get_gr(vcpu, inst.M45.r2);
1311         vcpu_ptr_i(vcpu, ifa, itir_ps(itir));
1312 }
1313
1314 void kvm_itr_d(struct kvm_vcpu *vcpu, INST64 inst)
1315 {
1316         unsigned long itir, ifa, pte, slot;
1317
1318         slot = vcpu_get_gr(vcpu, inst.M45.r3);
1319         pte = vcpu_get_gr(vcpu, inst.M45.r2);
1320         itir = vcpu_get_itir(vcpu);
1321         ifa = vcpu_get_ifa(vcpu);
1322         vcpu_itr_d(vcpu, slot, pte, itir, ifa);
1323 }
1324
1325
1326
1327 void kvm_itr_i(struct kvm_vcpu *vcpu, INST64 inst)
1328 {
1329         unsigned long itir, ifa, pte, slot;
1330
1331         slot = vcpu_get_gr(vcpu, inst.M45.r3);
1332         pte = vcpu_get_gr(vcpu, inst.M45.r2);
1333         itir = vcpu_get_itir(vcpu);
1334         ifa = vcpu_get_ifa(vcpu);
1335         vcpu_itr_i(vcpu, slot, pte, itir, ifa);
1336 }
1337
1338 void kvm_itc_d(struct kvm_vcpu *vcpu, INST64 inst)
1339 {
1340         unsigned long itir, ifa, pte;
1341
1342         itir = vcpu_get_itir(vcpu);
1343         ifa = vcpu_get_ifa(vcpu);
1344         pte = vcpu_get_gr(vcpu, inst.M45.r2);
1345         vcpu_itc_d(vcpu, pte, itir, ifa);
1346 }
1347
1348 void kvm_itc_i(struct kvm_vcpu *vcpu, INST64 inst)
1349 {
1350         unsigned long itir, ifa, pte;
1351
1352         itir = vcpu_get_itir(vcpu);
1353         ifa = vcpu_get_ifa(vcpu);
1354         pte = vcpu_get_gr(vcpu, inst.M45.r2);
1355         vcpu_itc_i(vcpu, pte, itir, ifa);
1356 }
1357
1358 /*************************************
1359  * Moves to semi-privileged registers
1360  *************************************/
1361
1362 void kvm_mov_to_ar_imm(struct kvm_vcpu *vcpu, INST64 inst)
1363 {
1364         unsigned long imm;
1365
1366         if (inst.M30.s)
1367                 imm = -inst.M30.imm;
1368         else
1369                 imm = inst.M30.imm;
1370
1371         vcpu_set_itc(vcpu, imm);
1372 }
1373
1374 void kvm_mov_to_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
1375 {
1376         unsigned long r2;
1377
1378         r2 = vcpu_get_gr(vcpu, inst.M29.r2);
1379         vcpu_set_itc(vcpu, r2);
1380 }
1381
1382 void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
1383 {
1384         unsigned long r1;
1385
1386         r1 = vcpu_get_itc(vcpu);
1387         vcpu_set_gr(vcpu, inst.M31.r1, r1, 0);
1388 }
1389
1390 /**************************************************************************
1391   struct kvm_vcpu protection key register access routines
1392  **************************************************************************/
1393
1394 unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg)
1395 {
1396         return ((unsigned long)ia64_get_pkr(reg));
1397 }
1398
1399 void vcpu_set_pkr(struct kvm_vcpu *vcpu, unsigned long reg, unsigned long val)
1400 {
1401         ia64_set_pkr(reg, val);
1402 }
1403
1404 /********************************
1405  * Moves to privileged registers
1406  ********************************/
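/*
 * vcpu_set_rr() keeps the guest's region registers in arch.vrr[] and
 * loads the translated machine value (vrrtomrr()): rr0/rr4 are only
 * loaded while the guest is in virtual mode, since in physical mode
 * those regions carry the metaphysical RIDs, and a write to rr6 is
 * handed back to the host side through an EXIT_REASON_SWITCH_RR6
 * transition.
 */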
1407 unsigned long vcpu_set_rr(struct kvm_vcpu *vcpu, unsigned long reg,
1408                                         unsigned long val)
1409 {
1410         union ia64_rr oldrr, newrr;
1411         unsigned long rrval;
1412         struct exit_ctl_data *p = &vcpu->arch.exit_data;
1413         unsigned long psr;
1414
1415         oldrr.val = vcpu_get_rr(vcpu, reg);
1416         newrr.val = val;
1417         vcpu->arch.vrr[reg >> VRN_SHIFT] = val;
1418
1419         switch ((unsigned long)(reg >> VRN_SHIFT)) {
1420         case VRN6:
1421                 vcpu->arch.vmm_rr = vrrtomrr(val);
1422                 local_irq_save(psr);
1423                 p->exit_reason = EXIT_REASON_SWITCH_RR6;
1424                 vmm_transition(vcpu);
1425                 local_irq_restore(psr);
1426                 break;
1427         case VRN4:
1428                 rrval = vrrtomrr(val);
1429                 vcpu->arch.metaphysical_saved_rr4 = rrval;
1430                 if (!is_physical_mode(vcpu))
1431                         ia64_set_rr(reg, rrval);
1432                 break;
1433         case VRN0:
1434                 rrval = vrrtomrr(val);
1435                 vcpu->arch.metaphysical_saved_rr0 = rrval;
1436                 if (!is_physical_mode(vcpu))
1437                         ia64_set_rr(reg, rrval);
1438                 break;
1439         default:
1440                 ia64_set_rr(reg, vrrtomrr(val));
1441                 break;
1442         }
1443
1444         return (IA64_NO_FAULT);
1445 }
1446
1447 void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst)
1448 {
1449         unsigned long r3, r2;
1450
1451         r3 = vcpu_get_gr(vcpu, inst.M42.r3);
1452         r2 = vcpu_get_gr(vcpu, inst.M42.r2);
1453         vcpu_set_rr(vcpu, r3, r2);
1454 }
1455
1456 void kvm_mov_to_dbr(struct kvm_vcpu *vcpu, INST64 inst)
1457 {
1458 }
1459
1460 void kvm_mov_to_ibr(struct kvm_vcpu *vcpu, INST64 inst)
1461 {
1462 }
1463
1464 void kvm_mov_to_pmc(struct kvm_vcpu *vcpu, INST64 inst)
1465 {
1466         unsigned long r3, r2;
1467
1468         r3 = vcpu_get_gr(vcpu, inst.M42.r3);
1469         r2 = vcpu_get_gr(vcpu, inst.M42.r2);
1470         vcpu_set_pmc(vcpu, r3, r2);
1471 }
1472
1473 void kvm_mov_to_pmd(struct kvm_vcpu *vcpu, INST64 inst)
1474 {
1475         unsigned long r3, r2;
1476
1477         r3 = vcpu_get_gr(vcpu, inst.M42.r3);
1478         r2 = vcpu_get_gr(vcpu, inst.M42.r2);
1479         vcpu_set_pmd(vcpu, r3, r2);
1480 }
1481
1482 void kvm_mov_to_pkr(struct kvm_vcpu *vcpu, INST64 inst)
1483 {
1484         u64 r3, r2;
1485
1486         r3 = vcpu_get_gr(vcpu, inst.M42.r3);
1487         r2 = vcpu_get_gr(vcpu, inst.M42.r2);
1488         vcpu_set_pkr(vcpu, r3, r2);
1489 }
1490
1491 void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst)
1492 {
1493         unsigned long r3, r1;
1494
1495         r3 = vcpu_get_gr(vcpu, inst.M43.r3);
1496         r1 = vcpu_get_rr(vcpu, r3);
1497         vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
1498 }
1499
1500 void kvm_mov_from_pkr(struct kvm_vcpu *vcpu, INST64 inst)
1501 {
1502         unsigned long r3, r1;
1503
1504         r3 = vcpu_get_gr(vcpu, inst.M43.r3);
1505         r1 = vcpu_get_pkr(vcpu, r3);
1506         vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
1507 }
1508
1509 void kvm_mov_from_dbr(struct kvm_vcpu *vcpu, INST64 inst)
1510 {
1511         unsigned long r3, r1;
1512
1513         r3 = vcpu_get_gr(vcpu, inst.M43.r3);
1514         r1 = vcpu_get_dbr(vcpu, r3);
1515         vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
1516 }
1517
1518 void kvm_mov_from_ibr(struct kvm_vcpu *vcpu, INST64 inst)
1519 {
1520         unsigned long r3, r1;
1521
1522         r3 = vcpu_get_gr(vcpu, inst.M43.r3);
1523         r1 = vcpu_get_ibr(vcpu, r3);
1524         vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
1525 }
1526
1527 void kvm_mov_from_pmc(struct kvm_vcpu *vcpu, INST64 inst)
1528 {
1529         unsigned long r3, r1;
1530
1531         r3 = vcpu_get_gr(vcpu, inst.M43.r3);
1532         r1 = vcpu_get_pmc(vcpu, r3);
1533         vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
1534 }
1535
1536 unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg)
1537 {
1538         /* FIXME: This could get called as a result of a rsvd-reg fault */
1539         if (reg > (ia64_get_cpuid(3) & 0xff))
1540                 return 0;
1541         else
1542                 return ia64_get_cpuid(reg);
1543 }
1544
1545 void kvm_mov_from_cpuid(struct kvm_vcpu *vcpu, INST64 inst)
1546 {
1547         unsigned long r3, r1;
1548
1549         r3 = vcpu_get_gr(vcpu, inst.M43.r3);
1550         r1 = vcpu_get_cpuid(vcpu, r3);
1551         vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
1552 }
1553
1554 void vcpu_set_tpr(struct kvm_vcpu *vcpu, unsigned long val)
1555 {
1556         VCPU(vcpu, tpr) = val;
1557         vcpu->arch.irq_check = 1;
1558 }
1559
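/*
 * kvm_mov_to_cr()/kvm_mov_from_cr() emulate moves to and from the
 * control registers by number: cr0 (dcr), cr1 (itm), cr65 (ivr,
 * read-only), cr66 (tpr) and cr67 (eoi) get special handling; every
 * other cr is simply stored in or read back from the vcr[] array in
 * the VPD.
 */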
1560 unsigned long kvm_mov_to_cr(struct kvm_vcpu *vcpu, INST64 inst)
1561 {
1562         unsigned long r2;
1563
1564         r2 = vcpu_get_gr(vcpu, inst.M32.r2);
1565         VCPU(vcpu, vcr[inst.M32.cr3]) = r2;
1566
1567         switch (inst.M32.cr3) {
1568         case 0:
1569                 vcpu_set_dcr(vcpu, r2);
1570                 break;
1571         case 1:
1572                 vcpu_set_itm(vcpu, r2);
1573                 break;
1574         case 66:
1575                 vcpu_set_tpr(vcpu, r2);
1576                 break;
1577         case 67:
1578                 vcpu_set_eoi(vcpu, r2);
1579                 break;
1580         default:
1581                 break;
1582         }
1583
1584         return 0;
1585 }
1586
1587 unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
1588 {
1589         unsigned long tgt = inst.M33.r1;
1590         unsigned long val;
1591
1592         switch (inst.M33.cr3) {
1593         case 65:
1594                 val = vcpu_get_ivr(vcpu);
1595                 vcpu_set_gr(vcpu, tgt, val, 0);
1596                 break;
1597
1598         case 67:
1599                 vcpu_set_gr(vcpu, tgt, 0L, 0);
1600                 break;
1601         default:
1602                 val = VCPU(vcpu, vcr[inst.M33.cr3]);
1603                 vcpu_set_gr(vcpu, tgt, val, 0);
1604                 break;
1605         }
1606
1607         return 0;
1608 }
1609
1610 void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val)
1611 {
1612
1613         unsigned long mask;
1614         struct kvm_pt_regs *regs;
1615         struct ia64_psr old_psr, new_psr;
1616
1617         old_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
1618
1619         regs = vcpu_regs(vcpu);
        /*
         * We only support guests with:
         *  vpsr.pk = 0
         *  vpsr.is = 0
         * Otherwise panic.
         */
1625         if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM))
                panic_vm(vcpu, "Only support guests with vpsr.pk = 0 "
                               "and vpsr.is = 0\n");
1628
1629         /*
1630          * For those IA64_PSR bits: id/da/dd/ss/ed/ia
1631          * Since these bits will become 0, after success execution of each
1632          * instruction, we will change set them to mIA64_PSR
1633          */
1634         VCPU(vcpu, vpsr) = val
1635                 & (~(IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |
1636                         IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA));
1637
1638         if (!old_psr.i && (val & IA64_PSR_I)) {
1639                 /* vpsr.i 0->1 */
1640                 vcpu->arch.irq_check = 1;
1641         }
1642         new_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
1643
1644         /*
1645          * All vIA64_PSR bits shall go to mPSR (v->tf->tf_special.psr)
1646          * , except for the following bits:
1647          *  ic/i/dt/si/rt/mc/it/bn/vm
1648          */
1649         mask =  IA64_PSR_IC + IA64_PSR_I + IA64_PSR_DT + IA64_PSR_SI +
1650                 IA64_PSR_RT + IA64_PSR_MC + IA64_PSR_IT + IA64_PSR_BN +
1651                 IA64_PSR_VM;
1652
1653         regs->cr_ipsr = (regs->cr_ipsr & mask) | (val & (~mask));
1654
1655         check_mm_mode_switch(vcpu, old_psr, new_psr);
1656
1657         return;
1658 }
1659
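/*
 * cover emulation: allocate a zero-size current frame.  As architected,
 * the old CFM is saved into cr.ifs only when interruption collection
 * (psr.ic) is off.
 */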
1660 unsigned long vcpu_cover(struct kvm_vcpu *vcpu)
1661 {
1662         struct ia64_psr vpsr;
1663
1664         struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1665         vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
1666
1667         if (!vpsr.ic)
1668                 VCPU(vcpu, ifs) = regs->cr_ifs;
1669         regs->cr_ifs = IA64_IFS_V;
1670         return (IA64_NO_FAULT);
1671 }
1672
1673
1674
1675 /**************************************************************************
1676   VCPU banked general register access routines
1677  **************************************************************************/
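/*
 * bsw.0/bsw.1 emulation: the banked registers are r16-r31.  Switching
 * banks exchanges the live r16-r31 in pt_regs with the shadow copies kept
 * in the VPD (vbgr[] for bank 0, vgr[] for bank 1); the matching 16 UNAT
 * bits are transferred by the assembly helpers below.
 */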
1678 #define vcpu_bsw0_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT)  \
1679         do {                                                            \
1680                 __asm__ __volatile__ (                                  \
1681                                 ";;extr.u %0 = %3,%6,16;;\n"            \
1682                                 "dep %1 = %0, %1, 0, 16;;\n"            \
1683                                 "st8 [%4] = %1\n"                       \
1684                                 "extr.u %0 = %2, 16, 16;;\n"            \
1685                                 "dep %3 = %0, %3, %6, 16;;\n"           \
1686                                 "st8 [%5] = %3\n"                       \
1687                                 ::"r"(i), "r"(*b1unat), "r"(*b0unat),   \
1688                                 "r"(*runat), "r"(b1unat), "r"(runat),   \
1689                                 "i"(VMM_PT_REGS_R16_SLOT) : "memory");  \
1690         } while (0)
1691
1692 void vcpu_bsw0(struct kvm_vcpu *vcpu)
1693 {
1694         unsigned long i;
1695
1696         struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1697         unsigned long *r = &regs->r16;
1698         unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
1699         unsigned long *b1 = &VCPU(vcpu, vgr[0]);
1700         unsigned long *runat = &regs->eml_unat;
1701         unsigned long *b0unat = &VCPU(vcpu, vbnat);
1702         unsigned long *b1unat = &VCPU(vcpu, vnat);
1703
1704
1705         if (VCPU(vcpu, vpsr) & IA64_PSR_BN) {
1706                 for (i = 0; i < 16; i++) {
1707                         *b1++ = *r;
1708                         *r++ = *b0++;
1709                 }
1710                 vcpu_bsw0_unat(i, b0unat, b1unat, runat,
1711                                 VMM_PT_REGS_R16_SLOT);
1712                 VCPU(vcpu, vpsr) &= ~IA64_PSR_BN;
1713         }
1714 }
1715
1716 #define vcpu_bsw1_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT)  \
1717         do {                                                            \
1718                 __asm__ __volatile__ (";;extr.u %0 = %3, %6, 16;;\n"    \
1719                                 "dep %1 = %0, %1, 16, 16;;\n"           \
1720                                 "st8 [%4] = %1\n"                       \
1721                                 "extr.u %0 = %2, 0, 16;;\n"             \
1722                                 "dep %3 = %0, %3, %6, 16;;\n"           \
1723                                 "st8 [%5] = %3\n"                       \
1724                                 ::"r"(i), "r"(*b0unat), "r"(*b1unat),   \
1725                                 "r"(*runat), "r"(b0unat), "r"(runat),   \
1726                                 "i"(VMM_PT_REGS_R16_SLOT) : "memory");  \
1727         } while (0)
1728
1729 void vcpu_bsw1(struct kvm_vcpu *vcpu)
1730 {
1731         unsigned long i;
1732         struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1733         unsigned long *r = &regs->r16;
1734         unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
1735         unsigned long *b1 = &VCPU(vcpu, vgr[0]);
1736         unsigned long *runat = &regs->eml_unat;
1737         unsigned long *b0unat = &VCPU(vcpu, vbnat);
1738         unsigned long *b1unat = &VCPU(vcpu, vnat);
1739
1740         if (!(VCPU(vcpu, vpsr) & IA64_PSR_BN)) {
1741                 for (i = 0; i < 16; i++) {
1742                         *b0++ = *r;
1743                         *r++ = *b1++;
1744                 }
1745                 vcpu_bsw1_unat(i, b0unat, b1unat, runat,
1746                                 VMM_PT_REGS_R16_SLOT);
1747                 VCPU(vcpu, vpsr) |= IA64_PSR_BN;
1748         }
1749 }
1750
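/*
 * rfi emulation: restore the guest PSR from its interruption PSR, switch
 * to the register bank selected by ipsr.bn, restore cr.ifs if it is valid
 * (bit 63 set), and resume at the saved cr.iip.
 */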
1751 void vcpu_rfi(struct kvm_vcpu *vcpu)
1752 {
1753         unsigned long ifs, psr;
1754         struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1755
1756         psr = VCPU(vcpu, ipsr);
1757         if (psr & IA64_PSR_BN)
1758                 vcpu_bsw1(vcpu);
1759         else
1760                 vcpu_bsw0(vcpu);
1761         vcpu_set_psr(vcpu, psr);
1762         ifs = VCPU(vcpu, ifs);
1763         if (ifs >> 63)
1764                 regs->cr_ifs = ifs;
1765         regs->cr_iip = VCPU(vcpu, iip);
1766 }
1767
1768 /*
1769  * VPSR cannot keep track of the guest PSR bits listed in 'mask' below;
1770  * this function reconstructs the full guest PSR from vpsr and cr.ipsr.
1771  */
1772
1773 unsigned long vcpu_get_psr(struct kvm_vcpu *vcpu)
1774 {
1775         unsigned long mask;
1776         struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1777
1778         mask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
1779                 IA64_PSR_MFH | IA64_PSR_CPL | IA64_PSR_RI;
1780         return (VCPU(vcpu, vpsr) & ~mask) | (regs->cr_ipsr & mask);
1781 }
1782
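/*
 * rsm/ssm emulation: the 24-bit immediate is scattered across the i, i2
 * and imm fields of the M44 instruction format and is reassembled below
 * before being cleared from (rsm) or OR-ed into (ssm) the guest PSR.
 */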
1783 void kvm_rsm(struct kvm_vcpu *vcpu, INST64 inst)
1784 {
1785         unsigned long vpsr;
1786         unsigned long imm24 = (inst.M44.i<<23) | (inst.M44.i2<<21)
1787                                         | inst.M44.imm;
1788
1789         vpsr = vcpu_get_psr(vcpu);
1790         vpsr &= (~imm24);
1791         vcpu_set_psr(vcpu, vpsr);
1792 }
1793
1794 void kvm_ssm(struct kvm_vcpu *vcpu, INST64 inst)
1795 {
1796         unsigned long vpsr;
1797         unsigned long imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21)
1798                                 | inst.M44.imm;
1799
1800         vpsr = vcpu_get_psr(vcpu);
1801         vpsr |= imm24;
1802         vcpu_set_psr(vcpu, vpsr);
1803 }
1804
1805 /* Generate Mask
1806  * Parameter:
1807  *  bit -- starting bit
1808  *  len -- how many bits
1809  */
1810 #define MASK(bit,len)                                   \
1811 ({                                                      \
1812                 __u64   ret;                            \
1813                                                         \
1814                 __asm __volatile("dep %0=-1, r0, %1, %2"\
1815                                 : "=r" (ret):           \
1816                   "M" (bit),                            \
1817                   "M" (len));                           \
1818                 ret;                                    \
1819 })
1820
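/*
 * Illustrative values (worked out here, not from the source): MASK(0, 32)
 * is 0x00000000ffffffff and MASK(32, 32) is 0xffffffff00000000, so
 * vcpu_set_psr_l() below merges the caller's lower 32 PSR bits with the
 * vcpu's current upper 32 bits.
 */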
1821 void vcpu_set_psr_l(struct kvm_vcpu *vcpu, unsigned long val)
1822 {
1823         val = (val & MASK(0, 32)) | (vcpu_get_psr(vcpu) & MASK(32, 32));
1824         vcpu_set_psr(vcpu, val);
1825 }
1826
1827 void kvm_mov_to_psr(struct kvm_vcpu *vcpu, INST64 inst)
1828 {
1829         unsigned long val;
1830
1831         val = vcpu_get_gr(vcpu, inst.M35.r2);
1832         vcpu_set_psr_l(vcpu, val);
1833 }
1834
1835 void kvm_mov_from_psr(struct kvm_vcpu *vcpu, INST64 inst)
1836 {
1837         unsigned long val;
1838
1839         val = vcpu_get_psr(vcpu);
1840         val = (val & MASK(0, 32)) | (val & MASK(35, 2));
1841         vcpu_set_gr(vcpu, inst.M33.r1, val, 0);
1842 }
1843
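/*
 * IA-64 bundles are 16 bytes wide and hold three instruction slots;
 * ipsr.ri is the slot index.  Stepping past slot 2 wraps ri to 0 and
 * advances cr.iip to the next bundle.
 */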
1844 void vcpu_increment_iip(struct kvm_vcpu *vcpu)
1845 {
1846         struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1847         struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
1848         if (ipsr->ri == 2) {
1849                 ipsr->ri = 0;
1850                 regs->cr_iip += 16;
1851         } else
1852                 ipsr->ri++;
1853 }
1854
1855 void vcpu_decrement_iip(struct kvm_vcpu *vcpu)
1856 {
1857         struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1858         struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
1859
1860         if (ipsr->ri == 0) {
1861                 ipsr->ri = 2;
1862                 regs->cr_iip -= 16;
1863         } else
1864                 ipsr->ri--;
1865 }
1866
1867 /** Emulate a privileged operation.
1868  *
1869  * @param vcpu virtual cpu
1870  * @param regs guest registers saved at the virtualization fault
1871  * The cause and opcode of the faulting instruction are taken from
1872  * VMX(vcpu, cause) and VMX(vcpu, opcode).
1873  */
1874
1875 void kvm_emulate(struct kvm_vcpu *vcpu, struct kvm_pt_regs *regs)
1876 {
1877         unsigned long status, cause, opcode;
1878         INST64 inst;
1879
1880         status = IA64_NO_FAULT;
1881         cause = VMX(vcpu, cause);
1882         opcode = VMX(vcpu, opcode);
1883         inst.inst = opcode;
1884         /*
1885          * Switch to actual virtual rid in rr0 and rr4,
1886          * which is required by some tlb related instructions.
1887          */
1888         prepare_if_physical_mode(vcpu);
1889
1890         switch (cause) {
1891         case EVENT_RSM:
1892                 kvm_rsm(vcpu, inst);
1893                 break;
1894         case EVENT_SSM:
1895                 kvm_ssm(vcpu, inst);
1896                 break;
1897         case EVENT_MOV_TO_PSR:
1898                 kvm_mov_to_psr(vcpu, inst);
1899                 break;
1900         case EVENT_MOV_FROM_PSR:
1901                 kvm_mov_from_psr(vcpu, inst);
1902                 break;
1903         case EVENT_MOV_FROM_CR:
1904                 kvm_mov_from_cr(vcpu, inst);
1905                 break;
1906         case EVENT_MOV_TO_CR:
1907                 kvm_mov_to_cr(vcpu, inst);
1908                 break;
1909         case EVENT_BSW_0:
1910                 vcpu_bsw0(vcpu);
1911                 break;
1912         case EVENT_BSW_1:
1913                 vcpu_bsw1(vcpu);
1914                 break;
1915         case EVENT_COVER:
1916                 vcpu_cover(vcpu);
1917                 break;
1918         case EVENT_RFI:
1919                 vcpu_rfi(vcpu);
1920                 break;
1921         case EVENT_ITR_D:
1922                 kvm_itr_d(vcpu, inst);
1923                 break;
1924         case EVENT_ITR_I:
1925                 kvm_itr_i(vcpu, inst);
1926                 break;
1927         case EVENT_PTR_D:
1928                 kvm_ptr_d(vcpu, inst);
1929                 break;
1930         case EVENT_PTR_I:
1931                 kvm_ptr_i(vcpu, inst);
1932                 break;
1933         case EVENT_ITC_D:
1934                 kvm_itc_d(vcpu, inst);
1935                 break;
1936         case EVENT_ITC_I:
1937                 kvm_itc_i(vcpu, inst);
1938                 break;
1939         case EVENT_PTC_L:
1940                 kvm_ptc_l(vcpu, inst);
1941                 break;
1942         case EVENT_PTC_G:
1943                 kvm_ptc_g(vcpu, inst);
1944                 break;
1945         case EVENT_PTC_GA:
1946                 kvm_ptc_ga(vcpu, inst);
1947                 break;
1948         case EVENT_PTC_E:
1949                 kvm_ptc_e(vcpu, inst);
1950                 break;
1951         case EVENT_MOV_TO_RR:
1952                 kvm_mov_to_rr(vcpu, inst);
1953                 break;
1954         case EVENT_MOV_FROM_RR:
1955                 kvm_mov_from_rr(vcpu, inst);
1956                 break;
1957         case EVENT_THASH:
1958                 kvm_thash(vcpu, inst);
1959                 break;
1960         case EVENT_TTAG:
1961                 kvm_ttag(vcpu, inst);
1962                 break;
1963         case EVENT_TPA:
1964                 status = kvm_tpa(vcpu, inst);
1965                 break;
1966         case EVENT_TAK:
1967                 kvm_tak(vcpu, inst);
1968                 break;
1969         case EVENT_MOV_TO_AR_IMM:
1970                 kvm_mov_to_ar_imm(vcpu, inst);
1971                 break;
1972         case EVENT_MOV_TO_AR:
1973                 kvm_mov_to_ar_reg(vcpu, inst);
1974                 break;
1975         case EVENT_MOV_FROM_AR:
1976                 kvm_mov_from_ar_reg(vcpu, inst);
1977                 break;
1978         case EVENT_MOV_TO_DBR:
1979                 kvm_mov_to_dbr(vcpu, inst);
1980                 break;
1981         case EVENT_MOV_TO_IBR:
1982                 kvm_mov_to_ibr(vcpu, inst);
1983                 break;
1984         case EVENT_MOV_TO_PMC:
1985                 kvm_mov_to_pmc(vcpu, inst);
1986                 break;
1987         case EVENT_MOV_TO_PMD:
1988                 kvm_mov_to_pmd(vcpu, inst);
1989                 break;
1990         case EVENT_MOV_TO_PKR:
1991                 kvm_mov_to_pkr(vcpu, inst);
1992                 break;
1993         case EVENT_MOV_FROM_DBR:
1994                 kvm_mov_from_dbr(vcpu, inst);
1995                 break;
1996         case EVENT_MOV_FROM_IBR:
1997                 kvm_mov_from_ibr(vcpu, inst);
1998                 break;
1999         case EVENT_MOV_FROM_PMC:
2000                 kvm_mov_from_pmc(vcpu, inst);
2001                 break;
2002         case EVENT_MOV_FROM_PKR:
2003                 kvm_mov_from_pkr(vcpu, inst);
2004                 break;
2005         case EVENT_MOV_FROM_CPUID:
2006                 kvm_mov_from_cpuid(vcpu, inst);
2007                 break;
2008         case EVENT_VMSW:
2009                 status = IA64_FAULT;
2010                 break;
2011         default:
2012                 break;
2013         }
2014         /* Advance the guest IP unless the handler faulted or it was rfi. */
2015         if (status == IA64_NO_FAULT && cause != EVENT_RFI)
2016                 vcpu_increment_iip(vcpu);
2017
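        /*
         * Undo prepare_if_physical_mode(): if the guest was running in
         * (meta)physical mode, switch rr0/rr4 back to the metaphysical
         * RIDs before returning to it.
         */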
2018         recover_if_physical_mode(vcpu);
2019 }
2020
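/*
 * Reset-style defaults for the virtual cpu: the guest starts in
 * (meta)physical mode and register bank 1, and the interruption vector
 * registers (itv, pmv, cmcv, lrr0/1) as well as tpr.mmi start out masked
 * (bit 16 set).
 */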
2021 void init_vcpu(struct kvm_vcpu *vcpu)
2022 {
2023         int i;
2024
2025         vcpu->arch.mode_flags = GUEST_IN_PHY;
2026         VMX(vcpu, vrr[0]) = 0x38;
2027         VMX(vcpu, vrr[1]) = 0x38;
2028         VMX(vcpu, vrr[2]) = 0x38;
2029         VMX(vcpu, vrr[3]) = 0x38;
2030         VMX(vcpu, vrr[4]) = 0x38;
2031         VMX(vcpu, vrr[5]) = 0x38;
2032         VMX(vcpu, vrr[6]) = 0x38;
2033         VMX(vcpu, vrr[7]) = 0x38;
2034         VCPU(vcpu, vpsr) = IA64_PSR_BN;
2035         VCPU(vcpu, dcr) = 0;
2036         /* pta.size must not be 0.  The minimum is 15 (32k) */
2037         VCPU(vcpu, pta) = 15 << 2;
2038         VCPU(vcpu, itv) = 0x10000;
2039         VCPU(vcpu, itm) = 0;
2040         VMX(vcpu, last_itc) = 0;
2041
2042         VCPU(vcpu, lid) = VCPU_LID(vcpu);
2043         VCPU(vcpu, ivr) = 0;
2044         VCPU(vcpu, tpr) = 0x10000;
2045         VCPU(vcpu, eoi) = 0;
2046         VCPU(vcpu, irr[0]) = 0;
2047         VCPU(vcpu, irr[1]) = 0;
2048         VCPU(vcpu, irr[2]) = 0;
2049         VCPU(vcpu, irr[3]) = 0;
2050         VCPU(vcpu, pmv) = 0x10000;
2051         VCPU(vcpu, cmcv) = 0x10000;
2052         VCPU(vcpu, lrr0) = 0x10000;   /* default reset value? */
2053         VCPU(vcpu, lrr1) = 0x10000;   /* default reset value? */
2054         update_vhpi(vcpu, NULL_VECTOR);
2055         VLSAPIC_XTP(vcpu) = 0x80;       /* disabled */
2056
2057         for (i = 0; i < 4; i++)
2058                 VLSAPIC_INSVC(vcpu, i) = 0;
2059 }
2060
2061 void kvm_init_all_rr(struct kvm_vcpu *vcpu)
2062 {
2063         unsigned long psr;
2064
2065         local_irq_save(psr);
2066
2067         /* WARNING: virtual mode and physical mode must not coexist
2068          * in the same region.
2069          */
2070
2071         vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(VMX(vcpu, vrr[VRN0]));
2072         vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(VMX(vcpu, vrr[VRN4]));
2073
2074         if (is_physical_mode(vcpu)) {
2075                 if (vcpu->arch.mode_flags & GUEST_PHY_EMUL)
2076                         panic_vm(vcpu, "Machine Status conflicts!\n");
2077
2078                 ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0);
2079                 ia64_dv_serialize_data();
2080                 ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rr4);
2081                 ia64_dv_serialize_data();
2082         } else {
2083                 ia64_set_rr((VRN0 << VRN_SHIFT),
2084                                 vcpu->arch.metaphysical_saved_rr0);
2085                 ia64_dv_serialize_data();
2086                 ia64_set_rr((VRN4 << VRN_SHIFT),
2087                                 vcpu->arch.metaphysical_saved_rr4);
2088                 ia64_dv_serialize_data();
2089         }
2090         ia64_set_rr((VRN1 << VRN_SHIFT),
2091                         vrrtomrr(VMX(vcpu, vrr[VRN1])));
2092         ia64_dv_serialize_data();
2093         ia64_set_rr((VRN2 << VRN_SHIFT),
2094                         vrrtomrr(VMX(vcpu, vrr[VRN2])));
2095         ia64_dv_serialize_data();
2096         ia64_set_rr((VRN3 << VRN_SHIFT),
2097                         vrrtomrr(VMX(vcpu, vrr[VRN3])));
2098         ia64_dv_serialize_data();
2099         ia64_set_rr((VRN5 << VRN_SHIFT),
2100                         vrrtomrr(VMX(vcpu, vrr[VRN5])));
2101         ia64_dv_serialize_data();
2102         ia64_set_rr((VRN7 << VRN_SHIFT),
2103                         vrrtomrr(VMX(vcpu, vrr[VRN7])));
2104         ia64_dv_serialize_data();
2105         ia64_srlz_d();
2106         ia64_set_psr(psr);
2107 }
2108
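/*
 * First entry into the VMM for a vcpu: restore the VPD through the PAL
 * VPS interface, set up the shadow VTLB/VHPT and the vcpu's initial
 * state and region registers, then branch to the reset entry point.
 */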
2109 int vmm_entry(void)
2110 {
2111         struct kvm_vcpu *v;
2112         v = current_vcpu;
2113
2114         ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)v->arch.vpd,
2115                                                 0, 0, 0, 0, 0, 0);
2116         kvm_init_vtlb(v);
2117         kvm_init_vhpt(v);
2118         init_vcpu(v);
2119         kvm_init_all_rr(v);
2120         vmm_reset_entry();
2121
2122         return 0;
2123 }
2124
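/*
 * Dump the guest register frame saved at the last interruption; used for
 * panic diagnostics from panic_vm() below.
 */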
2125 static void kvm_show_registers(struct kvm_pt_regs *regs)
2126 {
2127         unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;
2128
2129         struct kvm_vcpu *vcpu = current_vcpu;
2130         if (vcpu != NULL)
2131                 printk("vcpu 0x%p vcpu_id %d\n",
2132                        vcpu, vcpu->vcpu_id);
2133
2134         printk("psr : %016lx ifs : %016lx ip  : [<%016lx>]\n",
2135                regs->cr_ipsr, regs->cr_ifs, ip);
2136
2137         printk("unat: %016lx pfs : %016lx rsc : %016lx\n",
2138                regs->ar_unat, regs->ar_pfs, regs->ar_rsc);
2139         printk("rnat: %016lx bspstore: %016lx pr  : %016lx\n",
2140                regs->ar_rnat, regs->ar_bspstore, regs->pr);
2141         printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n",
2142                regs->loadrs, regs->ar_ccv, regs->ar_fpsr);
2143         printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd);
2144         printk("b0  : %016lx b6  : %016lx b7  : %016lx\n", regs->b0,
2145                                                         regs->b6, regs->b7);
2146         printk("f6  : %05lx%016lx f7  : %05lx%016lx\n",
2147                regs->f6.u.bits[1], regs->f6.u.bits[0],
2148                regs->f7.u.bits[1], regs->f7.u.bits[0]);
2149         printk("f8  : %05lx%016lx f9  : %05lx%016lx\n",
2150                regs->f8.u.bits[1], regs->f8.u.bits[0],
2151                regs->f9.u.bits[1], regs->f9.u.bits[0]);
2152         printk("f10 : %05lx%016lx f11 : %05lx%016lx\n",
2153                regs->f10.u.bits[1], regs->f10.u.bits[0],
2154                regs->f11.u.bits[1], regs->f11.u.bits[0]);
2155
2156         printk("r1  : %016lx r2  : %016lx r3  : %016lx\n", regs->r1,
2157                                                         regs->r2, regs->r3);
2158         printk("r8  : %016lx r9  : %016lx r10 : %016lx\n", regs->r8,
2159                                                         regs->r9, regs->r10);
2160         printk("r11 : %016lx r12 : %016lx r13 : %016lx\n", regs->r11,
2161                                                         regs->r12, regs->r13);
2162         printk("r14 : %016lx r15 : %016lx r16 : %016lx\n", regs->r14,
2163                                                         regs->r15, regs->r16);
2164         printk("r17 : %016lx r18 : %016lx r19 : %016lx\n", regs->r17,
2165                                                         regs->r18, regs->r19);
2166         printk("r20 : %016lx r21 : %016lx r22 : %016lx\n", regs->r20,
2167                                                         regs->r21, regs->r22);
2168         printk("r23 : %016lx r24 : %016lx r25 : %016lx\n", regs->r23,
2169                                                         regs->r24, regs->r25);
2170         printk("r26 : %016lx r27 : %016lx r28 : %016lx\n", regs->r26,
2171                                                         regs->r27, regs->r28);
2172         printk("r29 : %016lx r30 : %016lx r31 : %016lx\n", regs->r29,
2173                                                         regs->r30, regs->r31);
2174
2175 }
2176
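/*
 * Fatal guest/VMM condition: print the message and register state from
 * the VMM side, then report EXIT_REASON_VM_PANIC to the host through
 * vmm_transition().  This function does not return.
 */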
2177 void panic_vm(struct kvm_vcpu *v, const char *fmt, ...)
2178 {
2179         va_list args;
2180         char buf[256];
2181
2182         struct kvm_pt_regs *regs = vcpu_regs(v);
2183         struct exit_ctl_data *p = &v->arch.exit_data;
2184         va_start(args, fmt);
2185         vsnprintf(buf, sizeof(buf), fmt, args);
2186         va_end(args);
2187         printk("%s", buf);
2188         kvm_show_registers(regs);
2189         p->exit_reason = EXIT_REASON_VM_PANIC;
2190         vmm_transition(v);
2191         /* Never returns. */
2192         while (1);
2193 }