1 /******************************************************************************
4 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
6 * Copyright (c) 2005 Keir Fraser
8 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
9 * privileged instructions:
11 * Copyright (C) 2006 Qumranet
12 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
14 * Avi Kivity <avi@qumranet.com>
15 * Yaniv Kamay <yaniv@qumranet.com>
17 * This work is licensed under the terms of the GNU GPL, version 2. See
18 * the COPYING file in the top-level directory.
20 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
23 #include <linux/kvm_host.h>
24 #include "kvm_cache_regs.h"
25 #include <linux/module.h>
26 #include <asm/kvm_emulate.h>
27 #include <linux/stringify.h>
36 #define OpImplicit 1ull /* No generic decode */
37 #define OpReg 2ull /* Register */
38 #define OpMem 3ull /* Memory */
39 #define OpAcc 4ull /* Accumulator: AL/AX/EAX/RAX */
40 #define OpDI 5ull /* ES:DI/EDI/RDI */
41 #define OpMem64 6ull /* Memory, 64-bit */
42 #define OpImmUByte 7ull /* Zero-extended 8-bit immediate */
43 #define OpDX 8ull /* DX register */
44 #define OpCL 9ull /* CL register (for shifts) */
45 #define OpImmByte 10ull /* 8-bit sign extended immediate */
46 #define OpOne 11ull /* Implied 1 */
47 #define OpImm 12ull /* Sign extended up to 32-bit immediate */
48 #define OpMem16 13ull /* Memory operand (16-bit). */
49 #define OpMem32 14ull /* Memory operand (32-bit). */
50 #define OpImmU 15ull /* Immediate operand, zero extended */
51 #define OpSI 16ull /* SI/ESI/RSI */
52 #define OpImmFAddr 17ull /* Immediate far address */
53 #define OpMemFAddr 18ull /* Far address in memory */
54 #define OpImmU16 19ull /* Immediate operand, 16 bits, zero extended */
55 #define OpES 20ull /* ES */
56 #define OpCS 21ull /* CS */
57 #define OpSS 22ull /* SS */
58 #define OpDS 23ull /* DS */
59 #define OpFS 24ull /* FS */
60 #define OpGS 25ull /* GS */
61 #define OpMem8 26ull /* 8-bit zero extended memory operand */
62 #define OpImm64 27ull /* Sign extended 16/32/64-bit immediate */
63 #define OpXLat 28ull /* memory at BX/EBX/RBX + zero-extended AL */
64 #define OpAccLo 29ull /* Low part of extended acc (AX/AX/EAX/RAX) */
65 #define OpAccHi 30ull /* High part of extended acc (-/DX/EDX/RDX) */
67 #define OpBits 5 /* Width of operand field */
68 #define OpMask ((1ull << OpBits) - 1)
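/*
 * Illustration (not used by the code below): an opcode's operand types
 * are packed into its 64-bit flag word at DstShift, SrcShift and
 * Src2Shift and recovered with OpMask, so for a "DstReg | SrcMem"
 * entry the decoder effectively sees
 *
 *	((flags >> DstShift) & OpMask) == OpReg
 *	((flags >> SrcShift) & OpMask) == OpMem
 */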
/*
 * Opcode effective-address decode tables.
72 * Note that we only emulate instructions that have at least one memory
73 * operand (excluding implicit stack references). We assume that stack
74 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */
79 /* Operand sizes: 8-bit operands or specified/overridden size. */
80 #define ByteOp (1<<0) /* 8-bit operands. */
/* Destination operand type. */
#define DstShift 1
#define ImplicitOps (OpImplicit << DstShift)
84 #define DstReg (OpReg << DstShift)
85 #define DstMem (OpMem << DstShift)
86 #define DstAcc (OpAcc << DstShift)
87 #define DstDI (OpDI << DstShift)
88 #define DstMem64 (OpMem64 << DstShift)
89 #define DstImmUByte (OpImmUByte << DstShift)
90 #define DstDX (OpDX << DstShift)
91 #define DstAccLo (OpAccLo << DstShift)
92 #define DstMask (OpMask << DstShift)
/* Source operand type. */
#define SrcShift 6
#define SrcNone (OpNone << SrcShift)
96 #define SrcReg (OpReg << SrcShift)
97 #define SrcMem (OpMem << SrcShift)
98 #define SrcMem16 (OpMem16 << SrcShift)
99 #define SrcMem32 (OpMem32 << SrcShift)
100 #define SrcImm (OpImm << SrcShift)
101 #define SrcImmByte (OpImmByte << SrcShift)
102 #define SrcOne (OpOne << SrcShift)
103 #define SrcImmUByte (OpImmUByte << SrcShift)
104 #define SrcImmU (OpImmU << SrcShift)
105 #define SrcSI (OpSI << SrcShift)
106 #define SrcXLat (OpXLat << SrcShift)
107 #define SrcImmFAddr (OpImmFAddr << SrcShift)
108 #define SrcMemFAddr (OpMemFAddr << SrcShift)
109 #define SrcAcc (OpAcc << SrcShift)
110 #define SrcImmU16 (OpImmU16 << SrcShift)
111 #define SrcImm64 (OpImm64 << SrcShift)
112 #define SrcDX (OpDX << SrcShift)
113 #define SrcMem8 (OpMem8 << SrcShift)
114 #define SrcAccHi (OpAccHi << SrcShift)
115 #define SrcMask (OpMask << SrcShift)
116 #define BitOp (1<<11)
117 #define MemAbs (1<<12) /* Memory operand is absolute displacement */
118 #define String (1<<13) /* String instruction (rep capable) */
119 #define Stack (1<<14) /* Stack instruction (push/pop) */
120 #define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */
121 #define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */
122 #define GroupDual (2<<15) /* Alternate decoding of mod == 3 */
123 #define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
124 #define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
125 #define Escape (5<<15) /* Escape to coprocessor instruction */
126 #define Sse (1<<18) /* SSE Vector instruction */
127 /* Generic ModRM decode. */
128 #define ModRM (1<<19)
/* Destination is only written; never read. */
#define Mov (1<<20)
/* Misc flags */
132 #define Prot (1<<21) /* instruction generates #UD if not in prot-mode */
133 #define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
134 #define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
135 #define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */
136 #define Undefined (1<<25) /* No Such Instruction */
137 #define Lock (1<<26) /* lock prefix is allowed for the instruction */
#define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64 (1<<28)
#define PageTable (1 << 29) /* instruction used to write page table */
141 #define NotImpl (1 << 30) /* instruction is not implemented */
142 /* Source 2 operand type */
143 #define Src2Shift (31)
144 #define Src2None (OpNone << Src2Shift)
145 #define Src2Mem (OpMem << Src2Shift)
146 #define Src2CL (OpCL << Src2Shift)
147 #define Src2ImmByte (OpImmByte << Src2Shift)
148 #define Src2One (OpOne << Src2Shift)
149 #define Src2Imm (OpImm << Src2Shift)
150 #define Src2ES (OpES << Src2Shift)
151 #define Src2CS (OpCS << Src2Shift)
152 #define Src2SS (OpSS << Src2Shift)
153 #define Src2DS (OpDS << Src2Shift)
154 #define Src2FS (OpFS << Src2Shift)
155 #define Src2GS (OpGS << Src2Shift)
156 #define Src2Mask (OpMask << Src2Shift)
157 #define Mmx ((u64)1 << 40) /* MMX Vector instruction */
158 #define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */
159 #define Unaligned ((u64)1 << 42) /* Explicitly unaligned (e.g. MOVDQU) */
160 #define Avx ((u64)1 << 43) /* Advanced Vector Extensions */
161 #define Fastop ((u64)1 << 44) /* Use opcode::u.fastop */
162 #define NoWrite ((u64)1 << 45) /* No writeback */
163 #define SrcWrite ((u64)1 << 46) /* Write back src operand */
164 #define NoMod ((u64)1 << 47) /* Mod field is ignored */
165 #define Intercept ((u64)1 << 48) /* Has valid intercept field */
166 #define CheckPerm ((u64)1 << 49) /* Has valid check_perm field */
167 #define NoBigReal ((u64)1 << 50) /* No big real mode */
168 #define PrivUD ((u64)1 << 51) /* #UD instead of #GP on CPL > 0 */
170 #define DstXacc (DstAccLo | SrcAccHi | SrcWrite)
172 #define X2(x...) x, x
173 #define X3(x...) X2(x), x
174 #define X4(x...) X2(x), X2(x)
175 #define X5(x...) X4(x), x
176 #define X6(x...) X4(x), X2(x)
177 #define X7(x...) X4(x), X3(x)
178 #define X8(x...) X4(x), X4(x)
179 #define X16(x...) X8(x), X8(x)
181 #define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
182 #define FASTOP_SIZE 8
/*
 * fastop functions have a special calling convention:
 * dst:   rax (in/out)
 * src:   rdx (in/out)
 * src2:  rcx (in)
 * flags: rflags (in/out)
 * ex:    rsi (in:fastop pointer, out:zero if exception)
193 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
194 * different operand sizes can be reached by calculation, rather than a jump
195 * table (which would be bigger than the code).
197 * fastop functions are declared as taking a never-defined fastop parameter,
198 * so they can't be called from C directly.
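 *
 * A minimal sketch of how a size-specific variant is reached (the real
 * dispatcher is the fastop() helper declared further down; em_add here
 * stands for any table generated by FASTOP2()): em_add points at the
 * 1-byte variant, and the 2/4/8-byte variants follow at FASTOP_SIZE-byte
 * steps, so for a 4-byte destination a caller can compute
 *
 *	void (*fop)(struct fastop *) = em_add;
 *	fop += __ffs(4) * FASTOP_SIZE;	-- third slot, the 32-bit "addl" one
 *
 * and then "call *fop" with dst in RAX, src in RDX and flags in RFLAGS.
 */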
207 int (*execute)(struct x86_emulate_ctxt *ctxt);
208 const struct opcode *group;
209 const struct group_dual *gdual;
210 const struct gprefix *gprefix;
211 const struct escape *esc;
212 void (*fastop)(struct fastop *fake);
214 int (*check_perm)(struct x86_emulate_ctxt *ctxt);
218 struct opcode mod012[8];
219 struct opcode mod3[8];
223 struct opcode pfx_no;
224 struct opcode pfx_66;
225 struct opcode pfx_f2;
226 struct opcode pfx_f3;
231 struct opcode high[64];
234 /* EFLAGS bit definitions. */
235 #define EFLG_ID (1<<21)
236 #define EFLG_VIP (1<<20)
237 #define EFLG_VIF (1<<19)
238 #define EFLG_AC (1<<18)
239 #define EFLG_VM (1<<17)
240 #define EFLG_RF (1<<16)
241 #define EFLG_IOPL (3<<12)
242 #define EFLG_NT (1<<14)
243 #define EFLG_OF (1<<11)
244 #define EFLG_DF (1<<10)
245 #define EFLG_IF (1<<9)
246 #define EFLG_TF (1<<8)
247 #define EFLG_SF (1<<7)
248 #define EFLG_ZF (1<<6)
249 #define EFLG_AF (1<<4)
250 #define EFLG_PF (1<<2)
251 #define EFLG_CF (1<<0)
253 #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
254 #define EFLG_RESERVED_ONE_MASK 2
256 static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
258 if (!(ctxt->regs_valid & (1 << nr))) {
259 ctxt->regs_valid |= 1 << nr;
260 ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
262 return ctxt->_regs[nr];
265 static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
267 ctxt->regs_valid |= 1 << nr;
268 ctxt->regs_dirty |= 1 << nr;
269 return &ctxt->_regs[nr];
272 static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
275 return reg_write(ctxt, nr);
278 static void writeback_registers(struct x86_emulate_ctxt *ctxt)
282 for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
283 ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
286 static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
288 ctxt->regs_dirty = 0;
289 ctxt->regs_valid = 0;
/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
296 #define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
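/*
 * Sketch of how that mask is meant to be used (the actual fold-back
 * happens in the fastop() dispatcher): only the arithmetic flags are
 * taken from the executed snippet, everything else keeps the guest's
 * saved value:
 *
 *	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
 */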
304 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
306 #define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
307 #define FOP_RET "ret \n\t"
309 #define FOP_START(op) \
310 extern void em_##op(struct fastop *fake); \
311 asm(".pushsection .text, \"ax\" \n\t" \
312 ".global em_" #op " \n\t" \
319 #define FOPNOP() FOP_ALIGN FOP_RET
321 #define FOP1E(op, dst) \
322 FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET
324 #define FOP1EEX(op, dst) \
325 FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
327 #define FASTOP1(op) \
332 ON64(FOP1E(op##q, rax)) \
335 /* 1-operand, using src2 (for MUL/DIV r/m) */
336 #define FASTOP1SRC2(op, name) \
341 ON64(FOP1E(op, rcx)) \
344 /* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
345 #define FASTOP1SRC2EX(op, name) \
350 ON64(FOP1EEX(op, rcx)) \
353 #define FOP2E(op, dst, src) \
354 FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET
356 #define FASTOP2(op) \
358 FOP2E(op##b, al, dl) \
359 FOP2E(op##w, ax, dx) \
360 FOP2E(op##l, eax, edx) \
361 ON64(FOP2E(op##q, rax, rdx)) \
364 /* 2 operand, word only */
365 #define FASTOP2W(op) \
368 FOP2E(op##w, ax, dx) \
369 FOP2E(op##l, eax, edx) \
370 ON64(FOP2E(op##q, rax, rdx)) \
373 /* 2 operand, src is CL */
374 #define FASTOP2CL(op) \
376 FOP2E(op##b, al, cl) \
377 FOP2E(op##w, ax, cl) \
378 FOP2E(op##l, eax, cl) \
379 ON64(FOP2E(op##q, rax, cl)) \
382 #define FOP3E(op, dst, src, src2) \
383 FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET
385 /* 3-operand, word-only, src2=cl */
386 #define FASTOP3WCL(op) \
389 FOP3E(op##w, ax, dx, cl) \
390 FOP3E(op##l, eax, edx, cl) \
391 ON64(FOP3E(op##q, rax, rdx, cl)) \
394 /* Special case for SETcc - 1 instruction per cc */
395 #define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"
397 asm(".global kvm_fastop_exception \n"
398 "kvm_fastop_exception: xor %esi, %esi; ret");
419 FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
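/*
 * Illustration: FOP_SETCC() aligns every SETcc stub to 4 bytes, so the
 * em_setcc table built from it (seto, setno, setc, ... in condition
 * code order) can be indexed by condition code, which is exactly what
 * test_cc() below relies on:
 *
 *	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
 */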
422 static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
423 enum x86_intercept intercept,
424 enum x86_intercept_stage stage)
426 struct x86_instruction_info info = {
427 .intercept = intercept,
428 .rep_prefix = ctxt->rep_prefix,
429 .modrm_mod = ctxt->modrm_mod,
430 .modrm_reg = ctxt->modrm_reg,
431 .modrm_rm = ctxt->modrm_rm,
432 .src_val = ctxt->src.val64,
433 .dst_val = ctxt->dst.val64,
434 .src_bytes = ctxt->src.bytes,
435 .dst_bytes = ctxt->dst.bytes,
436 .ad_bytes = ctxt->ad_bytes,
437 .next_rip = ctxt->eip,
440 return ctxt->ops->intercept(ctxt, &info, stage);
443 static void assign_masked(ulong *dest, ulong src, ulong mask)
445 *dest = (*dest & ~mask) | (src & mask);
448 static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
450 return (1UL << (ctxt->ad_bytes << 3)) - 1;
453 static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
456 struct desc_struct ss;
458 if (ctxt->mode == X86EMUL_MODE_PROT64)
460 ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
461 return ~0U >> ((ss.d ^ 1) * 16); /* d=0: 0xffff; d=1: 0xffffffff */
464 static int stack_size(struct x86_emulate_ctxt *ctxt)
466 return (__fls(stack_mask(ctxt)) + 1) >> 3;
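/*
 * For example, a 16-bit stack segment (ss.d == 0) yields
 * stack_mask() == 0xffff and stack_size() == 2, while a 32-bit one
 * yields 0xffffffff and 4.
 */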
469 /* Access/update address held in a register, based on addressing mode. */
470 static inline unsigned long
471 address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
473 if (ctxt->ad_bytes == sizeof(unsigned long))
476 return reg & ad_mask(ctxt);
479 static inline unsigned long
480 register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
482 return address_mask(ctxt, reg);
485 static void masked_increment(ulong *reg, ulong mask, int inc)
487 assign_masked(reg, *reg + inc, mask);
491 register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
495 if (ctxt->ad_bytes == sizeof(unsigned long))
498 mask = ad_mask(ctxt);
499 masked_increment(reg, mask, inc);
502 static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
504 masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
507 static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
509 register_address_increment(ctxt, &ctxt->_eip, rel);
512 static u32 desc_limit_scaled(struct desc_struct *desc)
514 u32 limit = get_desc_limit(desc);
516 return desc->g ? (limit << 12) | 0xfff : limit;
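/*
 * E.g. a page-granular descriptor (g == 1) with limit 0xfffff scales
 * to 0xffffffff, i.e. a 4 GiB segment.
 */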
519 static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
521 if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
524 return ctxt->ops->get_cached_segment_base(ctxt, seg);
527 static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
528 u32 error, bool valid)
531 ctxt->exception.vector = vec;
532 ctxt->exception.error_code = error;
533 ctxt->exception.error_code_valid = valid;
534 return X86EMUL_PROPAGATE_FAULT;
537 static int emulate_db(struct x86_emulate_ctxt *ctxt)
539 return emulate_exception(ctxt, DB_VECTOR, 0, false);
542 static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
544 return emulate_exception(ctxt, GP_VECTOR, err, true);
547 static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
549 return emulate_exception(ctxt, SS_VECTOR, err, true);
552 static int emulate_ud(struct x86_emulate_ctxt *ctxt)
554 return emulate_exception(ctxt, UD_VECTOR, 0, false);
557 static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
559 return emulate_exception(ctxt, TS_VECTOR, err, true);
562 static int emulate_de(struct x86_emulate_ctxt *ctxt)
564 return emulate_exception(ctxt, DE_VECTOR, 0, false);
567 static int emulate_nm(struct x86_emulate_ctxt *ctxt)
569 return emulate_exception(ctxt, NM_VECTOR, 0, false);
572 static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
575 struct desc_struct desc;
577 ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
581 static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
586 struct desc_struct desc;
588 ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
589 ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
/*
 * x86 defines three classes of vector instructions: explicitly
594 * aligned, explicitly unaligned, and the rest, which change behaviour
595 * depending on whether they're AVX encoded or not.
597 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
598 * subject to the same check.
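 *
 * For example (illustrative only): MOVDQA (Aligned) always takes #GP on
 * a misaligned 16-byte operand, MOVDQU (Unaligned) never does, and an
 * instruction such as ADDPS needs an aligned operand in its legacy SSE
 * form but not in its AVX (VADDPS) form.
 */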
600 static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
602 if (likely(size < 16))
605 if (ctxt->d & Aligned)
607 else if (ctxt->d & Unaligned)
609 else if (ctxt->d & Avx)
615 static int __linearize(struct x86_emulate_ctxt *ctxt,
616 struct segmented_address addr,
617 unsigned size, bool write, bool fetch,
620 struct desc_struct desc;
627 la = seg_base(ctxt, addr.seg) + addr.ea;
628 switch (ctxt->mode) {
629 case X86EMUL_MODE_PROT64:
630 if (((signed long)la << 16) >> 16 != la)
631 return emulate_gp(ctxt, 0);
634 usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
638 /* code segment in protected mode or read-only data segment */
639 if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
640 || !(desc.type & 2)) && write)
642 /* unreadable code segment */
643 if (!fetch && (desc.type & 8) && !(desc.type & 2))
645 lim = desc_limit_scaled(&desc);
646 if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch &&
647 (ctxt->d & NoBigReal)) {
648 /* la is between zero and 0xffff */
649 if (la > 0xffff || (u32)(la + size - 1) > 0xffff)
651 } else if ((desc.type & 8) || !(desc.type & 4)) {
652 /* expand-up segment */
653 if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
656 /* expand-down segment */
657 if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
659 lim = desc.d ? 0xffffffff : 0xffff;
660 if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
663 cpl = ctxt->ops->cpl(ctxt);
664 if (!(desc.type & 8)) {
668 } else if ((desc.type & 8) && !(desc.type & 4)) {
669 /* nonconforming code segment */
672 } else if ((desc.type & 8) && (desc.type & 4)) {
673 /* conforming code segment */
679 if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
681 if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
682 return emulate_gp(ctxt, 0);
684 return X86EMUL_CONTINUE;
686 if (addr.seg == VCPU_SREG_SS)
687 return emulate_ss(ctxt, sel);
689 return emulate_gp(ctxt, sel);
692 static int linearize(struct x86_emulate_ctxt *ctxt,
693 struct segmented_address addr,
694 unsigned size, bool write,
697 return __linearize(ctxt, addr, size, write, false, linear);
701 static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
702 struct segmented_address addr,
709 rc = linearize(ctxt, addr, size, false, &linear);
710 if (rc != X86EMUL_CONTINUE)
712 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
/*
 * Prefetch the remaining bytes of the instruction without crossing a
 * page boundary if they are not in fetch_cache yet.
 */
719 static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
723 unsigned long linear;
724 int cur_size = ctxt->fetch.end - ctxt->fetch.data;
725 struct segmented_address addr = { .seg = VCPU_SREG_CS,
726 .ea = ctxt->eip + cur_size };
	/* 15UL ^ cur_size is 15 - cur_size, since cur_size is never above 15 */
	size = 15UL ^ cur_size;
729 rc = __linearize(ctxt, addr, size, false, true, &linear);
730 if (unlikely(rc != X86EMUL_CONTINUE))
733 size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
	/*
	 * One instruction can only straddle two pages, and the first of
	 * those pages was already fetched at the beginning of
	 * x86_decode_insn.  So, if we still do not have enough bytes here,
	 * we must have hit the 15-byte instruction length limit.
	 */
741 if (unlikely(size < op_size))
742 return X86EMUL_UNHANDLEABLE;
743 rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
744 size, &ctxt->exception);
745 if (unlikely(rc != X86EMUL_CONTINUE))
747 ctxt->fetch.end += size;
748 return X86EMUL_CONTINUE;
751 static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
754 if (unlikely(ctxt->fetch.end - ctxt->fetch.ptr < size))
755 return __do_insn_fetch_bytes(ctxt, size);
757 return X86EMUL_CONTINUE;
760 /* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt)					\
({	_type _x;							\
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += sizeof(_type);					\
	_x = *(_type __aligned(1) *) ctxt->fetch.ptr;			\
	ctxt->fetch.ptr += sizeof(_type);				\
	_x;								\
})
#define insn_fetch_arr(_arr, _size, _ctxt)				\
({									\
	rc = do_insn_fetch_bytes(_ctxt, _size);				\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += (_size);						\
	memcpy(_arr, ctxt->fetch.ptr, _size);				\
	ctxt->fetch.ptr += (_size);					\
})
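/*
 * Typical use inside the decoder (see decode_modrm() below); a local
 * "rc" and a "done" label must be in scope, since both macros bail out
 * through them on a failed fetch:
 *
 *	ctxt->modrm = insn_fetch(u8, ctxt);
 *	sib = insn_fetch(u8, ctxt);
 *	modrm_ea += insn_fetch(s32, ctxt);
 */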
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * The legacy high-byte registers AH, CH, DH and BH are selected when
 * @byteop is set and there is no REX prefix.
 */
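/*
 * Example (hypothetical encoding): with @byteop set and no REX prefix,
 * modrm_reg == 5 selects CH, i.e. byte 1 of the RCX slot (5 & 3 == 1);
 * with any REX prefix present the same value selects BPL, i.e. byte 0
 * of RBP.
 */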
788 static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
792 int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
794 if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
795 p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
797 p = reg_rmw(ctxt, modrm_reg);
801 static int read_descriptor(struct x86_emulate_ctxt *ctxt,
802 struct segmented_address addr,
803 u16 *size, unsigned long *address, int op_bytes)
810 rc = segmented_read_std(ctxt, addr, size, 2);
811 if (rc != X86EMUL_CONTINUE)
814 rc = segmented_read_std(ctxt, addr, address, op_bytes);
828 FASTOP1SRC2(mul, mul_ex);
829 FASTOP1SRC2(imul, imul_ex);
830 FASTOP1SRC2EX(div, div_ex);
831 FASTOP1SRC2EX(idiv, idiv_ex);
860 static u8 test_cc(unsigned int condition, unsigned long flags)
863 void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
865 flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
866 asm("push %[flags]; popf; call *%[fastop]"
867 : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
871 static void fetch_register_operand(struct operand *op)
875 op->val = *(u8 *)op->addr.reg;
878 op->val = *(u16 *)op->addr.reg;
881 op->val = *(u32 *)op->addr.reg;
884 op->val = *(u64 *)op->addr.reg;
889 static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
891 ctxt->ops->get_fpu(ctxt);
893 case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
894 case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
895 case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
896 case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
897 case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
898 case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
899 case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
900 case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
902 case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
903 case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
904 case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
905 case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
906 case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
907 case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
908 case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
909 case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
913 ctxt->ops->put_fpu(ctxt);
916 static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
919 ctxt->ops->get_fpu(ctxt);
921 case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
922 case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
923 case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
924 case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
925 case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
926 case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
927 case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
928 case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
930 case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
931 case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
932 case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
933 case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
934 case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
935 case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
936 case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
937 case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
941 ctxt->ops->put_fpu(ctxt);
944 static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
946 ctxt->ops->get_fpu(ctxt);
948 case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
949 case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
950 case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
951 case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
952 case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
953 case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
954 case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
955 case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
958 ctxt->ops->put_fpu(ctxt);
961 static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
963 ctxt->ops->get_fpu(ctxt);
965 case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
966 case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
967 case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
968 case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
969 case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
970 case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
971 case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
972 case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
975 ctxt->ops->put_fpu(ctxt);
978 static int em_fninit(struct x86_emulate_ctxt *ctxt)
980 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
981 return emulate_nm(ctxt);
983 ctxt->ops->get_fpu(ctxt);
984 asm volatile("fninit");
985 ctxt->ops->put_fpu(ctxt);
986 return X86EMUL_CONTINUE;
989 static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
993 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
994 return emulate_nm(ctxt);
996 ctxt->ops->get_fpu(ctxt);
997 asm volatile("fnstcw %0": "+m"(fcw));
998 ctxt->ops->put_fpu(ctxt);
1000 /* force 2 byte destination */
1001 ctxt->dst.bytes = 2;
1002 ctxt->dst.val = fcw;
1004 return X86EMUL_CONTINUE;
1007 static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
1011 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1012 return emulate_nm(ctxt);
1014 ctxt->ops->get_fpu(ctxt);
1015 asm volatile("fnstsw %0": "+m"(fsw));
1016 ctxt->ops->put_fpu(ctxt);
1018 /* force 2 byte destination */
1019 ctxt->dst.bytes = 2;
1020 ctxt->dst.val = fsw;
1022 return X86EMUL_CONTINUE;
1025 static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
1028 unsigned reg = ctxt->modrm_reg;
1030 if (!(ctxt->d & ModRM))
1031 reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
1033 if (ctxt->d & Sse) {
1037 read_sse_reg(ctxt, &op->vec_val, reg);
1040 if (ctxt->d & Mmx) {
1049 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1050 op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
1052 fetch_register_operand(op);
1053 op->orig_val = op->val;
1056 static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
1058 if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
1059 ctxt->modrm_seg = VCPU_SREG_SS;
1062 static int decode_modrm(struct x86_emulate_ctxt *ctxt,
1066 int index_reg, base_reg, scale;
1067 int rc = X86EMUL_CONTINUE;
1070 ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
1071 index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
1072 base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */
1074 ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
1075 ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
1076 ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
1077 ctxt->modrm_seg = VCPU_SREG_DS;
1079 if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
1081 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1082 op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
1084 if (ctxt->d & Sse) {
1087 op->addr.xmm = ctxt->modrm_rm;
1088 read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
1091 if (ctxt->d & Mmx) {
1094 op->addr.mm = ctxt->modrm_rm & 7;
1097 fetch_register_operand(op);
1103 if (ctxt->ad_bytes == 2) {
1104 unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
1105 unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
1106 unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
1107 unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
1109 /* 16-bit ModR/M decode. */
1110 switch (ctxt->modrm_mod) {
1112 if (ctxt->modrm_rm == 6)
1113 modrm_ea += insn_fetch(u16, ctxt);
1116 modrm_ea += insn_fetch(s8, ctxt);
1119 modrm_ea += insn_fetch(u16, ctxt);
1122 switch (ctxt->modrm_rm) {
1124 modrm_ea += bx + si;
1127 modrm_ea += bx + di;
1130 modrm_ea += bp + si;
1133 modrm_ea += bp + di;
1142 if (ctxt->modrm_mod != 0)
1149 if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1150 (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1151 ctxt->modrm_seg = VCPU_SREG_SS;
1152 modrm_ea = (u16)modrm_ea;
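/*
 * Worked example (16-bit address size): for "8B 46 0C" the ModRM byte
 * 0x46 decodes as mod=01, reg=000, rm=110, so the code above yields
 * modrm_ea = BP + 0x0C with SS as the default segment,
 * i.e. MOV AX, [SS:BP+12].
 */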
1154 /* 32/64-bit ModR/M decode. */
1155 if ((ctxt->modrm_rm & 7) == 4) {
1156 sib = insn_fetch(u8, ctxt);
1157 index_reg |= (sib >> 3) & 7;
1158 base_reg |= sib & 7;
1161 if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
1162 modrm_ea += insn_fetch(s32, ctxt);
1164 modrm_ea += reg_read(ctxt, base_reg);
1165 adjust_modrm_seg(ctxt, base_reg);
1168 modrm_ea += reg_read(ctxt, index_reg) << scale;
1169 } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
1170 if (ctxt->mode == X86EMUL_MODE_PROT64)
1171 ctxt->rip_relative = 1;
1173 base_reg = ctxt->modrm_rm;
1174 modrm_ea += reg_read(ctxt, base_reg);
1175 adjust_modrm_seg(ctxt, base_reg);
1177 switch (ctxt->modrm_mod) {
1179 if (ctxt->modrm_rm == 5)
1180 modrm_ea += insn_fetch(s32, ctxt);
1183 modrm_ea += insn_fetch(s8, ctxt);
1186 modrm_ea += insn_fetch(s32, ctxt);
1190 op->addr.mem.ea = modrm_ea;
1191 if (ctxt->ad_bytes != 8)
1192 ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
1198 static int decode_abs(struct x86_emulate_ctxt *ctxt,
1201 int rc = X86EMUL_CONTINUE;
1204 switch (ctxt->ad_bytes) {
1206 op->addr.mem.ea = insn_fetch(u16, ctxt);
1209 op->addr.mem.ea = insn_fetch(u32, ctxt);
1212 op->addr.mem.ea = insn_fetch(u64, ctxt);
1219 static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1223 if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1224 mask = ~((long)ctxt->dst.bytes * 8 - 1);
1226 if (ctxt->src.bytes == 2)
1227 sv = (s16)ctxt->src.val & (s16)mask;
1228 else if (ctxt->src.bytes == 4)
1229 sv = (s32)ctxt->src.val & (s32)mask;
1231 sv = (s64)ctxt->src.val & (s64)mask;
1233 ctxt->dst.addr.mem.ea += (sv >> 3);
	/* keep only the bit offset within the operand */
1237 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
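/*
 * Worked example: BT m16, r16 with a bit offset of 100 in the source
 * register gives mask = ~15 and sv = 96, so the memory operand is moved
 * forward by 96 >> 3 == 12 bytes and the remaining in-word bit index is
 * 100 & 15 == 4.
 */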
1240 static int read_emulated(struct x86_emulate_ctxt *ctxt,
1241 unsigned long addr, void *dest, unsigned size)
1244 struct read_cache *mc = &ctxt->mem_read;
1246 if (mc->pos < mc->end)
1249 WARN_ON((mc->end + size) >= sizeof(mc->data));
1251 rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
1253 if (rc != X86EMUL_CONTINUE)
1259 memcpy(dest, mc->data + mc->pos, size);
1261 return X86EMUL_CONTINUE;
1264 static int segmented_read(struct x86_emulate_ctxt *ctxt,
1265 struct segmented_address addr,
1272 rc = linearize(ctxt, addr, size, false, &linear);
1273 if (rc != X86EMUL_CONTINUE)
1275 return read_emulated(ctxt, linear, data, size);
1278 static int segmented_write(struct x86_emulate_ctxt *ctxt,
1279 struct segmented_address addr,
1286 rc = linearize(ctxt, addr, size, true, &linear);
1287 if (rc != X86EMUL_CONTINUE)
1289 return ctxt->ops->write_emulated(ctxt, linear, data, size,
1293 static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1294 struct segmented_address addr,
1295 const void *orig_data, const void *data,
1301 rc = linearize(ctxt, addr, size, true, &linear);
1302 if (rc != X86EMUL_CONTINUE)
1304 return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1305 size, &ctxt->exception);
1308 static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1309 unsigned int size, unsigned short port,
1312 struct read_cache *rc = &ctxt->io_read;
1314 if (rc->pos == rc->end) { /* refill pio read ahead */
1315 unsigned int in_page, n;
1316 unsigned int count = ctxt->rep_prefix ?
1317 address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
1318 in_page = (ctxt->eflags & EFLG_DF) ?
1319 offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
1320 PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
1321 n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
1324 rc->pos = rc->end = 0;
1325 if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1330 if (ctxt->rep_prefix && (ctxt->d & String) &&
1331 !(ctxt->eflags & EFLG_DF)) {
1332 ctxt->dst.data = rc->data + rc->pos;
1333 ctxt->dst.type = OP_MEM_STR;
1334 ctxt->dst.count = (rc->end - rc->pos) / size;
1337 memcpy(dest, rc->data + rc->pos, size);
1343 static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1344 u16 index, struct desc_struct *desc)
1349 ctxt->ops->get_idt(ctxt, &dt);
1351 if (dt.size < index * 8 + 7)
1352 return emulate_gp(ctxt, index << 3 | 0x2);
1354 addr = dt.address + index * 8;
1355 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1359 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1360 u16 selector, struct desc_ptr *dt)
1362 const struct x86_emulate_ops *ops = ctxt->ops;
1365 if (selector & 1 << 2) {
1366 struct desc_struct desc;
1369 memset (dt, 0, sizeof *dt);
1370 if (!ops->get_segment(ctxt, &sel, &desc, &base3,
1374 dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
1375 dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
1377 ops->get_gdt(ctxt, dt);
/* allowed only for 8-byte segment descriptors */
1381 static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1382 u16 selector, struct desc_struct *desc,
1386 u16 index = selector >> 3;
1389 get_descriptor_table_ptr(ctxt, selector, &dt);
1391 if (dt.size < index * 8 + 7)
1392 return emulate_gp(ctxt, selector & 0xfffc);
1394 *desc_addr_p = addr = dt.address + index * 8;
1395 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
/* allowed only for 8-byte segment descriptors */
1400 static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1401 u16 selector, struct desc_struct *desc)
1404 u16 index = selector >> 3;
1407 get_descriptor_table_ptr(ctxt, selector, &dt);
1409 if (dt.size < index * 8 + 7)
1410 return emulate_gp(ctxt, selector & 0xfffc);
1412 addr = dt.address + index * 8;
1413 return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
1417 /* Does not support long mode */
1418 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1419 u16 selector, int seg, u8 cpl, bool in_task_switch)
1421 struct desc_struct seg_desc, old_desc;
1423 unsigned err_vec = GP_VECTOR;
1425 bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
1431 memset(&seg_desc, 0, sizeof seg_desc);
1433 if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
1436 ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
1437 set_desc_base(&seg_desc, selector << 4);
1439 } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
1440 /* VM86 needs a clean new segment descriptor */
1441 set_desc_base(&seg_desc, selector << 4);
1442 set_desc_limit(&seg_desc, 0xffff);
1452 /* NULL selector is not valid for TR, CS and SS (except for long mode) */
1453 if ((seg == VCPU_SREG_CS
1454 || (seg == VCPU_SREG_SS
1455 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
1456 || seg == VCPU_SREG_TR)
1460 /* TR should be in GDT only */
1461 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1464 if (null_selector) /* for NULL selector skip all following checks */
1467 ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
1468 if (ret != X86EMUL_CONTINUE)
1471 err_code = selector & 0xfffc;
1472 err_vec = in_task_switch ? TS_VECTOR : GP_VECTOR;
1474 /* can't load system descriptor into segment selector */
1475 if (seg <= VCPU_SREG_GS && !seg_desc.s)
1479 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		/*
		 * segment is not a writable data segment, or the segment
		 * selector's RPL != CPL, or the descriptor's DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1495 if (!(seg_desc.type & 8))
1498 if (seg_desc.type & 4) {
1504 if (rpl > cpl || dpl != cpl)
1507 /* CS(RPL) <- CPL */
1508 selector = (selector & 0xfffc) | cpl;
1511 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1513 old_desc = seg_desc;
1514 seg_desc.type |= 2; /* busy */
1515 ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1516 sizeof(seg_desc), &ctxt->exception);
1517 if (ret != X86EMUL_CONTINUE)
1520 case VCPU_SREG_LDTR:
1521 if (seg_desc.s || seg_desc.type != 2)
1524 default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
1530 if ((seg_desc.type & 0xa) == 0x8 ||
1531 (((seg_desc.type & 0xc) != 0xc) &&
1532 (rpl > dpl && cpl > dpl)))
1538 /* mark segment as accessed */
1540 ret = write_segment_descriptor(ctxt, selector, &seg_desc);
1541 if (ret != X86EMUL_CONTINUE)
1543 } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1544 ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
1545 sizeof(base3), &ctxt->exception);
1546 if (ret != X86EMUL_CONTINUE)
1550 ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
1551 return X86EMUL_CONTINUE;
1553 return emulate_exception(ctxt, err_vec, err_code, true);
1556 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1557 u16 selector, int seg)
1559 u8 cpl = ctxt->ops->cpl(ctxt);
1560 return __load_segment_descriptor(ctxt, selector, seg, cpl, false);
1563 static void write_register_operand(struct operand *op)
1565 /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
1566 switch (op->bytes) {
1568 *(u8 *)op->addr.reg = (u8)op->val;
1571 *(u16 *)op->addr.reg = (u16)op->val;
1574 *op->addr.reg = (u32)op->val;
1575 break; /* 64b: zero-extend */
1577 *op->addr.reg = op->val;
1582 static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
1586 write_register_operand(op);
1589 if (ctxt->lock_prefix)
1590 return segmented_cmpxchg(ctxt,
1596 return segmented_write(ctxt,
1602 return segmented_write(ctxt,
1605 op->bytes * op->count);
1608 write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
1611 write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
1619 return X86EMUL_CONTINUE;
1622 static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
1624 struct segmented_address addr;
1626 rsp_increment(ctxt, -bytes);
1627 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1628 addr.seg = VCPU_SREG_SS;
1630 return segmented_write(ctxt, addr, data, bytes);
1633 static int em_push(struct x86_emulate_ctxt *ctxt)
1635 /* Disable writeback. */
1636 ctxt->dst.type = OP_NONE;
1637 return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
1640 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1641 void *dest, int len)
1644 struct segmented_address addr;
1646 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1647 addr.seg = VCPU_SREG_SS;
1648 rc = segmented_read(ctxt, addr, dest, len);
1649 if (rc != X86EMUL_CONTINUE)
1652 rsp_increment(ctxt, len);
1656 static int em_pop(struct x86_emulate_ctxt *ctxt)
1658 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1661 static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1662 void *dest, int len)
1665 unsigned long val, change_mask;
1666 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1667 int cpl = ctxt->ops->cpl(ctxt);
1669 rc = emulate_pop(ctxt, &val, len);
1670 if (rc != X86EMUL_CONTINUE)
1673 change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
1674 | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID;
1676 switch(ctxt->mode) {
1677 case X86EMUL_MODE_PROT64:
1678 case X86EMUL_MODE_PROT32:
1679 case X86EMUL_MODE_PROT16:
1681 change_mask |= EFLG_IOPL;
1683 change_mask |= EFLG_IF;
1685 case X86EMUL_MODE_VM86:
1687 return emulate_gp(ctxt, 0);
1688 change_mask |= EFLG_IF;
1690 default: /* real mode */
1691 change_mask |= (EFLG_IOPL | EFLG_IF);
1695 *(unsigned long *)dest =
1696 (ctxt->eflags & ~change_mask) | (val & change_mask);
1701 static int em_popf(struct x86_emulate_ctxt *ctxt)
1703 ctxt->dst.type = OP_REG;
1704 ctxt->dst.addr.reg = &ctxt->eflags;
1705 ctxt->dst.bytes = ctxt->op_bytes;
1706 return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1709 static int em_enter(struct x86_emulate_ctxt *ctxt)
1712 unsigned frame_size = ctxt->src.val;
1713 unsigned nesting_level = ctxt->src2.val & 31;
1717 return X86EMUL_UNHANDLEABLE;
1719 rbp = reg_read(ctxt, VCPU_REGS_RBP);
1720 rc = push(ctxt, &rbp, stack_size(ctxt));
1721 if (rc != X86EMUL_CONTINUE)
1723 assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
1725 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1726 reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
1728 return X86EMUL_CONTINUE;
1731 static int em_leave(struct x86_emulate_ctxt *ctxt)
1733 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
1735 return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
1738 static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
1740 int seg = ctxt->src2.val;
1742 ctxt->src.val = get_segment_selector(ctxt, seg);
1744 return em_push(ctxt);
1747 static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
1749 int seg = ctxt->src2.val;
1750 unsigned long selector;
1753 rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
1754 if (rc != X86EMUL_CONTINUE)
1757 if (ctxt->modrm_reg == VCPU_SREG_SS)
1758 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
1760 rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1764 static int em_pusha(struct x86_emulate_ctxt *ctxt)
1766 unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
1767 int rc = X86EMUL_CONTINUE;
1768 int reg = VCPU_REGS_RAX;
1770 while (reg <= VCPU_REGS_RDI) {
1771 (reg == VCPU_REGS_RSP) ?
1772 (ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));
1775 if (rc != X86EMUL_CONTINUE)
1784 static int em_pushf(struct x86_emulate_ctxt *ctxt)
1786 ctxt->src.val = (unsigned long)ctxt->eflags;
1787 return em_push(ctxt);
1790 static int em_popa(struct x86_emulate_ctxt *ctxt)
1792 int rc = X86EMUL_CONTINUE;
1793 int reg = VCPU_REGS_RDI;
1795 while (reg >= VCPU_REGS_RAX) {
1796 if (reg == VCPU_REGS_RSP) {
1797 rsp_increment(ctxt, ctxt->op_bytes);
1801 rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
1802 if (rc != X86EMUL_CONTINUE)
1809 static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1811 const struct x86_emulate_ops *ops = ctxt->ops;
1818 /* TODO: Add limit checks */
1819 ctxt->src.val = ctxt->eflags;
1821 if (rc != X86EMUL_CONTINUE)
1824 ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
1826 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
1828 if (rc != X86EMUL_CONTINUE)
1831 ctxt->src.val = ctxt->_eip;
1833 if (rc != X86EMUL_CONTINUE)
1836 ops->get_idt(ctxt, &dt);
1838 eip_addr = dt.address + (irq << 2);
1839 cs_addr = dt.address + (irq << 2) + 2;
1841 rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
1842 if (rc != X86EMUL_CONTINUE)
1845 rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
1846 if (rc != X86EMUL_CONTINUE)
1849 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
1850 if (rc != X86EMUL_CONTINUE)
1858 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1862 invalidate_registers(ctxt);
1863 rc = __emulate_int_real(ctxt, irq);
1864 if (rc == X86EMUL_CONTINUE)
1865 writeback_registers(ctxt);
1869 static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
1871 switch(ctxt->mode) {
1872 case X86EMUL_MODE_REAL:
1873 return __emulate_int_real(ctxt, irq);
1874 case X86EMUL_MODE_VM86:
1875 case X86EMUL_MODE_PROT16:
1876 case X86EMUL_MODE_PROT32:
1877 case X86EMUL_MODE_PROT64:
		/* Protected mode interrupts are not implemented yet */
1880 return X86EMUL_UNHANDLEABLE;
1884 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
1886 int rc = X86EMUL_CONTINUE;
1887 unsigned long temp_eip = 0;
1888 unsigned long temp_eflags = 0;
1889 unsigned long cs = 0;
1890 unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
1891 EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
1892 EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
1893 unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
1895 /* TODO: Add stack limit check */
1897 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
1899 if (rc != X86EMUL_CONTINUE)
1902 if (temp_eip & ~0xffff)
1903 return emulate_gp(ctxt, 0);
1905 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
1907 if (rc != X86EMUL_CONTINUE)
1910 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
1912 if (rc != X86EMUL_CONTINUE)
1915 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
1917 if (rc != X86EMUL_CONTINUE)
1920 ctxt->_eip = temp_eip;
1923 if (ctxt->op_bytes == 4)
1924 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
1925 else if (ctxt->op_bytes == 2) {
1926 ctxt->eflags &= ~0xffff;
1927 ctxt->eflags |= temp_eflags;
1930 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
1931 ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
1936 static int em_iret(struct x86_emulate_ctxt *ctxt)
1938 switch(ctxt->mode) {
1939 case X86EMUL_MODE_REAL:
1940 return emulate_iret_real(ctxt);
1941 case X86EMUL_MODE_VM86:
1942 case X86EMUL_MODE_PROT16:
1943 case X86EMUL_MODE_PROT32:
1944 case X86EMUL_MODE_PROT64:
		/* iret from protected mode is not implemented yet */
1947 return X86EMUL_UNHANDLEABLE;
1951 static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
1956 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
1958 rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS);
1959 if (rc != X86EMUL_CONTINUE)
1963 memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
1964 return X86EMUL_CONTINUE;
1967 static int em_grp45(struct x86_emulate_ctxt *ctxt)
1969 int rc = X86EMUL_CONTINUE;
1971 switch (ctxt->modrm_reg) {
1972 case 2: /* call near abs */ {
1974 old_eip = ctxt->_eip;
1975 ctxt->_eip = ctxt->src.val;
1976 ctxt->src.val = old_eip;
1980 case 4: /* jmp abs */
1981 ctxt->_eip = ctxt->src.val;
1983 case 5: /* jmp far */
1984 rc = em_jmp_far(ctxt);
1993 static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
1995 u64 old = ctxt->dst.orig_val64;
1997 if (ctxt->dst.bytes == 16)
1998 return X86EMUL_UNHANDLEABLE;
2000 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2001 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2002 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2003 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2004 ctxt->eflags &= ~EFLG_ZF;
2006 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2007 (u32) reg_read(ctxt, VCPU_REGS_RBX);
2009 ctxt->eflags |= EFLG_ZF;
2011 return X86EMUL_CONTINUE;
2014 static int em_ret(struct x86_emulate_ctxt *ctxt)
2016 ctxt->dst.type = OP_REG;
2017 ctxt->dst.addr.reg = &ctxt->_eip;
2018 ctxt->dst.bytes = ctxt->op_bytes;
2019 return em_pop(ctxt);
2022 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2026 int cpl = ctxt->ops->cpl(ctxt);
2028 rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes);
2029 if (rc != X86EMUL_CONTINUE)
2031 if (ctxt->op_bytes == 4)
2032 ctxt->_eip = (u32)ctxt->_eip;
2033 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2034 if (rc != X86EMUL_CONTINUE)
2036 /* Outer-privilege level return is not implemented */
2037 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2038 return X86EMUL_UNHANDLEABLE;
2039 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
2043 static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2047 rc = em_ret_far(ctxt);
2048 if (rc != X86EMUL_CONTINUE)
2050 rsp_increment(ctxt, ctxt->src.val);
2051 return X86EMUL_CONTINUE;
2054 static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2056 /* Save real source value, then compare EAX against destination. */
2057 ctxt->dst.orig_val = ctxt->dst.val;
2058 ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
2059 ctxt->src.orig_val = ctxt->src.val;
2060 ctxt->src.val = ctxt->dst.orig_val;
2061 fastop(ctxt, em_cmp);
2063 if (ctxt->eflags & EFLG_ZF) {
2064 /* Success: write back to memory. */
2065 ctxt->dst.val = ctxt->src.orig_val;
2067 /* Failure: write the value we saw to EAX. */
2068 ctxt->dst.type = OP_REG;
2069 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2070 ctxt->dst.val = ctxt->dst.orig_val;
2072 return X86EMUL_CONTINUE;
2075 static int em_lseg(struct x86_emulate_ctxt *ctxt)
2077 int seg = ctxt->src2.val;
2081 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2083 rc = load_segment_descriptor(ctxt, sel, seg);
2084 if (rc != X86EMUL_CONTINUE)
2087 ctxt->dst.val = ctxt->src.val;
2092 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2093 struct desc_struct *cs, struct desc_struct *ss)
2095 cs->l = 0; /* will be adjusted later */
2096 set_desc_base(cs, 0); /* flat segment */
2097 cs->g = 1; /* 4kb granularity */
2098 set_desc_limit(cs, 0xfffff); /* 4GB limit */
2099 cs->type = 0x0b; /* Read, Execute, Accessed */
2101 cs->dpl = 0; /* will be adjusted later */
2106 set_desc_base(ss, 0); /* flat segment */
2107 set_desc_limit(ss, 0xfffff); /* 4GB limit */
2108 ss->g = 1; /* 4kb granularity */
2110 ss->type = 0x03; /* Read/Write, Accessed */
2111 ss->d = 1; /* 32bit stack segment */
2118 static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2120 u32 eax, ebx, ecx, edx;
2123 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2124 return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2125 && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2126 && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2129 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2131 const struct x86_emulate_ops *ops = ctxt->ops;
2132 u32 eax, ebx, ecx, edx;
	/*
	 * syscall should always be enabled in long mode, so only fall back
	 * to the vendor-specific (CPUID) check when other modes are active...
	 */
2138 if (ctxt->mode == X86EMUL_MODE_PROT64)
2143 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	/*
	 * Intel ("GenuineIntel")
	 * remark: Intel CPUs only support "syscall" in 64-bit long mode.
	 * A 64-bit guest running a 32-bit compat application will #UD!
	 * While this behaviour could be papered over by emulating the AMD
	 * response, AMD CPUs cannot be made to behave like Intel ones.
	 */
2152 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2153 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2154 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2157 /* AMD ("AuthenticAMD") */
2158 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2159 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2160 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2163 /* AMD ("AMDisbetter!") */
2164 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2165 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2166 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2169 /* default: (not Intel, not AMD), apply Intel's stricter rules... */
2173 static int em_syscall(struct x86_emulate_ctxt *ctxt)
2175 const struct x86_emulate_ops *ops = ctxt->ops;
2176 struct desc_struct cs, ss;
2181 /* syscall is not available in real mode */
2182 if (ctxt->mode == X86EMUL_MODE_REAL ||
2183 ctxt->mode == X86EMUL_MODE_VM86)
2184 return emulate_ud(ctxt);
2186 if (!(em_syscall_is_enabled(ctxt)))
2187 return emulate_ud(ctxt);
2189 ops->get_msr(ctxt, MSR_EFER, &efer);
2190 setup_syscalls_segments(ctxt, &cs, &ss);
2192 if (!(efer & EFER_SCE))
2193 return emulate_ud(ctxt);
2195 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2197 cs_sel = (u16)(msr_data & 0xfffc);
2198 ss_sel = (u16)(msr_data + 8);
2200 if (efer & EFER_LMA) {
2204 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2205 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2207 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2208 if (efer & EFER_LMA) {
2209 #ifdef CONFIG_X86_64
2210 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2213 ctxt->mode == X86EMUL_MODE_PROT64 ?
2214 MSR_LSTAR : MSR_CSTAR, &msr_data);
2215 ctxt->_eip = msr_data;
2217 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2218 ctxt->eflags &= ~msr_data;
2222 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2223 ctxt->_eip = (u32)msr_data;
2225 ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
2228 return X86EMUL_CONTINUE;
2231 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2233 const struct x86_emulate_ops *ops = ctxt->ops;
2234 struct desc_struct cs, ss;
2239 ops->get_msr(ctxt, MSR_EFER, &efer);
2240 /* inject #GP if in real mode */
2241 if (ctxt->mode == X86EMUL_MODE_REAL)
2242 return emulate_gp(ctxt, 0);
	/*
	 * Not recognized on AMD in compat mode (but is recognized in legacy
	 * mode).
	 */
2248 if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
2249 && !vendor_intel(ctxt))
2250 return emulate_ud(ctxt);
	/* XXX sysenter/sysexit have not been tested in 64-bit mode.
	 * Therefore, we inject a #UD.
	 */
2255 if (ctxt->mode == X86EMUL_MODE_PROT64)
2256 return emulate_ud(ctxt);
2258 setup_syscalls_segments(ctxt, &cs, &ss);
2260 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2261 switch (ctxt->mode) {
2262 case X86EMUL_MODE_PROT32:
2263 if ((msr_data & 0xfffc) == 0x0)
2264 return emulate_gp(ctxt, 0);
2266 case X86EMUL_MODE_PROT64:
2267 if (msr_data == 0x0)
2268 return emulate_gp(ctxt, 0);
2274 ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
2275 cs_sel = (u16)msr_data;
2276 cs_sel &= ~SELECTOR_RPL_MASK;
2277 ss_sel = cs_sel + 8;
2278 ss_sel &= ~SELECTOR_RPL_MASK;
2279 if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
2284 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2285 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2287 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2288 ctxt->_eip = msr_data;
2290 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2291 *reg_write(ctxt, VCPU_REGS_RSP) = msr_data;
2293 return X86EMUL_CONTINUE;
2296 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2298 const struct x86_emulate_ops *ops = ctxt->ops;
2299 struct desc_struct cs, ss;
2302 u16 cs_sel = 0, ss_sel = 0;
2304 /* inject #GP if in real mode or Virtual 8086 mode */
2305 if (ctxt->mode == X86EMUL_MODE_REAL ||
2306 ctxt->mode == X86EMUL_MODE_VM86)
2307 return emulate_gp(ctxt, 0);
2309 setup_syscalls_segments(ctxt, &cs, &ss);
2311 if ((ctxt->rex_prefix & 0x8) != 0x0)
2312 usermode = X86EMUL_MODE_PROT64;
2314 usermode = X86EMUL_MODE_PROT32;
2318 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2320 case X86EMUL_MODE_PROT32:
2321 cs_sel = (u16)(msr_data + 16);
2322 if ((msr_data & 0xfffc) == 0x0)
2323 return emulate_gp(ctxt, 0);
2324 ss_sel = (u16)(msr_data + 24);
2326 case X86EMUL_MODE_PROT64:
2327 cs_sel = (u16)(msr_data + 32);
2328 if (msr_data == 0x0)
2329 return emulate_gp(ctxt, 0);
2330 ss_sel = cs_sel + 8;
2335 cs_sel |= SELECTOR_RPL_MASK;
2336 ss_sel |= SELECTOR_RPL_MASK;
2338 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2339 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2341 ctxt->_eip = reg_read(ctxt, VCPU_REGS_RDX);
2342 *reg_write(ctxt, VCPU_REGS_RSP) = reg_read(ctxt, VCPU_REGS_RCX);
2344 return X86EMUL_CONTINUE;
2347 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2350 if (ctxt->mode == X86EMUL_MODE_REAL)
2352 if (ctxt->mode == X86EMUL_MODE_VM86)
2354 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
2355 return ctxt->ops->cpl(ctxt) > iopl;
2358 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2361 const struct x86_emulate_ops *ops = ctxt->ops;
2362 struct desc_struct tr_seg;
2365 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2366 unsigned mask = (1 << len) - 1;
2369 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2372 if (desc_limit_scaled(&tr_seg) < 103)
2374 base = get_desc_base(&tr_seg);
2375 #ifdef CONFIG_X86_64
2376 base |= ((u64)base3) << 32;
2378 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2379 if (r != X86EMUL_CONTINUE)
2381 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2383 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2384 if (r != X86EMUL_CONTINUE)
2386 if ((perm >> bit_idx) & mask)
2391 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2397 if (emulator_bad_iopl(ctxt))
2398 if (!emulator_io_port_access_allowed(ctxt, port, len))
2401 ctxt->perm_ok = true;
2406 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2407 struct tss_segment_16 *tss)
2409 tss->ip = ctxt->_eip;
2410 tss->flag = ctxt->eflags;
2411 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2412 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2413 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2414 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2415 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2416 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2417 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2418 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2420 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2421 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2422 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2423 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2424 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2427 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2428 struct tss_segment_16 *tss)
2433 ctxt->_eip = tss->ip;
2434 ctxt->eflags = tss->flag | 2;
2435 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2436 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2437 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2438 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2439 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2440 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2441 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2442 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
2445 * SDM says that segment selectors are loaded before segment
2446 * descriptors.
2448 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2449 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2450 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2451 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2452 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2457 * Now load the segment descriptors. If a fault happens at this stage,
2458 * it is handled in the context of the new task.
2460 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl, true);
2461 if (ret != X86EMUL_CONTINUE)
2463 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true);
2464 if (ret != X86EMUL_CONTINUE)
2466 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true);
2467 if (ret != X86EMUL_CONTINUE)
2469 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true);
2470 if (ret != X86EMUL_CONTINUE)
2472 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true);
2473 if (ret != X86EMUL_CONTINUE)
2476 return X86EMUL_CONTINUE;
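/*
 * Switch tasks via 16-bit TSSs: save the outgoing state into the old
 * TSS, read the new TSS, optionally record the back link, and load the
 * incoming state.
 */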
2479 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2480 u16 tss_selector, u16 old_tss_sel,
2481 ulong old_tss_base, struct desc_struct *new_desc)
2483 const struct x86_emulate_ops *ops = ctxt->ops;
2484 struct tss_segment_16 tss_seg;
2486 u32 new_tss_base = get_desc_base(new_desc);
2488 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2490 if (ret != X86EMUL_CONTINUE)
2491 /* FIXME: need to provide precise fault address */
2494 save_state_to_tss16(ctxt, &tss_seg);
2496 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2498 if (ret != X86EMUL_CONTINUE)
2499 /* FIXME: need to provide precise fault address */
2502 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2504 if (ret != X86EMUL_CONTINUE)
2505 /* FIXME: need to provide precise fault address */
2508 if (old_tss_sel != 0xffff) {
2509 tss_seg.prev_task_link = old_tss_sel;
2511 ret = ops->write_std(ctxt, new_tss_base,
2512 &tss_seg.prev_task_link,
2513 sizeof tss_seg.prev_task_link,
2515 if (ret != X86EMUL_CONTINUE)
2516 /* FIXME: need to provide precise fault address */
2520 return load_state_from_tss16(ctxt, &tss_seg);
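/* Save the outgoing task's registers and selectors into a 32-bit TSS image. */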
2523 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2524 struct tss_segment_32 *tss)
2526 /* CR3 and the LDT selector are intentionally not saved */
2527 tss->eip = ctxt->_eip;
2528 tss->eflags = ctxt->eflags;
2529 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
2530 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
2531 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
2532 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
2533 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
2534 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
2535 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
2536 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
2538 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2539 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2540 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2541 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2542 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2543 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
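/*
 * Load the incoming task's state from a 32-bit TSS image, including
 * CR3, EFLAGS, the general purpose registers and the segment registers.
 */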
2546 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2547 struct tss_segment_32 *tss)
2552 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2553 return emulate_gp(ctxt, 0);
2554 ctxt->_eip = tss->eip;
2555 ctxt->eflags = tss->eflags | 2;
2557 /* General purpose registers */
2558 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
2559 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
2560 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
2561 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
2562 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
2563 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
2564 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
2565 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
2568 * SDM says that segment selectors are loaded before segment
2569 * descriptors. This is important because CPL checks will
2570 * use CS.RPL.
2572 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2573 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2574 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2575 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2576 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2577 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2578 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2581 * If we're switching between Protected Mode and VM86, we need to make
2582 * sure to update the mode before loading the segment descriptors so
2583 * that the selectors are interpreted correctly.
2585 if (ctxt->eflags & X86_EFLAGS_VM) {
2586 ctxt->mode = X86EMUL_MODE_VM86;
2589 ctxt->mode = X86EMUL_MODE_PROT32;
2594 * Now load the segment descriptors. If a fault happens at this stage,
2595 * it is handled in the context of the new task.
2597 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR, cpl, true);
2598 if (ret != X86EMUL_CONTINUE)
2600 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true);
2601 if (ret != X86EMUL_CONTINUE)
2603 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true);
2604 if (ret != X86EMUL_CONTINUE)
2606 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true);
2607 if (ret != X86EMUL_CONTINUE)
2609 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true);
2610 if (ret != X86EMUL_CONTINUE)
2612 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl, true);
2613 if (ret != X86EMUL_CONTINUE)
2615 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl, true);
2616 if (ret != X86EMUL_CONTINUE)
2619 return X86EMUL_CONTINUE;
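/*
 * Switch tasks via 32-bit TSSs.  Only the general purpose registers and
 * segment selectors of the outgoing task are written back; the static
 * fields of the old TSS are left untouched.
 */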
2622 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2623 u16 tss_selector, u16 old_tss_sel,
2624 ulong old_tss_base, struct desc_struct *new_desc)
2626 const struct x86_emulate_ops *ops = ctxt->ops;
2627 struct tss_segment_32 tss_seg;
2629 u32 new_tss_base = get_desc_base(new_desc);
2630 u32 eip_offset = offsetof(struct tss_segment_32, eip);
2631 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
2633 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2635 if (ret != X86EMUL_CONTINUE)
2636 /* FIXME: need to provide precise fault address */
2639 save_state_to_tss32(ctxt, &tss_seg);
2641 /* Only GP registers and segment selectors are saved */
2642 ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
2643 ldt_sel_offset - eip_offset, &ctxt->exception);
2644 if (ret != X86EMUL_CONTINUE)
2645 /* FIXME: need to provide precise fault address */
2648 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2650 if (ret != X86EMUL_CONTINUE)
2651 /* FIXME: need to provide precise fault address */
2654 if (old_tss_sel != 0xffff) {
2655 tss_seg.prev_task_link = old_tss_sel;
2657 ret = ops->write_std(ctxt, new_tss_base,
2658 &tss_seg.prev_task_link,
2659 sizeof tss_seg.prev_task_link,
2661 if (ret != X86EMUL_CONTINUE)
2662 /* FIXME: need to provide precise fault address */
2666 return load_state_from_tss32(ctxt, &tss_seg);
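/*
 * Common task-switch logic: validate the target TSS descriptor, apply
 * the privilege checks required for the switch reason, update the busy
 * and NT/TS bits, and defer to the 16- or 32-bit TSS helpers.
 */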
2669 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2670 u16 tss_selector, int idt_index, int reason,
2671 bool has_error_code, u32 error_code)
2673 const struct x86_emulate_ops *ops = ctxt->ops;
2674 struct desc_struct curr_tss_desc, next_tss_desc;
2676 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
2677 ulong old_tss_base =
2678 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
2682 /* FIXME: old_tss_base == ~0 ? */
2684 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
2685 if (ret != X86EMUL_CONTINUE)
2687 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
2688 if (ret != X86EMUL_CONTINUE)
2691 /* FIXME: check that next_tss_desc is tss */
2694 * Check privileges. The three cases are task switch caused by...
2696 * 1. jmp/call/int to task gate: Check against DPL of the task gate
2697 * 2. Exception/IRQ/iret: No check is performed
2698 * 3. jmp/call to TSS: Check against DPL of the TSS
2700 if (reason == TASK_SWITCH_GATE) {
2701 if (idt_index != -1) {
2702 /* Software interrupts */
2703 struct desc_struct task_gate_desc;
2706 ret = read_interrupt_descriptor(ctxt, idt_index,
2707 &task_gate_desc);
2708 if (ret != X86EMUL_CONTINUE)
2711 dpl = task_gate_desc.dpl;
2712 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2713 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
2715 } else if (reason != TASK_SWITCH_IRET) {
2716 int dpl = next_tss_desc.dpl;
2717 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2718 return emulate_gp(ctxt, tss_selector);
2722 desc_limit = desc_limit_scaled(&next_tss_desc);
2723 if (!next_tss_desc.p ||
2724 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2725 desc_limit < 0x2b)) {
2726 return emulate_ts(ctxt, tss_selector & 0xfffc);
2729 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2730 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2731 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2734 if (reason == TASK_SWITCH_IRET)
2735 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2737 /* Set the back link to the previous task only if the NT bit is set in
2738 eflags; note that old_tss_sel is not used after this point */
2739 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2740 old_tss_sel = 0xffff;
2742 if (next_tss_desc.type & 8)
2743 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
2744 old_tss_base, &next_tss_desc);
2746 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
2747 old_tss_base, &next_tss_desc);
2748 if (ret != X86EMUL_CONTINUE)
2751 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2752 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2754 if (reason != TASK_SWITCH_IRET) {
2755 next_tss_desc.type |= (1 << 1); /* set busy flag */
2756 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2759 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
2760 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
2762 if (has_error_code) {
2763 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2764 ctxt->lock_prefix = 0;
2765 ctxt->src.val = (unsigned long) error_code;
2766 ret = em_push(ctxt);
2772 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2773 u16 tss_selector, int idt_index, int reason,
2774 bool has_error_code, u32 error_code)
2778 invalidate_registers(ctxt);
2779 ctxt->_eip = ctxt->eip;
2780 ctxt->dst.type = OP_NONE;
2782 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
2783 has_error_code, error_code);
2785 if (rc == X86EMUL_CONTINUE) {
2786 ctxt->eip = ctxt->_eip;
2787 writeback_registers(ctxt);
2790 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
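/*
 * Advance a string-instruction pointer register by the number of bytes
 * processed, honoring the direction flag, and refresh the operand's
 * cached effective address.
 */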
2793 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
2796 int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
2798 register_address_increment(ctxt, reg_rmw(ctxt, reg), df * op->bytes);
2799 op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, reg));
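/* DAS: decimal adjust AL after subtraction, updating AF and CF. */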
2802 static int em_das(struct x86_emulate_ctxt *ctxt)
2805 bool af, cf, old_cf;
2807 cf = ctxt->eflags & X86_EFLAGS_CF;
2813 af = ctxt->eflags & X86_EFLAGS_AF;
2814 if ((al & 0x0f) > 9 || af) {
2816 cf = old_cf | (al >= 250);
2821 if (old_al > 0x99 || old_cf) {
2827 /* Set PF, ZF, SF */
2828 ctxt->src.type = OP_IMM;
2829 ctxt->src.val = 0;
2830 ctxt->src.bytes = 1;
2831 fastop(ctxt, em_or);
2832 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2834 ctxt->eflags |= X86_EFLAGS_CF;
2836 ctxt->eflags |= X86_EFLAGS_AF;
2837 return X86EMUL_CONTINUE;
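/* AAM: ASCII adjust AX after multiply; the immediate operand is the divisor. */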
2840 static int em_aam(struct x86_emulate_ctxt *ctxt)
2844 if (ctxt->src.val == 0)
2845 return emulate_de(ctxt);
2847 al = ctxt->dst.val & 0xff;
2848 ah = al / ctxt->src.val;
2849 al %= ctxt->src.val;
2851 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
2853 /* Set PF, ZF, SF */
2854 ctxt->src.type = OP_IMM;
2855 ctxt->src.val = 0;
2856 ctxt->src.bytes = 1;
2857 fastop(ctxt, em_or);
2859 return X86EMUL_CONTINUE;
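/* AAD: ASCII adjust AX before division; AL = AL + AH * imm8, AH = 0. */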
2862 static int em_aad(struct x86_emulate_ctxt *ctxt)
2864 u8 al = ctxt->dst.val & 0xff;
2865 u8 ah = (ctxt->dst.val >> 8) & 0xff;
2867 al = (al + (ah * ctxt->src.val)) & 0xff;
2869 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
2871 /* Set PF, ZF, SF */
2872 ctxt->src.type = OP_IMM;
2873 ctxt->src.val = 0;
2874 ctxt->src.bytes = 1;
2875 fastop(ctxt, em_or);
2877 return X86EMUL_CONTINUE;
2880 static int em_call(struct x86_emulate_ctxt *ctxt)
2882 long rel = ctxt->src.val;
2884 ctxt->src.val = (unsigned long)ctxt->_eip;
2886 return em_push(ctxt);
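/* CALL far: load the new CS, then push the old CS and IP and jump to the target. */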
2889 static int em_call_far(struct x86_emulate_ctxt *ctxt)
2895 old_cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2896 old_eip = ctxt->_eip;
2898 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2899 if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS))
2900 return X86EMUL_CONTINUE;
2903 memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
2905 ctxt->src.val = old_cs;
2906 rc = em_push(ctxt);
2907 if (rc != X86EMUL_CONTINUE)
2910 ctxt->src.val = old_eip;
2911 return em_push(ctxt);
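/* RET imm16: pop the return address, then release imm16 extra bytes of stack. */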
2914 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
2918 ctxt->dst.type = OP_REG;
2919 ctxt->dst.addr.reg = &ctxt->_eip;
2920 ctxt->dst.bytes = ctxt->op_bytes;
2921 rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
2922 if (rc != X86EMUL_CONTINUE)
2924 rsp_increment(ctxt, ctxt->src.val);
2925 return X86EMUL_CONTINUE;
2928 static int em_xchg(struct x86_emulate_ctxt *ctxt)
2930 /* Write back the register source. */
2931 ctxt->src.val = ctxt->dst.val;
2932 write_register_operand(&ctxt->src);
2934 /* Write back the memory destination with implicit LOCK prefix. */
2935 ctxt->dst.val = ctxt->src.orig_val;
2936 ctxt->lock_prefix = 1;
2937 return X86EMUL_CONTINUE;
2940 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
2942 ctxt->dst.val = ctxt->src2.val;
2943 return fastop(ctxt, em_imul);
2946 static int em_cwd(struct x86_emulate_ctxt *ctxt)
2948 ctxt->dst.type = OP_REG;
2949 ctxt->dst.bytes = ctxt->src.bytes;
2950 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
2951 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
2953 return X86EMUL_CONTINUE;
2956 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
2960 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
2961 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
2962 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
2963 return X86EMUL_CONTINUE;
2966 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
2970 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
2971 return emulate_gp(ctxt, 0);
2972 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
2973 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
2974 return X86EMUL_CONTINUE;
2977 static int em_mov(struct x86_emulate_ctxt *ctxt)
2979 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
2980 return X86EMUL_CONTINUE;
2983 #define FFL(x) bit(X86_FEATURE_##x)
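/*
 * MOVBE: move data while swapping its byte order.  Only allowed when
 * the MOVBE feature bit is exposed in the guest-visible CPUID.
 */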
2985 static int em_movbe(struct x86_emulate_ctxt *ctxt)
2987 u32 ebx, ecx, edx, eax = 1;
2991 * Check that MOVBE is set in the guest-visible CPUID leaf.
2993 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2994 if (!(ecx & FFL(MOVBE)))
2995 return emulate_ud(ctxt);
2997 switch (ctxt->op_bytes) {
3000 * From MOVBE definition: "...When the operand size is 16 bits,
3001 * the upper word of the destination register remains unchanged
3004 * Casting either ->valptr or ->val to u16 breaks the strict-aliasing
3005 * rules, so we have to do the operation almost by hand.
3007 tmp = (u16)ctxt->src.val;
3008 ctxt->dst.val &= ~0xffffUL;
3009 ctxt->dst.val |= (unsigned long)swab16(tmp);
3012 ctxt->dst.val = swab32((u32)ctxt->src.val);
3015 ctxt->dst.val = swab64(ctxt->src.val);
3020 return X86EMUL_CONTINUE;
3023 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3025 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3026 return emulate_gp(ctxt, 0);
3028 /* Disable writeback. */
3029 ctxt->dst.type = OP_NONE;
3030 return X86EMUL_CONTINUE;
3033 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3037 if (ctxt->mode == X86EMUL_MODE_PROT64)
3038 val = ctxt->src.val & ~0ULL;
3040 val = ctxt->src.val & ~0U;
3042 /* #UD condition is already handled. */
3043 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3044 return emulate_gp(ctxt, 0);
3046 /* Disable writeback. */
3047 ctxt->dst.type = OP_NONE;
3048 return X86EMUL_CONTINUE;
3051 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3055 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3056 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3057 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3058 return emulate_gp(ctxt, 0);
3060 return X86EMUL_CONTINUE;
3063 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3067 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3068 return emulate_gp(ctxt, 0);
3070 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3071 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3072 return X86EMUL_CONTINUE;
3075 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3077 if (ctxt->modrm_reg > VCPU_SREG_GS)
3078 return emulate_ud(ctxt);
3080 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
3081 return X86EMUL_CONTINUE;
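/*
 * MOV Sreg, r/m: loading CS this way is invalid, and a load of SS
 * inhibits interrupts until after the next instruction.
 */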
3084 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3086 u16 sel = ctxt->src.val;
3088 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3089 return emulate_ud(ctxt);
3091 if (ctxt->modrm_reg == VCPU_SREG_SS)
3092 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3094 /* Disable writeback. */
3095 ctxt->dst.type = OP_NONE;
3096 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3099 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3101 u16 sel = ctxt->src.val;
3103 /* Disable writeback. */
3104 ctxt->dst.type = OP_NONE;
3105 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3108 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3110 u16 sel = ctxt->src.val;
3112 /* Disable writeback. */
3113 ctxt->dst.type = OP_NONE;
3114 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3117 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3122 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3123 if (rc == X86EMUL_CONTINUE)
3124 ctxt->ops->invlpg(ctxt, linear);
3125 /* Disable writeback. */
3126 ctxt->dst.type = OP_NONE;
3127 return X86EMUL_CONTINUE;
3130 static int em_clts(struct x86_emulate_ctxt *ctxt)
3134 cr0 = ctxt->ops->get_cr(ctxt, 0);
3136 ctxt->ops->set_cr(ctxt, 0, cr0);
3137 return X86EMUL_CONTINUE;
3140 static int em_vmcall(struct x86_emulate_ctxt *ctxt)
3142 int rc = ctxt->ops->fix_hypercall(ctxt);
3144 if (rc != X86EMUL_CONTINUE)
3147 /* Let the processor re-execute the fixed hypercall */
3148 ctxt->_eip = ctxt->eip;
3149 /* Disable writeback. */
3150 ctxt->dst.type = OP_NONE;
3151 return X86EMUL_CONTINUE;
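/*
 * SGDT/SIDT helper: store the descriptor-table register returned by
 * 'get' to memory; with a 16-bit operand size the base is truncated to
 * 24 bits.
 */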
3154 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3155 void (*get)(struct x86_emulate_ctxt *ctxt,
3156 struct desc_ptr *ptr))
3158 struct desc_ptr desc_ptr;
3160 if (ctxt->mode == X86EMUL_MODE_PROT64)
3162 get(ctxt, &desc_ptr);
3163 if (ctxt->op_bytes == 2) {
3165 desc_ptr.address &= 0x00ffffff;
3167 /* Disable writeback. */
3168 ctxt->dst.type = OP_NONE;
3169 return segmented_write(ctxt, ctxt->dst.addr.mem,
3170 &desc_ptr, 2 + ctxt->op_bytes);
3173 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3175 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3178 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3180 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3183 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3185 struct desc_ptr desc_ptr;
3188 if (ctxt->mode == X86EMUL_MODE_PROT64)
3190 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3191 &desc_ptr.size, &desc_ptr.address,
3193 if (rc != X86EMUL_CONTINUE)
3195 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3196 /* Disable writeback. */
3197 ctxt->dst.type = OP_NONE;
3198 return X86EMUL_CONTINUE;
3201 static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
3205 rc = ctxt->ops->fix_hypercall(ctxt);
3207 /* Disable writeback. */
3208 ctxt->dst.type = OP_NONE;
3212 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3214 struct desc_ptr desc_ptr;
3217 if (ctxt->mode == X86EMUL_MODE_PROT64)
3219 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3220 &desc_ptr.size, &desc_ptr.address,
3222 if (rc != X86EMUL_CONTINUE)
3224 ctxt->ops->set_idt(ctxt, &desc_ptr);
3225 /* Disable writeback. */
3226 ctxt->dst.type = OP_NONE;
3227 return X86EMUL_CONTINUE;
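/* SMSW: store the machine status word (CR0); memory destinations get 16 bits. */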
3230 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3232 if (ctxt->dst.type == OP_MEM)
3233 ctxt->dst.bytes = 2;
3234 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3235 return X86EMUL_CONTINUE;
3238 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3240 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3241 | (ctxt->src.val & 0x0f));
3242 ctxt->dst.type = OP_NONE;
3243 return X86EMUL_CONTINUE;
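/*
 * LOOP/LOOPE/LOOPNE: decrement rCX and take the relative jump while rCX
 * is non-zero and the opcode's condition (if any) holds.
 */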
3246 static int em_loop(struct x86_emulate_ctxt *ctxt)
3248 register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
3249 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3250 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3251 jmp_rel(ctxt, ctxt->src.val);
3253 return X86EMUL_CONTINUE;
3256 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3258 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3259 jmp_rel(ctxt, ctxt->src.val);
3261 return X86EMUL_CONTINUE;
3264 static int em_in(struct x86_emulate_ctxt *ctxt)
3266 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3268 return X86EMUL_IO_NEEDED;
3270 return X86EMUL_CONTINUE;
3273 static int em_out(struct x86_emulate_ctxt *ctxt)
3275 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3277 /* Disable writeback. */
3278 ctxt->dst.type = OP_NONE;
3279 return X86EMUL_CONTINUE;
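/* CLI and STI require sufficient IOPL; STI also sets the STI interrupt shadow. */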
3282 static int em_cli(struct x86_emulate_ctxt *ctxt)
3284 if (emulator_bad_iopl(ctxt))
3285 return emulate_gp(ctxt, 0);
3287 ctxt->eflags &= ~X86_EFLAGS_IF;
3288 return X86EMUL_CONTINUE;
3291 static int em_sti(struct x86_emulate_ctxt *ctxt)
3293 if (emulator_bad_iopl(ctxt))
3294 return emulate_gp(ctxt, 0);
3296 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3297 ctxt->eflags |= X86_EFLAGS_IF;
3298 return X86EMUL_CONTINUE;
3301 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3303 u32 eax, ebx, ecx, edx;
3305 eax = reg_read(ctxt, VCPU_REGS_RAX);
3306 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3307 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);