// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
#include <linux/stringify.h>
#include <asm/debugreg.h>
#include <asm/nospec-branch.h>

#include "x86.h"
#include "tss.h"
#include "mmu.h"
#include "pmu.h"

/*
 * Operand types
 */
#define OpNone       0ull
#define OpImplicit   1ull   /* No generic decode */
#define OpReg        2ull   /* Register */
#define OpMem        3ull   /* Memory */
#define OpAcc        4ull   /* Accumulator: AL/AX/EAX/RAX */
#define OpDI         5ull   /* ES:DI/EDI/RDI */
#define OpMem64      6ull   /* Memory, 64-bit */
#define OpImmUByte   7ull   /* Zero-extended 8-bit immediate */
#define OpDX         8ull   /* DX register */
#define OpCL         9ull   /* CL register (for shifts) */
#define OpImmByte   10ull   /* 8-bit sign extended immediate */
#define OpOne       11ull   /* Implied 1 */
#define OpImm       12ull   /* Sign extended up to 32-bit immediate */
#define OpMem16     13ull   /* Memory operand (16-bit). */
#define OpMem32     14ull   /* Memory operand (32-bit). */
#define OpImmU      15ull   /* Immediate operand, zero extended */
#define OpSI        16ull   /* SI/ESI/RSI */
#define OpImmFAddr  17ull   /* Immediate far address */
#define OpMemFAddr  18ull   /* Far address in memory */
#define OpImmU16    19ull   /* Immediate operand, 16 bits, zero extended */
#define OpES        20ull   /* ES */
#define OpCS        21ull   /* CS */
#define OpSS        22ull   /* SS */
#define OpDS        23ull   /* DS */
#define OpFS        24ull   /* FS */
#define OpGS        25ull   /* GS */
#define OpMem8      26ull   /* 8-bit zero extended memory operand */
#define OpImm64     27ull   /* Sign extended 16/32/64-bit immediate */
#define OpXLat      28ull   /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo     29ull   /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi     30ull   /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits      5       /* Width of operand field */
#define OpMask      ((1ull << OpBits) - 1)

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)      /* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstMem16    (OpMem16 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)     /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)     /* Escape to coprocessor instruction */
#define InstrDual   (6<<15)     /* Alternate instruction decoding of mod == 3 */
#define ModeDual    (7<<15)     /* Different instruction for 32/64 bit */
#define Sse         (1<<18)     /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21)     /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22)     /* Emulate if unsupported by the host */
#define NoAccess    (1<<23)     /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24)     /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25)     /* No Such Instruction */
#define Lock        (1<<26)     /* lock prefix is allowed for the instruction */
#define Priv        (1<<27)     /* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
#define PageTable   (1 << 29)   /* instruction used to write page table */
#define NotImpl     (1 << 30)   /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define AlignMask   ((u64)7 << 41)
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)2 << 41)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)3 << 41)  /* Advanced Vector Extensions */
#define Aligned16   ((u64)4 << 41)  /* Aligned to 16 byte boundary (e.g. FXSAVE) */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod       ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */
#define No16        ((u64)1 << 53)  /* No 16 bit operand */
#define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */
#define TwoMemOp    ((u64)1 << 55)  /* Instruction has two memory operands */
#define IsBranch    ((u64)1 << 56)  /* Instruction is considered a branch. */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)

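/*
 * Worked example (illustrative): a table entry built as
 * DstMem | SrcReg | ModRM | Lock (the 0x01 "add r/m, reg" form) packs
 * OpMem into bits 1-5 (DstShift) and OpReg into bits 6-10 (SrcShift),
 * plus the ModRM and Lock bits; the decoder recovers each operand type
 * with (flags >> shift) & OpMask.
 */
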
#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

struct opcode {
        u64 flags;
        u8 intercept;
        u8 pad[7];
        union {
                int (*execute)(struct x86_emulate_ctxt *ctxt);
                const struct opcode *group;
                const struct group_dual *gdual;
                const struct gprefix *gprefix;
                const struct escape *esc;
                const struct instr_dual *idual;
                const struct mode_dual *mdual;
                void (*fastop)(struct fastop *fake);
        } u;
        int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
        struct opcode mod012[8];
        struct opcode mod3[8];
};

struct gprefix {
        struct opcode pfx_no;
        struct opcode pfx_66;
        struct opcode pfx_f2;
        struct opcode pfx_f3;
};

struct escape {
        struct opcode op[8];
        struct opcode high[64];
};

struct instr_dual {
        struct opcode mod012;
        struct opcode mod3;
};

struct mode_dual {
        struct opcode mode32;
        struct opcode mode64;
};

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a

enum x86_transfer_type {
        X86_TRANSFER_NONE,
        X86_TRANSFER_CALL_JMP,
        X86_TRANSFER_RET,
        X86_TRANSFER_TASK_SWITCH,
};

static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
        if (!(ctxt->regs_valid & (1 << nr))) {
                ctxt->regs_valid |= 1 << nr;
                ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
        }
        return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
        ctxt->regs_valid |= 1 << nr;
        ctxt->regs_dirty |= 1 << nr;
        return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
        reg_read(ctxt, nr);
        return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
        unsigned reg;

        for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
                ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
        ctxt->regs_dirty = 0;
        ctxt->regs_valid = 0;
}

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
                     X86_EFLAGS_PF|X86_EFLAGS_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 */
static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
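
/*
 * Illustrative sketch of the dispatch this enables (mirroring the
 * fastop() implementation later in this file): the size variants are
 * emitted b/w/l/q at FASTOP_SIZE strides, so a 4-byte ADD is reached
 * with "fop = em_add + __ffs(4) * FASTOP_SIZE", i.e. the third slot,
 * which holds the "addl" variant.
 */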

#define __FOP_FUNC(name) \
        ".align " __stringify(FASTOP_SIZE) " \n\t" \
        ".type " name ", @function \n\t" \
        name ":\n\t"

#define FOP_FUNC(name) \
        __FOP_FUNC(#name)

#define __FOP_RET(name) \
        "11: " ASM_RET \
        ".size " name ", .-" name "\n\t"

#define FOP_RET(name) \
        __FOP_RET(#name)

#define FOP_START(op) \
        extern void em_##op(struct fastop *fake); \
        asm(".pushsection .text, \"ax\" \n\t" \
            ".global em_" #op " \n\t" \
            ".align " __stringify(FASTOP_SIZE) " \n\t" \
            "em_" #op ":\n\t"

#define FOP_END \
            ".popsection")

#define __FOPNOP(name) \
        __FOP_FUNC(name) \
        __FOP_RET(name)

#define FOPNOP() \
        __FOPNOP(__stringify(__UNIQUE_ID(nop)))

#define FOP1E(op, dst) \
        __FOP_FUNC(#op "_" #dst) \
        "10: " #op " %" #dst " \n\t" \
        __FOP_RET(#op "_" #dst)

#define FOP1EEX(op, dst) \
        FOP1E(op, dst) _ASM_EXTABLE_TYPE_REG(10b, 11b, EX_TYPE_ZERO_REG, %%esi)

#define FASTOP1(op) \
        FOP_START(op) \
        FOP1E(op##b, al) \
        FOP1E(op##w, ax) \
        FOP1E(op##l, eax) \
        ON64(FOP1E(op##q, rax)) \
        FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
        FOP_START(name) \
        FOP1E(op, cl) \
        FOP1E(op, cx) \
        FOP1E(op, ecx) \
        ON64(FOP1E(op, rcx)) \
        FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
        FOP_START(name) \
        FOP1EEX(op, cl) \
        FOP1EEX(op, cx) \
        FOP1EEX(op, ecx) \
        ON64(FOP1EEX(op, rcx)) \
        FOP_END

#define FOP2E(op, dst, src) \
        __FOP_FUNC(#op "_" #dst "_" #src) \
        #op " %" #src ", %" #dst " \n\t" \
        __FOP_RET(#op "_" #dst "_" #src)

#define FASTOP2(op) \
        FOP_START(op) \
        FOP2E(op##b, al, dl) \
        FOP2E(op##w, ax, dx) \
        FOP2E(op##l, eax, edx) \
        ON64(FOP2E(op##q, rax, rdx)) \
        FOP_END

/* 2 operand, word only */
#define FASTOP2W(op) \
        FOP_START(op) \
        FOPNOP() \
        FOP2E(op##w, ax, dx) \
        FOP2E(op##l, eax, edx) \
        ON64(FOP2E(op##q, rax, rdx)) \
        FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
        FOP_START(op) \
        FOP2E(op##b, al, cl) \
        FOP2E(op##w, ax, cl) \
        FOP2E(op##l, eax, cl) \
        ON64(FOP2E(op##q, rax, cl)) \
        FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
        FOP_START(name) \
        FOP2E(op##b, dl, al) \
        FOP2E(op##w, dx, ax) \
        FOP2E(op##l, edx, eax) \
        ON64(FOP2E(op##q, rdx, rax)) \
        FOP_END

#define FOP3E(op, dst, src, src2) \
        __FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
        #op " %" #src2 ", %" #src ", %" #dst " \n\t"\
        __FOP_RET(#op "_" #dst "_" #src "_" #src2)

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
        FOP_START(op) \
        FOPNOP() \
        FOP3E(op##w, ax, dx, cl) \
        FOP3E(op##l, eax, edx, cl) \
        ON64(FOP3E(op##q, rax, rdx, cl)) \
        FOP_END

/* Special case for SETcc - 1 instruction per cc */

/*
 * Depending on .config the SETcc functions look like:
 *
 * SETcc %al   [3 bytes]
 * RET         [1 byte]
 * INT3        [1 byte; CONFIG_SLS]
 *
 * Which gives possible sizes 4 or 5. When rounded up to the
 * next power-of-two alignment they become 4 or 8.
 */
#define SETCC_LENGTH (4 + IS_ENABLED(CONFIG_SLS))
#define SETCC_ALIGN  (4 << IS_ENABLED(CONFIG_SLS))
static_assert(SETCC_LENGTH <= SETCC_ALIGN);
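
/*
 * The stubs below are emitted back to back at SETCC_ALIGN strides, in
 * x86 condition-code order, so test_cc() further down can locate one as
 * em_setcc + SETCC_ALIGN * (cc & 0xf); e.g. "setz" (condition code 0x4)
 * sits at em_setcc + 4 * SETCC_ALIGN.
 */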

#define FOP_SETCC(op) \
        ".align " __stringify(SETCC_ALIGN) " \n\t" \
        ".type " #op ", @function \n\t" \
        #op ": \n\t" \
        #op " %al \n\t" \
        __FOP_RET(#op)

FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;

FOP_START(salc)
FOP_FUNC(salc)
"pushf; sbb %al, %al; popf \n\t"
FOP_RET(salc)
FOP_END;

/*
 * XXX: inoutclob user must know where the argument is being expanded.
 * Relying on CONFIG_CC_HAS_ASM_GOTO would allow us to remove _fault.
 */
#define asm_safe(insn, inoutclob...) \
({ \
        int _fault = 0; \
 \
        asm volatile("1:" insn "\n" \
                     "2:\n" \
                     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_ONE_REG, %[_fault]) \
                     : [_fault] "+r"(_fault) inoutclob ); \
 \
        _fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
})
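
/*
 * Typical use (see the FXSAVE/FXRSTOR emulation elsewhere in this file):
 * run a single faultable instruction and turn a fault at that
 * instruction into an emulator status code instead of a host oops,
 * e.g. rc = asm_safe("fxsave %[fx]", : [fx] "+m"(fx_state));
 */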

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
                                    enum x86_intercept intercept,
                                    enum x86_intercept_stage stage)
{
        struct x86_instruction_info info = {
                .intercept  = intercept,
                .rep_prefix = ctxt->rep_prefix,
                .modrm_mod  = ctxt->modrm_mod,
                .modrm_reg  = ctxt->modrm_reg,
                .modrm_rm   = ctxt->modrm_rm,
                .src_val    = ctxt->src.val64,
                .dst_val    = ctxt->dst.val64,
                .src_bytes  = ctxt->src.bytes,
                .dst_bytes  = ctxt->dst.bytes,
                .ad_bytes   = ctxt->ad_bytes,
                .next_rip   = ctxt->eip,
        };

        return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
        *dest = (*dest & ~mask) | (src & mask);
}

static void assign_register(unsigned long *reg, u64 val, int bytes)
{
        /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
        switch (bytes) {
        case 1:
                *(u8 *)reg = (u8)val;
                break;
        case 2:
                *(u16 *)reg = (u16)val;
                break;
        case 4:
                *reg = (u32)val;
                break;  /* 64b: zero-extend */
        case 8:
                *reg = val;
                break;
        }
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
        return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
        u16 sel;
        struct desc_struct ss;

        if (ctxt->mode == X86EMUL_MODE_PROT64)
                return ~0UL;
        ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
        return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
        return (__fls(stack_mask(ctxt)) + 1) >> 3;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
        if (ctxt->ad_bytes == sizeof(unsigned long))
                return reg;
        else
                return reg & ad_mask(ctxt);
}
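
/*
 * Worked example: with a 0x67 address-size prefix in 32-bit code,
 * ctxt->ad_bytes is 2, ad_mask() yields 0xffff, and address_mask()
 * truncates e.g. 0x00123456 to 0x3456, giving 16-bit wrap-around
 * semantics.
 */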

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
        return address_mask(ctxt, reg_read(ctxt, reg));
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
        assign_masked(reg, *reg + inc, mask);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
        ulong *preg = reg_rmw(ctxt, reg);

        assign_register(preg, *preg + inc, ctxt->ad_bytes);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
        masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
        u32 limit = get_desc_limit(desc);

        return desc->g ? (limit << 12) | 0xfff : limit;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
        if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
                return 0;

        return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
                             u32 error, bool valid)
{
        WARN_ON(vec > 0x1f);
        ctxt->exception.vector = vec;
        ctxt->exception.error_code = error;
        ctxt->exception.error_code_valid = valid;
        return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
        return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
        return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
        return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
        return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
        return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
        return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
        return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
        u16 selector;
        struct desc_struct desc;

        ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
        return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
                                 unsigned seg)
{
        u16 dummy;
        u32 base3;
        struct desc_struct desc;

        ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
        ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
{
        return (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_LA57) ? 57 : 48;
}

static inline bool emul_is_noncanonical_address(u64 la,
                                                struct x86_emulate_ctxt *ctxt)
{
        return get_canonical(la, ctxt_virt_addr_bits(ctxt)) != la;
}

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check. FXSAVE and FXRSTOR are checked here too as their
 * 512 bytes of data must be aligned to a 16 byte boundary.
 */
static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
{
        u64 alignment = ctxt->d & AlignMask;

        if (likely(size < 16))
                return 1;

        switch (alignment) {
        case Unaligned:
        case Avx:
                return 1;
        case Aligned16:
                return 16;
        case Aligned:
        default:
                return size;
        }
}
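
/*
 * Example of the policy above: a 16-byte MOVDQA access (flagged
 * Aligned) must sit on a 16-byte boundary or __linearize() below
 * raises #GP, while MOVDQU (Unaligned) and AVX-encoded accesses (Avx)
 * are never alignment-checked.
 */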

static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
                                       struct segmented_address addr,
                                       unsigned *max_size, unsigned size,
                                       bool write, bool fetch,
                                       enum x86emul_mode mode, ulong *linear)
{
        struct desc_struct desc;
        bool usable;
        ulong la;
        u32 lim;
        u16 sel;
        u8 va_bits;

        la = seg_base(ctxt, addr.seg) + addr.ea;
        *max_size = 0;
        switch (mode) {
        case X86EMUL_MODE_PROT64:
                *linear = la;
                va_bits = ctxt_virt_addr_bits(ctxt);
                if (get_canonical(la, va_bits) != la)
                        goto bad;

                *max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
                if (size > *max_size)
                        goto bad;
                break;
        default:
                *linear = la = (u32)la;
                usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
                                                addr.seg);
                if (!usable)
                        goto bad;
                /* code segment in protected mode or read-only data segment */
                if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
                     || !(desc.type & 2)) && write)
                        goto bad;
                /* unreadable code segment */
                if (!fetch && (desc.type & 8) && !(desc.type & 2))
                        goto bad;
                lim = desc_limit_scaled(&desc);
                if (!(desc.type & 8) && (desc.type & 4)) {
                        /* expand-down segment */
                        if (addr.ea <= lim)
                                goto bad;
                        lim = desc.d ? 0xffffffff : 0xffff;
                }
                if (addr.ea > lim)
                        goto bad;
                if (lim == 0xffffffff)
                        *max_size = ~0u;
                else {
                        *max_size = (u64)lim + 1 - addr.ea;
                        if (size > *max_size)
                                goto bad;
                }
                break;
        }
        if (la & (insn_alignment(ctxt, size) - 1))
                return emulate_gp(ctxt, 0);
        return X86EMUL_CONTINUE;
bad:
        if (addr.seg == VCPU_SREG_SS)
                return emulate_ss(ctxt, 0);
        else
                return emulate_gp(ctxt, 0);
}
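
/*
 * Worked example for the expand-down branch above: with limit 0x0fff
 * on a 32-bit (desc.d == 1) expand-down data segment, offsets
 * 0x0000-0x0fff fault and the valid range is 0x1000-0xffffffff; hence
 * the code rejects ea <= lim first and then rechecks against the
 * raised limit.
 */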

static int linearize(struct x86_emulate_ctxt *ctxt,
                     struct segmented_address addr,
                     unsigned size, bool write,
                     ulong *linear)
{
        unsigned max_size;
        return __linearize(ctxt, addr, &max_size, size, write, false,
                           ctxt->mode, linear);
}

static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
                             enum x86emul_mode mode)
{
        ulong linear;
        int rc;
        unsigned max_size;
        struct segmented_address addr = { .seg = VCPU_SREG_CS,
                                          .ea = dst };

        if (ctxt->op_bytes != sizeof(unsigned long))
                addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
        rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
        if (rc == X86EMUL_CONTINUE)
                ctxt->_eip = addr.ea;
        return rc;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
        return assign_eip(ctxt, dst, ctxt->mode);
}

static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
                          const struct desc_struct *cs_desc)
{
        enum x86emul_mode mode = ctxt->mode;
        int rc;

#ifdef CONFIG_X86_64
        if (ctxt->mode >= X86EMUL_MODE_PROT16) {
                if (cs_desc->l) {
                        u64 efer = 0;

                        ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
                        if (efer & EFER_LMA)
                                mode = X86EMUL_MODE_PROT64;
                } else
                        mode = X86EMUL_MODE_PROT32; /* temporary value */
        }
#endif
        if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
                mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
        rc = assign_eip(ctxt, dst, mode);
        if (rc == X86EMUL_CONTINUE)
                ctxt->mode = mode;
        return rc;
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
        return assign_eip_near(ctxt, ctxt->_eip + rel);
}

static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
                              void *data, unsigned size)
{
        return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
}

static int linear_write_system(struct x86_emulate_ctxt *ctxt,
                               ulong linear, void *data,
                               unsigned int size)
{
        return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
                              struct segmented_address addr,
                              void *data,
                              unsigned size)
{
        int rc;
        ulong linear;

        rc = linearize(ctxt, addr, size, false, &linear);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
}

static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
                               struct segmented_address addr,
                               void *data,
                               unsigned int size)
{
        int rc;
        ulong linear;

        rc = linearize(ctxt, addr, size, true, &linear);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
}

/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
        int rc;
        unsigned size, max_size;
        unsigned long linear;
        int cur_size = ctxt->fetch.end - ctxt->fetch.data;
        struct segmented_address addr = { .seg = VCPU_SREG_CS,
                                          .ea = ctxt->eip + cur_size };

        /*
         * We do not know exactly how many bytes will be needed, and
         * __linearize is expensive, so fetch as much as possible.  We
         * just have to avoid going beyond the 15 byte limit, the end
         * of the segment, or the end of the page.
         *
         * __linearize is called with size 0 so that it does not do any
         * boundary check itself.  Instead, we use max_size to check
         * against op_size.
         */
        rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
                         &linear);
        if (unlikely(rc != X86EMUL_CONTINUE))
                return rc;

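        /*
         * cur_size is always < 15 here, so "15UL ^ cur_size" is just a
         * branchless way to compute 15 - cur_size: the bytes still
         * allowed under the architectural 15-byte instruction limit.
         */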
        size = min_t(unsigned, 15UL ^ cur_size, max_size);
        size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

        /*
         * One instruction can only straddle two pages, and one page has
         * already been loaded at the beginning of x86_decode_insn.  So,
         * if there still are not enough bytes, we must have hit the
         * 15-byte instruction-length limit.
         */
        if (unlikely(size < op_size))
                return emulate_gp(ctxt, 0);

        rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
                              size, &ctxt->exception);
        if (unlikely(rc != X86EMUL_CONTINUE))
                return rc;
        ctxt->fetch.end += size;
        return X86EMUL_CONTINUE;
}

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
                                               unsigned size)
{
        unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

        if (unlikely(done_size < size))
                return __do_insn_fetch_bytes(ctxt, size - done_size);
        else
                return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt) \
({      _type _x; \
 \
        rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
        if (rc != X86EMUL_CONTINUE) \
                goto done; \
        ctxt->_eip += sizeof(_type); \
        memcpy(&_x, ctxt->fetch.ptr, sizeof(_type)); \
        ctxt->fetch.ptr += sizeof(_type); \
        _x; \
})

#define insn_fetch_arr(_arr, _size, _ctxt) \
({ \
        rc = do_insn_fetch_bytes(_ctxt, _size); \
        if (rc != X86EMUL_CONTINUE) \
                goto done; \
        ctxt->_eip += (_size); \
        memcpy(_arr, ctxt->fetch.ptr, _size); \
        ctxt->fetch.ptr += (_size); \
})

/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @byteop specifies whether the operand is a byte; without a REX prefix,
 * byte registers 4-7 decode to the legacy high-byte registers AH,CH,DH,BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
                             int byteop)
{
        void *p;
        int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

        if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
                p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
        else
                p = reg_rmw(ctxt, modrm_reg);
        return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
                           struct segmented_address addr,
                           u16 *size, unsigned long *address, int op_bytes)
{
        int rc;

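        /*
         * LGDT/LIDT with a 16-bit operand size still read a 24-bit
         * base, hence the bump from 2 to 3 bytes below; the top byte
         * of a 32-bit base is ignored in that case.
         */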
        if (op_bytes == 2)
                op_bytes = 3;
        *address = 0;
        rc = segmented_read_std(ctxt, addr, size, 2);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        addr.ea += 2;
        rc = segmented_read_std(ctxt, addr, address, op_bytes);
        return rc;
}

FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP3WCL(shld);
FASTOP3WCL(shrd);

FASTOP2W(imul);

FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);

FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);

FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);

FASTOP2(xadd);

FASTOP2R(cmp, cmp_r);

static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
{
        /* If src is zero, do not writeback, but update flags */
        if (ctxt->src.val == 0)
                ctxt->dst.type = OP_NONE;
        return fastop(ctxt, em_bsf);
}

static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
{
        /* If src is zero, do not writeback, but update flags */
        if (ctxt->src.val == 0)
                ctxt->dst.type = OP_NONE;
        return fastop(ctxt, em_bsr);
}

static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
{
        u8 rc;
        void (*fop)(void) = (void *)em_setcc + SETCC_ALIGN * (condition & 0xf);

        flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
        asm("push %[flags]; popf; " CALL_NOSPEC
            : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags));
        return rc;
}

static void fetch_register_operand(struct operand *op)
{
        switch (op->bytes) {
        case 1:
                op->val = *(u8 *)op->addr.reg;
                break;
        case 2:
                op->val = *(u16 *)op->addr.reg;
                break;
        case 4:
                op->val = *(u32 *)op->addr.reg;
                break;
        case 8:
                op->val = *(u64 *)op->addr.reg;
                break;
        }
}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
        if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
                return emulate_nm(ctxt);

        kvm_fpu_get();
        asm volatile("fninit");
        kvm_fpu_put();
        return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
        u16 fcw;

        if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
                return emulate_nm(ctxt);

        kvm_fpu_get();
        asm volatile("fnstcw %0": "+m"(fcw));
        kvm_fpu_put();

        ctxt->dst.val = fcw;

        return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
        u16 fsw;

        if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
                return emulate_nm(ctxt);

        kvm_fpu_get();
        asm volatile("fnstsw %0": "+m"(fsw));
        kvm_fpu_put();

        ctxt->dst.val = fsw;

        return X86EMUL_CONTINUE;
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
                                    struct operand *op)
{
        unsigned reg = ctxt->modrm_reg;

        if (!(ctxt->d & ModRM))
                reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

        if (ctxt->d & Sse) {
                op->type = OP_XMM;
                op->bytes = 16;
                op->addr.xmm = reg;
                kvm_read_sse_reg(reg, &op->vec_val);
                return;
        }
        if (ctxt->d & Mmx) {
                reg &= 7;
                op->type = OP_MM;
                op->bytes = 8;
                op->addr.mm = reg;
                return;
        }

        op->type = OP_REG;
        op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
        op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

        fetch_register_operand(op);
        op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
        if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
                ctxt->modrm_seg = VCPU_SREG_SS;
}

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
                        struct operand *op)
{
        u8 sib;
        int index_reg, base_reg, scale;
        int rc = X86EMUL_CONTINUE;
        ulong modrm_ea = 0;

        ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
        index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
        base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

        ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
        ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
        ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
        ctxt->modrm_seg = VCPU_SREG_DS;

        if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
                op->type = OP_REG;
                op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
                op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
                                               ctxt->d & ByteOp);
                if (ctxt->d & Sse) {
                        op->type = OP_XMM;
                        op->bytes = 16;
                        op->addr.xmm = ctxt->modrm_rm;
                        kvm_read_sse_reg(ctxt->modrm_rm, &op->vec_val);
                        return rc;
                }
                if (ctxt->d & Mmx) {
                        op->type = OP_MM;
                        op->bytes = 8;
                        op->addr.mm = ctxt->modrm_rm & 7;
                        return rc;
                }
                fetch_register_operand(op);
                return rc;
        }

        op->type = OP_MEM;

        if (ctxt->ad_bytes == 2) {
                unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
                unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
                unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
                unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

                /* 16-bit ModR/M decode. */
                switch (ctxt->modrm_mod) {
                case 0:
                        if (ctxt->modrm_rm == 6)
                                modrm_ea += insn_fetch(u16, ctxt);
                        break;
                case 1:
                        modrm_ea += insn_fetch(s8, ctxt);
                        break;
                case 2:
                        modrm_ea += insn_fetch(u16, ctxt);
                        break;
                }
                switch (ctxt->modrm_rm) {
                case 0:
                        modrm_ea += bx + si;
                        break;
                case 1:
                        modrm_ea += bx + di;
                        break;
                case 2:
                        modrm_ea += bp + si;
                        break;
                case 3:
                        modrm_ea += bp + di;
                        break;
                case 4:
                        modrm_ea += si;
                        break;
                case 5:
                        modrm_ea += di;
                        break;
                case 6:
                        if (ctxt->modrm_mod != 0)
                                modrm_ea += bp;
                        break;
                case 7:
                        modrm_ea += bx;
                        break;
                }
                if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
                    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
                        ctxt->modrm_seg = VCPU_SREG_SS;
                modrm_ea = (u16)modrm_ea;
        } else {
                /* 32/64-bit ModR/M decode. */
                if ((ctxt->modrm_rm & 7) == 4) {
                        sib = insn_fetch(u8, ctxt);
                        index_reg |= (sib >> 3) & 7;
                        base_reg |= sib & 7;
                        scale = sib >> 6;

                        if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
                                modrm_ea += insn_fetch(s32, ctxt);
                        else {
                                modrm_ea += reg_read(ctxt, base_reg);
                                adjust_modrm_seg(ctxt, base_reg);
                                /* Increment ESP on POP [ESP] */
                                if ((ctxt->d & IncSP) &&
                                    base_reg == VCPU_REGS_RSP)
                                        modrm_ea += ctxt->op_bytes;
                        }
                        if (index_reg != 4)
                                modrm_ea += reg_read(ctxt, index_reg) << scale;
                } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
                        modrm_ea += insn_fetch(s32, ctxt);
                        if (ctxt->mode == X86EMUL_MODE_PROT64)
                                ctxt->rip_relative = 1;
                } else {
                        base_reg = ctxt->modrm_rm;
                        modrm_ea += reg_read(ctxt, base_reg);
                        adjust_modrm_seg(ctxt, base_reg);
                }
                switch (ctxt->modrm_mod) {
                case 1:
                        modrm_ea += insn_fetch(s8, ctxt);
                        break;
                case 2:
                        modrm_ea += insn_fetch(s32, ctxt);
                        break;
                }
        }
        op->addr.mem.ea = modrm_ea;
        if (ctxt->ad_bytes != 8)
                ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
        return rc;
}
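
/*
 * Worked example for the 16-bit path above: ModRM byte 0x46 decodes to
 * mod = 1, rm = 6, i.e. BP plus a sign-extended 8-bit displacement,
 * and rm == 6 with a non-zero mod selects SS as the default segment.
 */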

static int decode_abs(struct x86_emulate_ctxt *ctxt,
                      struct operand *op)
{
        int rc = X86EMUL_CONTINUE;

        op->type = OP_MEM;
        switch (ctxt->ad_bytes) {
        case 2:
                op->addr.mem.ea = insn_fetch(u16, ctxt);
                break;
        case 4:
                op->addr.mem.ea = insn_fetch(u32, ctxt);
                break;
        case 8:
                op->addr.mem.ea = insn_fetch(u64, ctxt);
                break;
        }
done:
        return rc;
}

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
        long sv = 0, mask;

        if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
                mask = ~((long)ctxt->dst.bytes * 8 - 1);

                if (ctxt->src.bytes == 2)
                        sv = (s16)ctxt->src.val & (s16)mask;
                else if (ctxt->src.bytes == 4)
                        sv = (s32)ctxt->src.val & (s32)mask;
                else
                        sv = (s64)ctxt->src.val & (s64)mask;

                ctxt->dst.addr.mem.ea = address_mask(ctxt,
                                        ctxt->dst.addr.mem.ea + (sv >> 3));
        }

        /* only subword offset */
        ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}
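
/*
 * Example of the adjustment above: "bts %eax, mem" with EAX = 100 and a
 * 4-byte operand rounds the bit offset down to 96, advances the memory
 * address by 96 / 8 = 12 bytes, and leaves 100 & 31 = 4 as the
 * in-dword bit offset for the fastop.
 */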

static int read_emulated(struct x86_emulate_ctxt *ctxt,
                         unsigned long addr, void *dest, unsigned size)
{
        int rc;
        struct read_cache *mc = &ctxt->mem_read;

        if (mc->pos < mc->end)
                goto read_cached;

        WARN_ON((mc->end + size) >= sizeof(mc->data));

        rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
                                      &ctxt->exception);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        mc->end += size;

read_cached:
        memcpy(dest, mc->data + mc->pos, size);
        mc->pos += size;
        return X86EMUL_CONTINUE;
}

static int segmented_read(struct x86_emulate_ctxt *ctxt,
                          struct segmented_address addr,
                          void *data,
                          unsigned size)
{
        int rc;
        ulong linear;

        rc = linearize(ctxt, addr, size, false, &linear);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
                           struct segmented_address addr,
                           const void *data,
                           unsigned size)
{
        int rc;
        ulong linear;

        rc = linearize(ctxt, addr, size, true, &linear);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        return ctxt->ops->write_emulated(ctxt, linear, data, size,
                                         &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
                             struct segmented_address addr,
                             const void *orig_data, const void *data,
                             unsigned size)
{
        int rc;
        ulong linear;

        rc = linearize(ctxt, addr, size, true, &linear);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
                                           size, &ctxt->exception);
}

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
                           unsigned int size, unsigned short port,
                           void *dest)
{
        struct read_cache *rc = &ctxt->io_read;

        if (rc->pos == rc->end) { /* refill pio read ahead */
                unsigned int in_page, n;
                unsigned int count = ctxt->rep_prefix ?
                        address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
                in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
                        offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
                        PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
                n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
                if (n == 0)
                        n = 1;
                rc->pos = rc->end = 0;
                if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
                        return 0;
                rc->end = n * size;
        }

        if (ctxt->rep_prefix && (ctxt->d & String) &&
            !(ctxt->eflags & X86_EFLAGS_DF)) {
                ctxt->dst.data = rc->data + rc->pos;
                ctxt->dst.type = OP_MEM_STR;
                ctxt->dst.count = (rc->end - rc->pos) / size;
                rc->pos = rc->end;
        } else {
                memcpy(dest, rc->data + rc->pos, size);
                rc->pos += size;
        }
        return 1;
}

static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
                                     u16 index, struct desc_struct *desc)
{
        struct desc_ptr dt;
        ulong addr;

        ctxt->ops->get_idt(ctxt, &dt);

        if (dt.size < index * 8 + 7)
                return emulate_gp(ctxt, index << 3 | 0x2);

        addr = dt.address + index * 8;
        return linear_read_system(ctxt, addr, desc, sizeof(*desc));
}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
                                     u16 selector, struct desc_ptr *dt)
{
        const struct x86_emulate_ops *ops = ctxt->ops;
        u32 base3 = 0;

        if (selector & 1 << 2) {
                struct desc_struct desc;
                u16 sel;

                memset(dt, 0, sizeof(*dt));
                if (!ops->get_segment(ctxt, &sel, &desc, &base3,
                                      VCPU_SREG_LDTR))
                        return;

                dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
                dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
        } else
                ops->get_gdt(ctxt, dt);
}

static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
                              u16 selector, ulong *desc_addr_p)
{
        struct desc_ptr dt;
        u16 index = selector >> 3;
        ulong addr;

        get_descriptor_table_ptr(ctxt, selector, &dt);

        if (dt.size < index * 8 + 7)
                return emulate_gp(ctxt, selector & 0xfffc);

        addr = dt.address + index * 8;

#ifdef CONFIG_X86_64
        if (addr >> 32 != 0) {
                u64 efer = 0;

                ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
                if (!(efer & EFER_LMA))
                        addr &= (u32)-1;
        }
#endif

        *desc_addr_p = addr;
        return X86EMUL_CONTINUE;
}

/* allowed just for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
                                   u16 selector, struct desc_struct *desc,
                                   ulong *desc_addr_p)
{
        int rc;

        rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
}

/* allowed just for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
                                    u16 selector, struct desc_struct *desc)
{
        int rc;
        ulong addr;

        rc = get_descriptor_ptr(ctxt, selector, &addr);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        return linear_write_system(ctxt, addr, desc, sizeof(*desc));
}

static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
                                     u16 selector, int seg, u8 cpl,
                                     enum x86_transfer_type transfer,
                                     struct desc_struct *desc)
{
        struct desc_struct seg_desc, old_desc;
        u8 dpl, rpl;
        unsigned err_vec = GP_VECTOR;
        u32 err_code = 0;
        bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
        ulong desc_addr;
        int ret;
        u16 dummy;
        u32 base3 = 0;

        memset(&seg_desc, 0, sizeof(seg_desc));

        if (ctxt->mode == X86EMUL_MODE_REAL) {
                /* set real mode segment descriptor (keep limit etc. for
                 * unreal mode) */
                ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
                set_desc_base(&seg_desc, selector << 4);
                goto load;
        } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
                /* VM86 needs a clean new segment descriptor */
                set_desc_base(&seg_desc, selector << 4);
                set_desc_limit(&seg_desc, 0xffff);
                seg_desc.type = 3;
                seg_desc.p = 1;
                seg_desc.s = 1;
                seg_desc.dpl = 3;
                goto load;
        }

        rpl = selector & 3;

        /* TR should be in GDT only */
        if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
                goto exception;

        /* NULL selector is not valid for TR, CS and (except for long mode) SS */
        if (null_selector) {
                if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
                        goto exception;

                if (seg == VCPU_SREG_SS) {
                        if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
                                goto exception;

                        /*
                         * ctxt->ops->set_segment expects the CPL to be in
                         * SS.DPL, so fake an expand-up 32-bit data segment.
                         */
                        seg_desc.type = 3;
                        seg_desc.p = 1;
                        seg_desc.s = 1;
                        seg_desc.dpl = cpl;
                        seg_desc.d = 1;
                        seg_desc.g = 1;
                }

                /* Skip all following checks */
                goto load;
        }

        ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
        if (ret != X86EMUL_CONTINUE)
                return ret;

        err_code = selector & 0xfffc;
        err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
                                                           GP_VECTOR;

        /* can't load system descriptor into segment selector */
        if (seg <= VCPU_SREG_GS && !seg_desc.s) {
                if (transfer == X86_TRANSFER_CALL_JMP)
                        return X86EMUL_UNHANDLEABLE;
                goto exception;
        }

        if (!seg_desc.p) {
                err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
                goto exception;
        }

        dpl = seg_desc.dpl;

        switch (seg) {
        case VCPU_SREG_SS:
                /*
                 * segment is not a writable data segment, or segment
                 * selector's RPL != CPL, or segment's DPL != CPL
                 */
                if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
                        goto exception;
                break;
        case VCPU_SREG_CS:
                if (!(seg_desc.type & 8))
                        goto exception;

                if (seg_desc.type & 4) {
                        /* conforming */
                        if (dpl > cpl)
                                goto exception;
                } else {
                        /* nonconforming */
                        if (rpl > cpl || dpl != cpl)
                                goto exception;
                }
                /* in long-mode d/b must be clear if l is set */
                if (seg_desc.d && seg_desc.l) {
                        u64 efer = 0;

                        ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
                        if (efer & EFER_LMA)
                                goto exception;
                }

                /* CS(RPL) <- CPL */
                selector = (selector & 0xfffc) | cpl;
                break;
        case VCPU_SREG_TR:
                if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
                        goto exception;
                old_desc = seg_desc;
                seg_desc.type |= 2; /* busy */
                ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
                                                  sizeof(seg_desc), &ctxt->exception);
                if (ret != X86EMUL_CONTINUE)
                        return ret;
                break;
        case VCPU_SREG_LDTR:
                if (seg_desc.s || seg_desc.type != 2)
                        goto exception;
                break;
        default: /* DS, ES, FS, or GS */
                /*
                 * segment is not a data or readable code segment or
                 * ((segment is a data or nonconforming code segment)
                 * and (both RPL and CPL > DPL))
                 */
                if ((seg_desc.type & 0xa) == 0x8 ||
                    (((seg_desc.type & 0xc) != 0xc) &&
                     (rpl > dpl && cpl > dpl)))
                        goto exception;
                break;
        }

        if (seg_desc.s) {
                /* mark segment as accessed */
                if (!(seg_desc.type & 1)) {
                        seg_desc.type |= 1;
                        ret = write_segment_descriptor(ctxt, selector,
                                                       &seg_desc);
                        if (ret != X86EMUL_CONTINUE)
                                return ret;
                }
        } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
                ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
                if (ret != X86EMUL_CONTINUE)
                        return ret;
                if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
                                                 ((u64)base3 << 32), ctxt))
                        return emulate_gp(ctxt, 0);
        }
load:
        ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
        if (desc)
                *desc = seg_desc;
        return X86EMUL_CONTINUE;
exception:
        return emulate_exception(ctxt, err_vec, err_code, true);
}

static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
                                   u16 selector, int seg)
{
        u8 cpl = ctxt->ops->cpl(ctxt);

        /*
         * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
         * they can load it at CPL<3 (Intel's manual says only LSS can,
         * but it's wrong).
         *
         * However, the Intel manual says that putting IST=1/DPL=3 in
         * an interrupt gate will result in SS=3 (the AMD manual instead
         * says it doesn't), so allow SS=3 in __load_segment_descriptor
         * and only forbid it here.
         */
        if (seg == VCPU_SREG_SS && selector == 3 &&
            ctxt->mode == X86EMUL_MODE_PROT64)
                return emulate_exception(ctxt, GP_VECTOR, 0, true);

        return __load_segment_descriptor(ctxt, selector, seg, cpl,
                                         X86_TRANSFER_NONE, NULL);
}

static void write_register_operand(struct operand *op)
{
        return assign_register(op->addr.reg, op->val, op->bytes);
}

static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{
        switch (op->type) {
        case OP_REG:
                write_register_operand(op);
                break;
        case OP_MEM:
                if (ctxt->lock_prefix)
                        return segmented_cmpxchg(ctxt,
                                                 op->addr.mem,
                                                 &op->orig_val,
                                                 &op->val,
                                                 op->bytes);
                else
                        return segmented_write(ctxt,
                                               op->addr.mem,
                                               &op->val,
                                               op->bytes);
                break;
        case OP_MEM_STR:
                return segmented_write(ctxt,
                                       op->addr.mem,
                                       op->data,
                                       op->bytes * op->count);
                break;
        case OP_XMM:
                kvm_write_sse_reg(op->addr.xmm, &op->vec_val);
                break;
        case OP_MM:
                kvm_write_mmx_reg(op->addr.mm, &op->mm_val);
                break;
        case OP_NONE:
                /* no writeback */
                break;
        default:
                break;
        }
        return X86EMUL_CONTINUE;
}

static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
        struct segmented_address addr;

        rsp_increment(ctxt, -bytes);
        addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
        addr.seg = VCPU_SREG_SS;

        return segmented_write(ctxt, addr, data, bytes);
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
        /* Disable writeback. */
        ctxt->dst.type = OP_NONE;
        return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
                       void *dest, int len)
{
        int rc;
        struct segmented_address addr;

        addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
        addr.seg = VCPU_SREG_SS;
        rc = segmented_read(ctxt, addr, dest, len);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        rsp_increment(ctxt, len);
        return rc;
}

static int em_pop(struct x86_emulate_ctxt *ctxt)
{
        return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int emulate_popf(struct x86_emulate_ctxt *ctxt,
                        void *dest, int len)
{
        int rc;
        unsigned long val, change_mask;
        int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
        int cpl = ctxt->ops->cpl(ctxt);

        rc = emulate_pop(ctxt, &val, len);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
                      X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
                      X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
                      X86_EFLAGS_AC | X86_EFLAGS_ID;

        switch(ctxt->mode) {
        case X86EMUL_MODE_PROT64:
        case X86EMUL_MODE_PROT32:
        case X86EMUL_MODE_PROT16:
                if (cpl == 0)
                        change_mask |= X86_EFLAGS_IOPL;
                if (cpl <= iopl)
                        change_mask |= X86_EFLAGS_IF;
                break;
        case X86EMUL_MODE_VM86:
                if (iopl < 3)
                        return emulate_gp(ctxt, 0);
                change_mask |= X86_EFLAGS_IF;
                break;
        default: /* real mode */
                change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
                break;
        }

        *(unsigned long *)dest =
                (ctxt->eflags & ~change_mask) | (val & change_mask);

        return rc;
}

static int em_popf(struct x86_emulate_ctxt *ctxt)
{
        ctxt->dst.type = OP_REG;
        ctxt->dst.addr.reg = &ctxt->eflags;
        ctxt->dst.bytes = ctxt->op_bytes;
        return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int em_enter(struct x86_emulate_ctxt *ctxt)
{
        int rc;
        unsigned frame_size = ctxt->src.val;
        unsigned nesting_level = ctxt->src2.val & 31;
        ulong rbp;

        if (nesting_level)
                return X86EMUL_UNHANDLEABLE;

        rbp = reg_read(ctxt, VCPU_REGS_RBP);
        rc = push(ctxt, &rbp, stack_size(ctxt));
        if (rc != X86EMUL_CONTINUE)
                return rc;
        assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
                      stack_mask(ctxt));
        assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
                      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
                      stack_mask(ctxt));
        return X86EMUL_CONTINUE;
}

static int em_leave(struct x86_emulate_ctxt *ctxt)
{
        assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
                      stack_mask(ctxt));
        return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}

static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
        int seg = ctxt->src2.val;

        ctxt->src.val = get_segment_selector(ctxt, seg);
        if (ctxt->op_bytes == 4) {
                rsp_increment(ctxt, -2);
                ctxt->op_bytes = 2;
        }

        return em_push(ctxt);
}

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
        int seg = ctxt->src2.val;
        unsigned long selector;
        int rc;

        rc = emulate_pop(ctxt, &selector, 2);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        if (ctxt->modrm_reg == VCPU_SREG_SS)
                ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
        if (ctxt->op_bytes > 2)
                rsp_increment(ctxt, ctxt->op_bytes - 2);

        rc = load_segment_descriptor(ctxt, (u16)selector, seg);
        return rc;
}

static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
        unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
        int rc = X86EMUL_CONTINUE;
        int reg = VCPU_REGS_RAX;

        while (reg <= VCPU_REGS_RDI) {
                (reg == VCPU_REGS_RSP) ?
                (ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));

                rc = em_push(ctxt);
                if (rc != X86EMUL_CONTINUE)
                        return rc;

                ++reg;
        }

        return rc;
}

static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
        ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
        return em_push(ctxt);
}

static int em_popa(struct x86_emulate_ctxt *ctxt)
{
        int rc = X86EMUL_CONTINUE;
        int reg = VCPU_REGS_RDI;
        u32 val;

        while (reg >= VCPU_REGS_RAX) {
                if (reg == VCPU_REGS_RSP) {
                        rsp_increment(ctxt, ctxt->op_bytes);
                        --reg;
                }

                rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
                if (rc != X86EMUL_CONTINUE)
                        break;
                assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
                --reg;
        }
        return rc;
}

static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
        const struct x86_emulate_ops *ops = ctxt->ops;
        int rc;
        struct desc_ptr dt;
        gva_t cs_addr;
        gva_t eip_addr;
        u16 cs, eip;

        /* TODO: Add limit checks */
        ctxt->src.val = ctxt->eflags;
        rc = em_push(ctxt);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);

        ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
        rc = em_push(ctxt);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        ctxt->src.val = ctxt->_eip;
        rc = em_push(ctxt);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        ops->get_idt(ctxt, &dt);

        eip_addr = dt.address + (irq << 2);
        cs_addr = dt.address + (irq << 2) + 2;

        rc = linear_read_system(ctxt, cs_addr, &cs, 2);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        rc = linear_read_system(ctxt, eip_addr, &eip, 2);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        ctxt->_eip = eip;

        return rc;
}

int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
        int rc;

        invalidate_registers(ctxt);
        rc = __emulate_int_real(ctxt, irq);
        if (rc == X86EMUL_CONTINUE)
                writeback_registers(ctxt);
        return rc;
}

static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
        switch(ctxt->mode) {
        case X86EMUL_MODE_REAL:
                return __emulate_int_real(ctxt, irq);
        case X86EMUL_MODE_VM86:
        case X86EMUL_MODE_PROT16:
        case X86EMUL_MODE_PROT32:
        case X86EMUL_MODE_PROT64:
        default:
2055 /* Protected-mode interrupts are not implemented yet */
2056 return X86EMUL_UNHANDLEABLE;
2057 }
2058}
2059
2060static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
2061{
2062 int rc = X86EMUL_CONTINUE;
2063 unsigned long temp_eip = 0;
2064 unsigned long temp_eflags = 0;
2065 unsigned long cs = 0;
2066 unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
2067 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
2068 X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
2069 X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
2070 X86_EFLAGS_AC | X86_EFLAGS_ID |
2071 X86_EFLAGS_FIXED;
2072 unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
2073 X86_EFLAGS_VIP;
2074
2075 /* TODO: Add stack limit check */
2076
2077 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
2078
2079 if (rc != X86EMUL_CONTINUE)
2080 return rc;
2081
2082 if (temp_eip & ~0xffff)
2083 return emulate_gp(ctxt, 0);
2084
2085 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2086
2087 if (rc != X86EMUL_CONTINUE)
2088 return rc;
2089
2090 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
2091
2092 if (rc != X86EMUL_CONTINUE)
2093 return rc;
2094
2095 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
2096
2097 if (rc != X86EMUL_CONTINUE)
2098 return rc;
2099
2100 ctxt->_eip = temp_eip;
2101
2102 if (ctxt->op_bytes == 4)
2103 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
2104 else if (ctxt->op_bytes == 2) {
2105 ctxt->eflags &= ~0xffff;
2106 ctxt->eflags |= temp_eflags;
2107 }
2108
2109 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
2110 ctxt->eflags |= X86_EFLAGS_FIXED;
2111 ctxt->ops->set_nmi_mask(ctxt, false);
2112
2113 return rc;
2114}
2115
2116static int em_iret(struct x86_emulate_ctxt *ctxt)
2117{
2118 switch(ctxt->mode) {
2119 case X86EMUL_MODE_REAL:
2120 return emulate_iret_real(ctxt);
2121 case X86EMUL_MODE_VM86:
2122 case X86EMUL_MODE_PROT16:
2123 case X86EMUL_MODE_PROT32:
2124 case X86EMUL_MODE_PROT64:
2125 default:
2126 /* IRET from protected mode is not implemented yet */
2127 return X86EMUL_UNHANDLEABLE;
2128 }
2129}
2130
2131static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2132{
2133 int rc;
2134 unsigned short sel;
2135 struct desc_struct new_desc;
2136 u8 cpl = ctxt->ops->cpl(ctxt);
2137
2138 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2139
2140 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
2141 X86_TRANSFER_CALL_JMP,
2142 &new_desc);
2143 if (rc != X86EMUL_CONTINUE)
2144 return rc;
2145
2146 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
2147 /* Error handling is not implemented. */
2148 if (rc != X86EMUL_CONTINUE)
2149 return X86EMUL_UNHANDLEABLE;
2150
2151 return rc;
2152}
2153
2154static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
2155{
2156 return assign_eip_near(ctxt, ctxt->src.val);
2157}
2158
2159static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
2160{
2161 int rc;
2162 long int old_eip;
2163
2164 old_eip = ctxt->_eip;
2165 rc = assign_eip_near(ctxt, ctxt->src.val);
2166 if (rc != X86EMUL_CONTINUE)
2167 return rc;
2168 ctxt->src.val = old_eip;
2169 rc = em_push(ctxt);
2170 return rc;
2171}
2172
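/*
 * CMPXCHG8B: compare EDX:EAX with the 64-bit memory operand. On a match,
 * set ZF and store ECX:EBX; otherwise clear ZF and load the operand into
 * EDX:EAX. The 16-byte CMPXCHG16B form is not handled here.
 */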
2173static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2174{
2175 u64 old = ctxt->dst.orig_val64;
2176
2177 if (ctxt->dst.bytes == 16)
2178 return X86EMUL_UNHANDLEABLE;
2179
2180 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2181 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2182 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2183 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2184 ctxt->eflags &= ~X86_EFLAGS_ZF;
2185 } else {
2186 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2187 (u32) reg_read(ctxt, VCPU_REGS_RBX);
2188
2189 ctxt->eflags |= X86_EFLAGS_ZF;
2190 }
2191 return X86EMUL_CONTINUE;
2192}
2193
2194static int em_ret(struct x86_emulate_ctxt *ctxt)
2195{
2196 int rc;
2197 unsigned long eip;
2198
2199 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2200 if (rc != X86EMUL_CONTINUE)
2201 return rc;
2202
2203 return assign_eip_near(ctxt, eip);
2204}
2205
2206static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2207{
2208 int rc;
2209 unsigned long eip, cs;
2210 int cpl = ctxt->ops->cpl(ctxt);
2211 struct desc_struct new_desc;
2212
2213 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2214 if (rc != X86EMUL_CONTINUE)
2215 return rc;
2216 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2217 if (rc != X86EMUL_CONTINUE)
2218 return rc;
2219 /* Outer-privilege level return is not implemented */
2220 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2221 return X86EMUL_UNHANDLEABLE;
2222 rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
2223 X86_TRANSFER_RET,
2224 &new_desc);
2225 if (rc != X86EMUL_CONTINUE)
2226 return rc;
2227 rc = assign_eip_far(ctxt, eip, &new_desc);
2228 /* Error handling is not implemented. */
2229 if (rc != X86EMUL_CONTINUE)
2230 return X86EMUL_UNHANDLEABLE;
2231
2232 return rc;
2233}
2234
2235static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2236{
2237 int rc;
2238
2239 rc = em_ret_far(ctxt);
2240 if (rc != X86EMUL_CONTINUE)
2241 return rc;
2242 rsp_increment(ctxt, ctxt->src.val);
2243 return X86EMUL_CONTINUE;
2244}
2245
2246static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2247{
2248 /* Save real source value, then compare EAX against destination. */
2249 ctxt->dst.orig_val = ctxt->dst.val;
2250 ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
2251 ctxt->src.orig_val = ctxt->src.val;
2252 ctxt->src.val = ctxt->dst.orig_val;
2253 fastop(ctxt, em_cmp);
2254
2255 if (ctxt->eflags & X86_EFLAGS_ZF) {
2256 /* Success: write back to memory; no update of EAX */
2257 ctxt->src.type = OP_NONE;
2258 ctxt->dst.val = ctxt->src.orig_val;
2259 } else {
2260 /* Failure: write the value we saw to EAX. */
2261 ctxt->src.type = OP_REG;
2262 ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2263 ctxt->src.val = ctxt->dst.orig_val;
2264 /* Create write-cycle to dest by writing the same value */
2265 ctxt->dst.val = ctxt->dst.orig_val;
2266 }
2267 return X86EMUL_CONTINUE;
2268}
2269
2270static int em_lseg(struct x86_emulate_ctxt *ctxt)
2271{
2272 int seg = ctxt->src2.val;
2273 unsigned short sel;
2274 int rc;
2275
2276 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2277
2278 rc = load_segment_descriptor(ctxt, sel, seg);
2279 if (rc != X86EMUL_CONTINUE)
2280 return rc;
2281
2282 ctxt->dst.val = ctxt->src.val;
2283 return rc;
2284}
2285
2286static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
2287{
2288#ifdef CONFIG_X86_64
2289 return ctxt->ops->guest_has_long_mode(ctxt);
2290#else
2291 return false;
2292#endif
2293}
2294
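/*
 * Unpack the access-rights word of a segment descriptor, as laid out in
 * the SMM state-save area, into a struct desc_struct.
 */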
2295static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
2296{
2297 desc->g = (flags >> 23) & 1;
2298 desc->d = (flags >> 22) & 1;
2299 desc->l = (flags >> 21) & 1;
2300 desc->avl = (flags >> 20) & 1;
2301 desc->p = (flags >> 15) & 1;
2302 desc->dpl = (flags >> 13) & 3;
2303 desc->s = (flags >> 12) & 1;
2304 desc->type = (flags >> 8) & 15;
2305}
2306
2307static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, const char *smstate,
2308 int n)
2309{
2310 struct desc_struct desc;
2311 int offset;
2312 u16 selector;
2313
2314 selector = GET_SMSTATE(u32, smstate, 0x7fa8 + n * 4);
2315
2316 if (n < 3)
2317 offset = 0x7f84 + n * 12;
2318 else
2319 offset = 0x7f2c + (n - 3) * 12;
2320
2321 set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8));
2322 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4));
2323 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, offset));
2324 ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
2325 return X86EMUL_CONTINUE;
2326}
2327
2328#ifdef CONFIG_X86_64
2329static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, const char *smstate,
2330 int n)
2331{
2332 struct desc_struct desc;
2333 int offset;
2334 u16 selector;
2335 u32 base3;
2336
2337 offset = 0x7e00 + n * 16;
2338
2339 selector = GET_SMSTATE(u16, smstate, offset);
2340 rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smstate, offset + 2) << 8);
2341 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4));
2342 set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8));
2343 base3 = GET_SMSTATE(u32, smstate, offset + 12);
2344
2345 ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
2346 return X86EMUL_CONTINUE;
2347}
2348#endif
2349
2350static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
2351 u64 cr0, u64 cr3, u64 cr4)
2352{
2353 int bad;
2354 u64 pcid;
2355
2356 /* In order to later set CR4.PCIDE, CR3[11:0] must be zero. */
2357 pcid = 0;
2358 if (cr4 & X86_CR4_PCIDE) {
2359 pcid = cr3 & 0xfff;
2360 cr3 &= ~0xfff;
2361 }
2362
2363 bad = ctxt->ops->set_cr(ctxt, 3, cr3);
2364 if (bad)
2365 return X86EMUL_UNHANDLEABLE;
2366
2367 /*
2368 * First enable PAE, long mode needs it before CR0.PG = 1 is set.
2369 * Then enable protected mode. However, PCID cannot be enabled
2370 * if EFER.LMA=0, so set it separately.
2371 */
2372 bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2373 if (bad)
2374 return X86EMUL_UNHANDLEABLE;
2375
2376 bad = ctxt->ops->set_cr(ctxt, 0, cr0);
2377 if (bad)
2378 return X86EMUL_UNHANDLEABLE;
2379
2380 if (cr4 & X86_CR4_PCIDE) {
2381 bad = ctxt->ops->set_cr(ctxt, 4, cr4);
2382 if (bad)
2383 return X86EMUL_UNHANDLEABLE;
2384 if (pcid) {
2385 bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid);
2386 if (bad)
2387 return X86EMUL_UNHANDLEABLE;
2388 }
2390 }
2391
2392 return X86EMUL_CONTINUE;
2393}
2394
2395static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
2396 const char *smstate)
2397{
2398 struct desc_struct desc;
2399 struct desc_ptr dt;
2400 u16 selector;
2401 u32 val, cr0, cr3, cr4;
2402 int i;
2403
2404 cr0 = GET_SMSTATE(u32, smstate, 0x7ffc);
2405 cr3 = GET_SMSTATE(u32, smstate, 0x7ff8);
2406 ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7ff4) | X86_EFLAGS_FIXED;
2407 ctxt->_eip = GET_SMSTATE(u32, smstate, 0x7ff0);
2408
2409 for (i = 0; i < 8; i++)
2410 *reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4);
2411
2412 val = GET_SMSTATE(u32, smstate, 0x7fcc);
2413
2414 if (ctxt->ops->set_dr(ctxt, 6, val))
2415 return X86EMUL_UNHANDLEABLE;
2416
2417 val = GET_SMSTATE(u32, smstate, 0x7fc8);
2418
2419 if (ctxt->ops->set_dr(ctxt, 7, val))
2420 return X86EMUL_UNHANDLEABLE;
2421
2422 selector = GET_SMSTATE(u32, smstate, 0x7fc4);
2423 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f64));
2424 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f60));
2425 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f5c));
2426 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
2427
2428 selector = GET_SMSTATE(u32, smstate, 0x7fc0);
2429 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f80));
2430 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f7c));
2431 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f78));
2432 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
2433
2434 dt.address = GET_SMSTATE(u32, smstate, 0x7f74);
2435 dt.size = GET_SMSTATE(u32, smstate, 0x7f70);
2436 ctxt->ops->set_gdt(ctxt, &dt);
2437
2438 dt.address = GET_SMSTATE(u32, smstate, 0x7f58);
2439 dt.size = GET_SMSTATE(u32, smstate, 0x7f54);
2440 ctxt->ops->set_idt(ctxt, &dt);
2441
2442 for (i = 0; i < 6; i++) {
2443 int r = rsm_load_seg_32(ctxt, smstate, i);
2444 if (r != X86EMUL_CONTINUE)
2445 return r;
2446 }
2447
2448 cr4 = GET_SMSTATE(u32, smstate, 0x7f14);
2449
2450 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7ef8));
2451
2452 return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
2453}
2454
2455#ifdef CONFIG_X86_64
2456static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
2457 const char *smstate)
2458{
2459 struct desc_struct desc;
2460 struct desc_ptr dt;
2461 u64 val, cr0, cr3, cr4;
2462 u32 base3;
2463 u16 selector;
2464 int i, r;
2465
2466 for (i = 0; i < 16; i++)
2467 *reg_write(ctxt, i) = GET_SMSTATE(u64, smstate, 0x7ff8 - i * 8);
2468
2469 ctxt->_eip = GET_SMSTATE(u64, smstate, 0x7f78);
2470 ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED;
2471
2472 val = GET_SMSTATE(u64, smstate, 0x7f68);
2473
2474 if (ctxt->ops->set_dr(ctxt, 6, val))
2475 return X86EMUL_UNHANDLEABLE;
2476
2477 val = GET_SMSTATE(u64, smstate, 0x7f60);
2478
2479 if (ctxt->ops->set_dr(ctxt, 7, val))
2480 return X86EMUL_UNHANDLEABLE;
2481
2482 cr0 = GET_SMSTATE(u64, smstate, 0x7f58);
2483 cr3 = GET_SMSTATE(u64, smstate, 0x7f50);
2484 cr4 = GET_SMSTATE(u64, smstate, 0x7f48);
2485 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7f00));
2486 val = GET_SMSTATE(u64, smstate, 0x7ed0);
2487
2488 if (ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA))
2489 return X86EMUL_UNHANDLEABLE;
2490
2491 selector = GET_SMSTATE(u32, smstate, 0x7e90);
2492 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e92) << 8);
2493 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e94));
2494 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e98));
2495 base3 = GET_SMSTATE(u32, smstate, 0x7e9c);
2496 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
2497
2498 dt.size = GET_SMSTATE(u32, smstate, 0x7e84);
2499 dt.address = GET_SMSTATE(u64, smstate, 0x7e88);
2500 ctxt->ops->set_idt(ctxt, &dt);
2501
2502 selector = GET_SMSTATE(u32, smstate, 0x7e70);
2503 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e72) << 8);
2504 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e74));
2505 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e78));
2506 base3 = GET_SMSTATE(u32, smstate, 0x7e7c);
2507 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
2508
2509 dt.size = GET_SMSTATE(u32, smstate, 0x7e64);
2510 dt.address = GET_SMSTATE(u64, smstate, 0x7e68);
2511 ctxt->ops->set_gdt(ctxt, &dt);
2512
2513 r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
2514 if (r != X86EMUL_CONTINUE)
2515 return r;
2516
2517 for (i = 0; i < 6; i++) {
2518 r = rsm_load_seg_64(ctxt, smstate, i);
2519 if (r != X86EMUL_CONTINUE)
2520 return r;
2521 }
2522
2523 return X86EMUL_CONTINUE;
2524}
2525#endif
2526
2527static int em_rsm(struct x86_emulate_ctxt *ctxt)
2528{
2529 unsigned long cr0, cr4, efer;
2530 char buf[512];
2531 u64 smbase;
2532 int ret;
2533
2534 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
2535 return emulate_ud(ctxt);
2536
2537 smbase = ctxt->ops->get_smbase(ctxt);
2538
2539 ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, buf, sizeof(buf));
2540 if (ret != X86EMUL_CONTINUE)
2541 return X86EMUL_UNHANDLEABLE;
2542
2543 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
2544 ctxt->ops->set_nmi_mask(ctxt, false);
2545
2546 ctxt->ops->exiting_smm(ctxt);
2547
2548 /*
2549 * Get back to real mode, to prepare a safe state in which to load
2550 * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU
2551 * supports long mode.
2552 */
2553 if (emulator_has_longmode(ctxt)) {
2554 struct desc_struct cs_desc;
2555
2556 /* Zero CR4.PCIDE before CR0.PG. */
2557 cr4 = ctxt->ops->get_cr(ctxt, 4);
2558 if (cr4 & X86_CR4_PCIDE)
2559 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2560
2561 /* A 32-bit code segment is required to clear EFER.LMA. */
2562 memset(&cs_desc, 0, sizeof(cs_desc));
2563 cs_desc.type = 0xb;
2564 cs_desc.s = cs_desc.g = cs_desc.p = 1;
2565 ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
2566 }
2567
2568 /* For the 64-bit case, this will clear EFER.LMA. */
2569 cr0 = ctxt->ops->get_cr(ctxt, 0);
2570 if (cr0 & X86_CR0_PE)
2571 ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
2572
2573 if (emulator_has_longmode(ctxt)) {
2574 /* Clear CR4.PAE before clearing EFER.LME. */
2575 cr4 = ctxt->ops->get_cr(ctxt, 4);
2576 if (cr4 & X86_CR4_PAE)
2577 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
2578
2579 /* And finally go back to 32-bit mode. */
2580 efer = 0;
2581 ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
2582 }
2583
2584 /*
2585 * Give leave_smm() a chance to make ISA-specific changes to the vCPU
2586 * state (e.g. enter guest mode) before loading state from the SMM
2587 * state-save area.
2588 */
2589 if (ctxt->ops->leave_smm(ctxt, buf))
2590 goto emulate_shutdown;
2591
2592#ifdef CONFIG_X86_64
2593 if (emulator_has_longmode(ctxt))
2594 ret = rsm_load_state_64(ctxt, buf);
2595 else
2596#endif
2597 ret = rsm_load_state_32(ctxt, buf);
2598
2599 if (ret != X86EMUL_CONTINUE)
2600 goto emulate_shutdown;
2601
2602 /*
2603 * Note, the ctxt->ops callbacks are responsible for handling side
2604 * effects when writing MSRs and CRs, e.g. MMU context resets, CPUID
2605 * runtime updates, etc... If that changes, e.g. this flow is moved
2606 * out of the emulator to make it look more like enter_smm(), then
2607 * those side effects need to be explicitly handled for both success
2608 * and shutdown.
2609 */
2610 return X86EMUL_CONTINUE;
2611
2612emulate_shutdown:
2613 ctxt->ops->triple_fault(ctxt);
2614 return X86EMUL_CONTINUE;
2615}
2616
2617static void
2618setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2619 struct desc_struct *cs, struct desc_struct *ss)
2620{
2621 cs->l = 0; /* will be adjusted later */
2622 set_desc_base(cs, 0); /* flat segment */
2623 cs->g = 1; /* 4KB granularity */
2624 set_desc_limit(cs, 0xfffff); /* 4GB limit */
2625 cs->type = 0x0b; /* Read, Execute, Accessed */
2626 cs->s = 1;
2627 cs->dpl = 0; /* will be adjusted later */
2628 cs->p = 1;
2629 cs->d = 1;
2630 cs->avl = 0;
2631
2632 set_desc_base(ss, 0); /* flat segment */
2633 set_desc_limit(ss, 0xfffff); /* 4GB limit */
2634 ss->g = 1; /* 4KB granularity */
2635 ss->s = 1;
2636 ss->type = 0x03; /* Read/Write, Accessed */
2637 ss->d = 1; /* 32-bit stack segment */
2638 ss->dpl = 0;
2639 ss->p = 1;
2640 ss->l = 0;
2641 ss->avl = 0;
2642}
2643
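/* Check the guest's CPUID vendor string (leaf 0) for GenuineIntel. */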
2644static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2645{
2646 u32 eax, ebx, ecx, edx;
2647
2648 eax = ecx = 0;
2649 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
2650 return is_guest_vendor_intel(ebx, ecx, edx);
2651}
2652
2653static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2654{
2655 const struct x86_emulate_ops *ops = ctxt->ops;
2656 u32 eax, ebx, ecx, edx;
2657
2658 /*
2659 * SYSCALL is always enabled in long mode, so the check only needs to
2660 * become vendor-specific (via CPUID) when other modes are active...
2661 */
2662 if (ctxt->mode == X86EMUL_MODE_PROT64)
2663 return true;
2664
2665 eax = 0x00000000;
2666 ecx = 0x00000000;
2667 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
2668 /*
2669 * Remark: Intel CPUs only support "syscall" in 64-bit long mode, so a
2670 * 64-bit guest running a 32-bit compat application will #UD! While this
2671 * behaviour could be fixed (by emulating the AMD response), AMD CPUs
2672 * can't be made to behave like Intel ones.
2673 */
2674 if (is_guest_vendor_intel(ebx, ecx, edx))
2675 return false;
2676
2677 if (is_guest_vendor_amd(ebx, ecx, edx) ||
2678 is_guest_vendor_hygon(ebx, ecx, edx))
2679 return true;
2680
2681 /*
2682 * default: (not Intel, not AMD, not Hygon), apply Intel's
2683 * stricter rules...
2684 */
2685 return false;
2686}
2687
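/*
 * SYSCALL: the target CS selector comes from STAR[47:32], with SS at the
 * next GDT entry (+8). In long mode the new RIP comes from LSTAR (CSTAR
 * for compat mode) and the flag bits in SYSCALL_MASK are cleared; in
 * legacy mode the new EIP comes from STAR[31:0].
 */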
2688static int em_syscall(struct x86_emulate_ctxt *ctxt)
2689{
2690 const struct x86_emulate_ops *ops = ctxt->ops;
2691 struct desc_struct cs, ss;
2692 u64 msr_data;
2693 u16 cs_sel, ss_sel;
2694 u64 efer = 0;
2695
2696 /* syscall is not available in real mode */
2697 if (ctxt->mode == X86EMUL_MODE_REAL ||
2698 ctxt->mode == X86EMUL_MODE_VM86)
2699 return emulate_ud(ctxt);
2700
2701 if (!em_syscall_is_enabled(ctxt))
2702 return emulate_ud(ctxt);
2703
2704 ops->get_msr(ctxt, MSR_EFER, &efer);
2705 if (!(efer & EFER_SCE))
2706 return emulate_ud(ctxt);
2707
2708 setup_syscalls_segments(ctxt, &cs, &ss);
2709 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2710 msr_data >>= 32;
2711 cs_sel = (u16)(msr_data & 0xfffc);
2712 ss_sel = (u16)(msr_data + 8);
2713
2714 if (efer & EFER_LMA) {
2715 cs.d = 0;
2716 cs.l = 1;
2717 }
2718 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2719 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2720
2721 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2722 if (efer & EFER_LMA) {
2723#ifdef CONFIG_X86_64
2724 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2725
2726 ops->get_msr(ctxt,
2727 ctxt->mode == X86EMUL_MODE_PROT64 ?
2728 MSR_LSTAR : MSR_CSTAR, &msr_data);
2729 ctxt->_eip = msr_data;
2730
2731 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2732 ctxt->eflags &= ~msr_data;
2733 ctxt->eflags |= X86_EFLAGS_FIXED;
2734#endif
2735 } else {
2736 /* legacy mode */
2737 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2738 ctxt->_eip = (u32)msr_data;
2739
2740 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2741 }
2742
2743 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
2744 return X86EMUL_CONTINUE;
2745}
2746
2747static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2748{
2749 const struct x86_emulate_ops *ops = ctxt->ops;
2750 struct desc_struct cs, ss;
2751 u64 msr_data;
2752 u16 cs_sel, ss_sel;
2753 u64 efer = 0;
2754
2755 ops->get_msr(ctxt, MSR_EFER, &efer);
2756 /* inject #GP if in real mode */
2757 if (ctxt->mode == X86EMUL_MODE_REAL)
2758 return emulate_gp(ctxt, 0);
2759
2760 /*
2761 * Not recognized on AMD in compat mode (but is recognized in legacy
2762 * mode).
2763 */
2764 if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
2765 && !vendor_intel(ctxt))
2766 return emulate_ud(ctxt);
2767
2768 /* sysenter/sysexit have not been tested in 64-bit mode. */
2769 if (ctxt->mode == X86EMUL_MODE_PROT64)
2770 return X86EMUL_UNHANDLEABLE;
2771
2772 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2773 if ((msr_data & 0xfffc) == 0x0)
2774 return emulate_gp(ctxt, 0);
2775
2776 setup_syscalls_segments(ctxt, &cs, &ss);
2777 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2778 cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
2779 ss_sel = cs_sel + 8;
2780 if (efer & EFER_LMA) {
2781 cs.d = 0;
2782 cs.l = 1;
2783 }
2784
2785 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2786 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2787
2788 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2789 ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
2790
2791 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2792 *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
2793 (u32)msr_data;
2794 if (efer & EFER_LMA)
2795 ctxt->mode = X86EMUL_MODE_PROT64;
2796
2797 return X86EMUL_CONTINUE;
2798}
2799
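/*
 * SYSEXIT: a REX.W prefix selects a 64-bit return. The user CS/SS
 * selectors are derived from MSR_IA32_SYSENTER_CS (+16/+24 for a 32-bit
 * return, +32/+40 for a 64-bit one), and RIP/RSP are taken from RDX/RCX.
 */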
2800static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2801{
2802 const struct x86_emulate_ops *ops = ctxt->ops;
2803 struct desc_struct cs, ss;
2804 u64 msr_data, rcx, rdx;
2805 int usermode;
2806 u16 cs_sel = 0, ss_sel = 0;
2807
2808 /* inject #GP if in real mode or Virtual 8086 mode */
2809 if (ctxt->mode == X86EMUL_MODE_REAL ||
2810 ctxt->mode == X86EMUL_MODE_VM86)
2811 return emulate_gp(ctxt, 0);
2812
2813 setup_syscalls_segments(ctxt, &cs, &ss);
2814
2815 if ((ctxt->rex_prefix & 0x8) != 0x0)
2816 usermode = X86EMUL_MODE_PROT64;
2817 else
2818 usermode = X86EMUL_MODE_PROT32;
2819
2820 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2821 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2822
2823 cs.dpl = 3;
2824 ss.dpl = 3;
2825 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2826 switch (usermode) {
2827 case X86EMUL_MODE_PROT32:
2828 cs_sel = (u16)(msr_data + 16);
2829 if ((msr_data & 0xfffc) == 0x0)
2830 return emulate_gp(ctxt, 0);
2831 ss_sel = (u16)(msr_data + 24);
2832 rcx = (u32)rcx;
2833 rdx = (u32)rdx;
2834 break;
2835 case X86EMUL_MODE_PROT64:
2836 cs_sel = (u16)(msr_data + 32);
2837 if (msr_data == 0x0)
2838 return emulate_gp(ctxt, 0);
2839 ss_sel = cs_sel + 8;
2840 cs.d = 0;
2841 cs.l = 1;
2842 if (emul_is_noncanonical_address(rcx, ctxt) ||
2843 emul_is_noncanonical_address(rdx, ctxt))
2844 return emulate_gp(ctxt, 0);
2845 break;
2846 }
2847 cs_sel |= SEGMENT_RPL_MASK;
2848 ss_sel |= SEGMENT_RPL_MASK;
2849
2850 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2851 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2852
2853 ctxt->_eip = rdx;
2854 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2855
2856 return X86EMUL_CONTINUE;
2857}
2858
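/*
 * Return true if the IOPL check fails and the TSS I/O permission bitmap
 * must be consulted: never in real mode, always in VM86 mode, otherwise
 * whenever CPL > IOPL.
 */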
2859static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2860{
2861 int iopl;
2862 if (ctxt->mode == X86EMUL_MODE_REAL)
2863 return false;
2864 if (ctxt->mode == X86EMUL_MODE_VM86)
2865 return true;
2866 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
2867 return ctxt->ops->cpl(ctxt) > iopl;
2868}
2869
2870#define VMWARE_PORT_VMPORT (0x5658)
2871#define VMWARE_PORT_VMRPC (0x5659)
2872
2873static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2874 u16 port, u16 len)
2875{
2876 const struct x86_emulate_ops *ops = ctxt->ops;
2877 struct desc_struct tr_seg;
2878 u32 base3;
2879 int r;
2880 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2881 unsigned mask = (1 << len) - 1;
2882 unsigned long base;
2883
2884 /*
2885 * VMware allows access to these ports even if they are denied
2886 * by the TSS I/O permission bitmap. Mimic that behavior.
2887 */
2888 if (enable_vmware_backdoor &&
2889 ((port == VMWARE_PORT_VMPORT) || (port == VMWARE_PORT_VMRPC)))
2890 return true;
2891
2892 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2893 if (!tr_seg.p)
2894 return false;
2895 if (desc_limit_scaled(&tr_seg) < 103)
2896 return false;
2897 base = get_desc_base(&tr_seg);
2898#ifdef CONFIG_X86_64
2899 base |= ((u64)base3) << 32;
2900#endif
2901 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
2902 if (r != X86EMUL_CONTINUE)
2903 return false;
2904 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2905 return false;
2906 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
2907 if (r != X86EMUL_CONTINUE)
2908 return false;
2909 if ((perm >> bit_idx) & mask)
2910 return false;
2911 return true;
2912}
2913
2914static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2915 u16 port, u16 len)
2916{
2917 if (ctxt->perm_ok)
2918 return true;
2919
2920 if (emulator_bad_iopl(ctxt))
2921 if (!emulator_io_port_access_allowed(ctxt, port, len))
2922 return false;
2923
2924 ctxt->perm_ok = true;
2925
2926 return true;
2927}
2928
2929static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
2930{
2931 /*
2932 * Intel CPUs mask the counter and pointers in a rather strange
2933 * manner when ECX is zero, due to REP-string optimizations.
2934 */
2935#ifdef CONFIG_X86_64
2936 if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
2937 return;
2938
2939 *reg_write(ctxt, VCPU_REGS_RCX) = 0;
2940
2941 switch (ctxt->b) {
2942 case 0xa4: /* movsb */
2943 case 0xa5: /* movsd/w */
2944 *reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
2945 fallthrough;
2946 case 0xaa: /* stosb */
2947 case 0xab: /* stosd/w */
2948 *reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
2949 }
2950#endif
2951}
2952
2953static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2954 struct tss_segment_16 *tss)
2955{
2956 tss->ip = ctxt->_eip;
2957 tss->flag = ctxt->eflags;
2958 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2959 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2960 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2961 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2962 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2963 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2964 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2965 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2966
2967 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2968 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2969 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2970 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2971 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2972}
2973
2974static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2975 struct tss_segment_16 *tss)
2976{
2977 int ret;
2978 u8 cpl;
2979
2980 ctxt->_eip = tss->ip;
2981 ctxt->eflags = tss->flag | 2;
2982 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2983 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2984 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2985 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2986 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2987 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2988 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2989 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
2990
2991 /*
2992 * SDM says that segment selectors are loaded before segment
2993 * descriptors
2994 */
2995 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2996 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2997 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2998 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2999 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3000
3001 cpl = tss->cs & 3;
3002
3003 /*
3004 * Now load the segment descriptors. If a fault happens at this
3005 * stage, it is handled in the context of the new task.
3006 */
3007 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
3008 X86_TRANSFER_TASK_SWITCH, NULL);
3009 if (ret != X86EMUL_CONTINUE)
3010 return ret;
3011 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3012 X86_TRANSFER_TASK_SWITCH, NULL);
3013 if (ret != X86EMUL_CONTINUE)
3014 return ret;
3015 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3016 X86_TRANSFER_TASK_SWITCH, NULL);
3017 if (ret != X86EMUL_CONTINUE)
3018 return ret;
3019 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3020 X86_TRANSFER_TASK_SWITCH, NULL);
3021 if (ret != X86EMUL_CONTINUE)
3022 return ret;
3023 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3024 X86_TRANSFER_TASK_SWITCH, NULL);
3025 if (ret != X86EMUL_CONTINUE)
3026 return ret;
3027
3028 return X86EMUL_CONTINUE;
3029}
3030
3031static int task_switch_16(struct x86_emulate_ctxt *ctxt,
3032 u16 tss_selector, u16 old_tss_sel,
3033 ulong old_tss_base, struct desc_struct *new_desc)
3034{
3035 struct tss_segment_16 tss_seg;
3036 int ret;
3037 u32 new_tss_base = get_desc_base(new_desc);
3038
3039 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3040 if (ret != X86EMUL_CONTINUE)
3041 return ret;
3042
3043 save_state_to_tss16(ctxt, &tss_seg);
3044
3045 ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3046 if (ret != X86EMUL_CONTINUE)
3047 return ret;
3048
3049 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
3050 if (ret != X86EMUL_CONTINUE)
3051 return ret;
3052
3053 if (old_tss_sel != 0xffff) {
3054 tss_seg.prev_task_link = old_tss_sel;
3055
3056 ret = linear_write_system(ctxt, new_tss_base,
3057 &tss_seg.prev_task_link,
3058 sizeof(tss_seg.prev_task_link));
3059 if (ret != X86EMUL_CONTINUE)
3060 return ret;
3061 }
3062
3063 return load_state_from_tss16(ctxt, &tss_seg);
3064}
3065
3066static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
3067 struct tss_segment_32 *tss)
3068{
3069 /* CR3 and the LDT selector are intentionally not saved */
3070 tss->eip = ctxt->_eip;
3071 tss->eflags = ctxt->eflags;
3072 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
3073 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
3074 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
3075 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
3076 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
3077 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
3078 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
3079 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
3080
3081 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
3082 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
3083 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
3084 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
3085 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
3086 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
3087}
3088
3089static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
3090 struct tss_segment_32 *tss)
3091{
3092 int ret;
3093 u8 cpl;
3094
3095 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
3096 return emulate_gp(ctxt, 0);
3097 ctxt->_eip = tss->eip;
3098 ctxt->eflags = tss->eflags | 2;
3099
3100 /* General purpose registers */
3101 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
3102 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
3103 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
3104 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
3105 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
3106 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
3107 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
3108 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
3109
3110 /*
3111 * SDM says that segment selectors are loaded before segment
3112 * descriptors. This is important because CPL checks will
3113 * use CS.RPL.
3114 */
3115 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
3116 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3117 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3118 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3119 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3120 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
3121 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
3122
3123 /*
3124 * If we're switching between Protected Mode and VM86, we need to make
3125 * sure to update the mode before loading the segment descriptors so
3126 * that the selectors are interpreted correctly.
3127 */
3128 if (ctxt->eflags & X86_EFLAGS_VM) {
3129 ctxt->mode = X86EMUL_MODE_VM86;
3130 cpl = 3;
3131 } else {
3132 ctxt->mode = X86EMUL_MODE_PROT32;
3133 cpl = tss->cs & 3;
3134 }
3135
3136 /*
3137 * Now load the segment descriptors. If a fault happens at this
3138 * stage, it is handled in the context of the new task.
3139 */
3140 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
3141 cpl, X86_TRANSFER_TASK_SWITCH, NULL);
3142 if (ret != X86EMUL_CONTINUE)
3143 return ret;
3144 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3145 X86_TRANSFER_TASK_SWITCH, NULL);
3146 if (ret != X86EMUL_CONTINUE)
3147 return ret;
3148 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3149 X86_TRANSFER_TASK_SWITCH, NULL);
3150 if (ret != X86EMUL_CONTINUE)
3151 return ret;
3152 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3153 X86_TRANSFER_TASK_SWITCH, NULL);
3154 if (ret != X86EMUL_CONTINUE)
3155 return ret;
3156 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3157 X86_TRANSFER_TASK_SWITCH, NULL);
3158 if (ret != X86EMUL_CONTINUE)
3159 return ret;
3160 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
3161 X86_TRANSFER_TASK_SWITCH, NULL);
3162 if (ret != X86EMUL_CONTINUE)
3163 return ret;
3164 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
3165 X86_TRANSFER_TASK_SWITCH, NULL);
3166
3167 return ret;
3168}
3169
3170static int task_switch_32(struct x86_emulate_ctxt *ctxt,
3171 u16 tss_selector, u16 old_tss_sel,
3172 ulong old_tss_base, struct desc_struct *new_desc)
3173{
3174 struct tss_segment_32 tss_seg;
3175 int ret;
3176 u32 new_tss_base = get_desc_base(new_desc);
3177 u32 eip_offset = offsetof(struct tss_segment_32, eip);
3178 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
3179
3180 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3181 if (ret != X86EMUL_CONTINUE)
3182 return ret;
3183
3184 save_state_to_tss32(ctxt, &tss_seg);
3185
3186 /* Only GP registers and segment selectors are saved */
3187 ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
3188 ldt_sel_offset - eip_offset);
3189 if (ret != X86EMUL_CONTINUE)
3190 return ret;
3191
3192 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
3193 if (ret != X86EMUL_CONTINUE)
3194 return ret;
3195
3196 if (old_tss_sel != 0xffff) {
3197 tss_seg.prev_task_link = old_tss_sel;
3198
3199 ret = linear_write_system(ctxt, new_tss_base,
3200 &tss_seg.prev_task_link,
3201 sizeof(tss_seg.prev_task_link));
3202 if (ret != X86EMUL_CONTINUE)
3203 return ret;
3204 }
3205
3206 return load_state_from_tss32(ctxt, &tss_seg);
3207}
3208
3209static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
3210 u16 tss_selector, int idt_index, int reason,
3211 bool has_error_code, u32 error_code)
3212{
3213 const struct x86_emulate_ops *ops = ctxt->ops;
3214 struct desc_struct curr_tss_desc, next_tss_desc;
3215 int ret;
3216 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
3217 ulong old_tss_base =
3218 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
3219 u32 desc_limit;
3220 ulong desc_addr, dr7;
3221
3222 /* FIXME: old_tss_base == ~0 ? */
3223
3224 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
3225 if (ret != X86EMUL_CONTINUE)
3226 return ret;
3227 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
3228 if (ret != X86EMUL_CONTINUE)
3229 return ret;
3230
3231 /* FIXME: check that next_tss_desc is tss */
3232
3233 /*
3234 * Check privileges. The three cases are task switch caused by...
3235 *
3236 * 1. jmp/call/int to task gate: Check against DPL of the task gate
3237 * 2. Exception/IRQ/iret: No check is performed
3238 * 3. jmp/call to TSS/task-gate: No check is performed since the
3239 * hardware checks it before exiting.
3240 */
3241 if (reason == TASK_SWITCH_GATE) {
3242 if (idt_index != -1) {
3243 /* Software interrupts */
3244 struct desc_struct task_gate_desc;
3245 int dpl;
3246
3247 ret = read_interrupt_descriptor(ctxt, idt_index,
3248 &task_gate_desc);
3249 if (ret != X86EMUL_CONTINUE)
3250 return ret;
3251
3252 dpl = task_gate_desc.dpl;
3253 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
3254 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
3255 }
3256 }
3257
3258 desc_limit = desc_limit_scaled(&next_tss_desc);
3259 if (!next_tss_desc.p ||
3260 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
3261 desc_limit < 0x2b)) {
3262 return emulate_ts(ctxt, tss_selector & 0xfffc);
3263 }
3264
3265 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
3266 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
3267 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
3268 }
3269
3270 if (reason == TASK_SWITCH_IRET)
3271 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
3272
3273 /* Set back link to the previous task only if the NT bit is set in
3274 * EFLAGS; note that old_tss_sel is not used after this point */
3275 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
3276 old_tss_sel = 0xffff;
3277
3278 if (next_tss_desc.type & 8)
3279 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
3280 old_tss_base, &next_tss_desc);
3281 else
3282 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
3283 old_tss_base, &next_tss_desc);
3284 if (ret != X86EMUL_CONTINUE)
3285 return ret;
3286
3287 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
3288 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
3289
3290 if (reason != TASK_SWITCH_IRET) {
3291 next_tss_desc.type |= (1 << 1); /* set busy flag */
3292 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
3293 }
3294
3295 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
3296 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
3297
3298 if (has_error_code) {
3299 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
3300 ctxt->lock_prefix = 0;
3301 ctxt->src.val = (unsigned long) error_code;
3302 ret = em_push(ctxt);
3303 }
3304
3305 ops->get_dr(ctxt, 7, &dr7);
3306 ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
3307
3308 return ret;
3309}
3310
3311int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
3312 u16 tss_selector, int idt_index, int reason,
3313 bool has_error_code, u32 error_code)
3314{
3315 int rc;
3316
3317 invalidate_registers(ctxt);
3318 ctxt->_eip = ctxt->eip;
3319 ctxt->dst.type = OP_NONE;
3320
3321 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
3322 has_error_code, error_code);
3323
3324 if (rc == X86EMUL_CONTINUE) {
3325 ctxt->eip = ctxt->_eip;
3326 writeback_registers(ctxt);
3327 }
3328
3329 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3330}
3331
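/*
 * Advance a string-op pointer register (SI/DI) by the number of elements
 * processed, moving backwards when EFLAGS.DF is set.
 */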
3332static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
3333 struct operand *op)
3334{
3335 int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
3336
3337 register_address_increment(ctxt, reg, df * op->bytes);
3338 op->addr.mem.ea = register_address(ctxt, reg);
3339}
3340
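/*
 * DAS: decimal adjust AL after subtraction, following the SDM pseudocode.
 * AF and CF reflect the BCD borrows; PF/ZF/SF are recomputed from the
 * adjusted result via the OR-with-zero below.
 */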
3341static int em_das(struct x86_emulate_ctxt *ctxt)
3342{
3343 u8 al, old_al;
3344 bool af, cf, old_cf;
3345
3346 cf = ctxt->eflags & X86_EFLAGS_CF;
3347 al = ctxt->dst.val;
3348
3349 old_al = al;
3350 old_cf = cf;
3351 cf = false;
3352 af = ctxt->eflags & X86_EFLAGS_AF;
3353 if ((al & 0x0f) > 9 || af) {
3354 al -= 6;
3355 cf = old_cf | (al >= 250);
3356 af = true;
3357 } else {
3358 af = false;
3359 }
3360 if (old_al > 0x99 || old_cf) {
3361 al -= 0x60;
3362 cf = true;
3363 }
3364
3365 ctxt->dst.val = al;
3366 /* Set PF, ZF, SF */
3367 ctxt->src.type = OP_IMM;
3368 ctxt->src.val = 0;
3369 ctxt->src.bytes = 1;
3370 fastop(ctxt, em_or);
3371 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
3372 if (cf)
3373 ctxt->eflags |= X86_EFLAGS_CF;
3374 if (af)
3375 ctxt->eflags |= X86_EFLAGS_AF;
3376 return X86EMUL_CONTINUE;
3377}
3378
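/*
 * AAM: AH = AL / imm8, AL = AL % imm8. An immediate of zero raises #DE;
 * PF/ZF/SF are set from the new AL.
 */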
3379static int em_aam(struct x86_emulate_ctxt *ctxt)
3380{
3381 u8 al, ah;
3382
3383 if (ctxt->src.val == 0)
3384 return emulate_de(ctxt);
3385
3386 al = ctxt->dst.val & 0xff;
3387 ah = al / ctxt->src.val;
3388 al %= ctxt->src.val;
3389
3390 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
3391
3392 /* Set PF, ZF, SF */
3393 ctxt->src.type = OP_IMM;
3394 ctxt->src.val = 0;
3395 ctxt->src.bytes = 1;
3396 fastop(ctxt, em_or);
3397
3398 return X86EMUL_CONTINUE;
3399}
3400
3401static int em_aad(struct x86_emulate_ctxt *ctxt)
3402{
3403 u8 al = ctxt->dst.val & 0xff;
3404 u8 ah = (ctxt->dst.val >> 8) & 0xff;
3405
3406 al = (al + (ah * ctxt->src.val)) & 0xff;
3407
3408 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
3409
3410 /* Set PF, ZF, SF */
3411 ctxt->src.type = OP_IMM;
3412 ctxt->src.val = 0;
3413 ctxt->src.bytes = 1;
3414 fastop(ctxt, em_or);
3415
3416 return X86EMUL_CONTINUE;
3417}
3418
3419static int em_call(struct x86_emulate_ctxt *ctxt)
3420{
3421 int rc;
3422 long rel = ctxt->src.val;
3423
3424 ctxt->src.val = (unsigned long)ctxt->_eip;
3425 rc = jmp_rel(ctxt, rel);
3426 if (rc != X86EMUL_CONTINUE)
3427 return rc;
3428 return em_push(ctxt);
3429}
3430
3431static int em_call_far(struct x86_emulate_ctxt *ctxt)
3432{
3433 u16 sel, old_cs;
3434 ulong old_eip;
3435 int rc;
3436 struct desc_struct old_desc, new_desc;
3437 const struct x86_emulate_ops *ops = ctxt->ops;
3438 int cpl = ctxt->ops->cpl(ctxt);
3439 enum x86emul_mode prev_mode = ctxt->mode;
3440
3441 old_eip = ctxt->_eip;
3442 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3443
3444 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3445 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3446 X86_TRANSFER_CALL_JMP, &new_desc);
3447 if (rc != X86EMUL_CONTINUE)
3448 return rc;
3449
3450 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
3451 if (rc != X86EMUL_CONTINUE)
3452 goto fail;
3453
3454 ctxt->src.val = old_cs;
3455 rc = em_push(ctxt);
3456 if (rc != X86EMUL_CONTINUE)
3457 goto fail;
3458
3459 ctxt->src.val = old_eip;
3460 rc = em_push(ctxt);
3461 /* If we failed, we tainted the memory, but at the very least we
3462 * should restore CS */
3463 if (rc != X86EMUL_CONTINUE) {
3464 pr_warn_once("faulting far call emulation tainted memory\n");
3465 goto fail;
3466 }
3467 return rc;
3468fail:
3469 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3470 ctxt->mode = prev_mode;
3471 return rc;
3472
3473}
3474
3475static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3476{
3477 int rc;
3478 unsigned long eip;
3479
3480 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3481 if (rc != X86EMUL_CONTINUE)
3482 return rc;
3483 rc = assign_eip_near(ctxt, eip);
3484 if (rc != X86EMUL_CONTINUE)
3485 return rc;
3486 rsp_increment(ctxt, ctxt->src.val);
3487 return X86EMUL_CONTINUE;
3488}
3489
3490static int em_xchg(struct x86_emulate_ctxt *ctxt)
3491{
3492 /* Write back the register source. */
3493 ctxt->src.val = ctxt->dst.val;
3494 write_register_operand(&ctxt->src);
3495
3496 /* Write back the memory destination with implicit LOCK prefix. */
3497 ctxt->dst.val = ctxt->src.orig_val;
3498 ctxt->lock_prefix = 1;
3499 return X86EMUL_CONTINUE;
3500}
3501
3502static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3503{
3504 ctxt->dst.val = ctxt->src2.val;
3505 return fastop(ctxt, em_imul);
3506}
3507
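/*
 * CWD/CDQ/CQO: replicate the accumulator's sign bit into DX/EDX/RDX.
 * Shifting the sign bit down yields 0 or 1; subtracting one and
 * inverting turns that into all-zeroes or all-ones respectively.
 */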
3508static int em_cwd(struct x86_emulate_ctxt *ctxt)
3509{
3510 ctxt->dst.type = OP_REG;
3511 ctxt->dst.bytes = ctxt->src.bytes;
3512 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3513 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3514
3515 return X86EMUL_CONTINUE;
3516}
3517
3518static int em_rdpid(struct x86_emulate_ctxt *ctxt)
3519{
3520 u64 tsc_aux = 0;
3521
3522 if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux))
3523 return emulate_ud(ctxt);
3524 ctxt->dst.val = tsc_aux;
3525 return X86EMUL_CONTINUE;
3526}
3527
3528static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3529{
3530 u64 tsc = 0;
3531
3532 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3533 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3534 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3535 return X86EMUL_CONTINUE;
3536}
3537
3538static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3539{
3540 u64 pmc;
3541
3542 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3543 return emulate_gp(ctxt, 0);
3544 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3545 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3546 return X86EMUL_CONTINUE;
3547}
3548
3549static int em_mov(struct x86_emulate_ctxt *ctxt)
3550{
3551 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3552 return X86EMUL_CONTINUE;
3553}
3554
3555static int em_movbe(struct x86_emulate_ctxt *ctxt)
3556{
3557 u16 tmp;
3558
3559 if (!ctxt->ops->guest_has_movbe(ctxt))
3560 return emulate_ud(ctxt);
3561
3562 switch (ctxt->op_bytes) {
3563 case 2:
3564 /*
3565 * From MOVBE definition: "...When the operand size is 16 bits,
3566 * the upper word of the destination register remains unchanged
3567 * ..."
3568 *
3569 * Casting either ->valptr or ->val to u16 breaks strict-aliasing
3570 * rules, so we have to do the operation almost by hand.
3571 */
3572 tmp = (u16)ctxt->src.val;
3573 ctxt->dst.val &= ~0xffffUL;
3574 ctxt->dst.val |= (unsigned long)swab16(tmp);
3575 break;
3576 case 4:
3577 ctxt->dst.val = swab32((u32)ctxt->src.val);
3578 break;
3579 case 8:
3580 ctxt->dst.val = swab64(ctxt->src.val);
3581 break;
3582 default:
3583 BUG();
3584 }
3585 return X86EMUL_CONTINUE;
3586}
3587
3588static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3589{
3590 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3591 return emulate_gp(ctxt, 0);
3592
3593 /* Disable writeback. */
3594 ctxt->dst.type = OP_NONE;
3595 return X86EMUL_CONTINUE;
3596}
3597
3598static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3599{
3600 unsigned long val;
3601
3602 if (ctxt->mode == X86EMUL_MODE_PROT64)
3603 val = ctxt->src.val & ~0ULL;
3604 else
3605 val = ctxt->src.val & ~0U;
3606
3607 /* #UD condition is already handled. */
3608 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3609 return emulate_gp(ctxt, 0);
3610
3611 /* Disable writeback. */
3612 ctxt->dst.type = OP_NONE;
3613 return X86EMUL_CONTINUE;
3614}
3615
3616static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3617{
3618 u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);
3619 u64 msr_data;
3620 int r;
3621
3622 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3623 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3624 r = ctxt->ops->set_msr(ctxt, msr_index, msr_data);
3625
3626 if (r == X86EMUL_IO_NEEDED)
3627 return r;
3628
3629 if (r > 0)
3630 return emulate_gp(ctxt, 0);
3631
3632 return r < 0 ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;
3633}
3634
3635static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3636{
3637 u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);
3638 u64 msr_data;
3639 int r;
3640
3641 r = ctxt->ops->get_msr(ctxt, msr_index, &msr_data);
3642
3643 if (r == X86EMUL_IO_NEEDED)
3644 return r;
3645
3646 if (r)
3647 return emulate_gp(ctxt, 0);
3648
3649 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3650 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3651 return X86EMUL_CONTINUE;
3652}
3653
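/*
 * Store a segment selector to a register or memory operand. For the
 * system segments (TR/LDTR), CR4.UMIP makes the store privileged, so
 * fault with #GP(0) when CPL > 0.
 */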
3654static int em_store_sreg(struct x86_emulate_ctxt *ctxt, int segment)
3655{
3656 if (segment > VCPU_SREG_GS &&
3657 (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3658 ctxt->ops->cpl(ctxt) > 0)
3659 return emulate_gp(ctxt, 0);
3660
3661 ctxt->dst.val = get_segment_selector(ctxt, segment);
3662 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3663 ctxt->dst.bytes = 2;
3664 return X86EMUL_CONTINUE;
3665}
3666
3667static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3668{
3669 if (ctxt->modrm_reg > VCPU_SREG_GS)
3670 return emulate_ud(ctxt);
3671
3672 return em_store_sreg(ctxt, ctxt->modrm_reg);
3673}
3674
3675static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3676{
3677 u16 sel = ctxt->src.val;
3678
3679 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3680 return emulate_ud(ctxt);
3681
3682 if (ctxt->modrm_reg == VCPU_SREG_SS)
3683 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3684
3685 /* Disable writeback. */
3686 ctxt->dst.type = OP_NONE;
3687 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3688}
3689
3690static int em_sldt(struct x86_emulate_ctxt *ctxt)
3691{
3692 return em_store_sreg(ctxt, VCPU_SREG_LDTR);
3693}
3694
3695static int em_lldt(struct x86_emulate_ctxt *ctxt)
3696{
3697 u16 sel = ctxt->src.val;
3698
3699 /* Disable writeback. */
3700 ctxt->dst.type = OP_NONE;
3701 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3702}
3703
3704static int em_str(struct x86_emulate_ctxt *ctxt)
3705{
3706 return em_store_sreg(ctxt, VCPU_SREG_TR);
3707}
3708
3709static int em_ltr(struct x86_emulate_ctxt *ctxt)
3710{
3711 u16 sel = ctxt->src.val;
3712
3713 /* Disable writeback. */
3714 ctxt->dst.type = OP_NONE;
3715 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3716}
3717
3718static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3719{
3720 int rc;
3721 ulong linear;
3722
3723 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3724 if (rc == X86EMUL_CONTINUE)
3725 ctxt->ops->invlpg(ctxt, linear);
3726 /* Disable writeback. */
3727 ctxt->dst.type = OP_NONE;
3728 return X86EMUL_CONTINUE;
3729}
3730
3731static int em_clts(struct x86_emulate_ctxt *ctxt)
3732{
3733 ulong cr0;
3734
3735 cr0 = ctxt->ops->get_cr(ctxt, 0);
3736 cr0 &= ~X86_CR0_TS;
3737 ctxt->ops->set_cr(ctxt, 0, cr0);
3738 return X86EMUL_CONTINUE;
3739}
3740
3741static int em_hypercall(struct x86_emulate_ctxt *ctxt)
3742{
3743 int rc = ctxt->ops->fix_hypercall(ctxt);
3744
3745 if (rc != X86EMUL_CONTINUE)
3746 return rc;
3747
3748 /* Let the processor re-execute the fixed hypercall */
3749 ctxt->_eip = ctxt->eip;
3750 /* Disable writeback. */
3751 ctxt->dst.type = OP_NONE;
3752 return X86EMUL_CONTINUE;
3753}
3754
3755static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3756 void (*get)(struct x86_emulate_ctxt *ctxt,
3757 struct desc_ptr *ptr))
3758{
3759 struct desc_ptr desc_ptr;
3760
3761 if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3762 ctxt->ops->cpl(ctxt) > 0)
3763 return emulate_gp(ctxt, 0);
3764
3765 if (ctxt->mode == X86EMUL_MODE_PROT64)
3766 ctxt->op_bytes = 8;
3767 get(ctxt, &desc_ptr);
3768 if (ctxt->op_bytes == 2) {
3769 ctxt->op_bytes = 4;
3770 desc_ptr.address &= 0x00ffffff;
3771 }
3772 /* Disable writeback. */
3773 ctxt->dst.type = OP_NONE;
3774 return segmented_write_std(ctxt, ctxt->dst.addr.mem,
3775 &desc_ptr, 2 + ctxt->op_bytes);
3776}
3777
3778static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3779{
3780 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3781}
3782
3783static int em_sidt(struct x86_emulate_ctxt *ctxt)
3784{
3785 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3786}
3787
3788static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3789{
3790 struct desc_ptr desc_ptr;
3791 int rc;
3792
3793 if (ctxt->mode == X86EMUL_MODE_PROT64)
3794 ctxt->op_bytes = 8;
3795 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3796 &desc_ptr.size, &desc_ptr.address,
3797 ctxt->op_bytes);
3798 if (rc != X86EMUL_CONTINUE)
3799 return rc;
3800 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3801 emul_is_noncanonical_address(desc_ptr.address, ctxt))
3802 return emulate_gp(ctxt, 0);
3803 if (lgdt)
3804 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3805 else
3806 ctxt->ops->set_idt(ctxt, &desc_ptr);
3807 /* Disable writeback. */
3808 ctxt->dst.type = OP_NONE;
3809 return X86EMUL_CONTINUE;
3810}
3811
3812static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3813{
3814 return em_lgdt_lidt(ctxt, true);
3815}
3816
3817static int em_lidt(struct x86_emulate_ctxt *ctxt)
3818{
3819 return em_lgdt_lidt(ctxt, false);
3820}
3821
3822static int em_smsw(struct x86_emulate_ctxt *ctxt)
3823{
3824 if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3825 ctxt->ops->cpl(ctxt) > 0)
3826 return emulate_gp(ctxt, 0);
3827
3828 if (ctxt->dst.type == OP_MEM)
3829 ctxt->dst.bytes = 2;
3830 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3831 return X86EMUL_CONTINUE;
3832}
3833
3834static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3835{
3836 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3837 | (ctxt->src.val & 0x0f));
3838 ctxt->dst.type = OP_NONE;
3839 return X86EMUL_CONTINUE;
3840}
3841
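/*
 * LOOP/LOOPE/LOOPNE: decrement rCX and branch while it is non-zero.
 * XORing the opcode with 5 maps LOOPNE (0xe0) and LOOPE (0xe1) onto
 * condition codes 5 (ZF clear) and 4 (ZF set) for test_cc(); plain
 * LOOP (0xe2) branches on a non-zero count alone.
 */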
3842static int em_loop(struct x86_emulate_ctxt *ctxt)
3843{
3844 int rc = X86EMUL_CONTINUE;
3845
3846 register_address_increment(ctxt, VCPU_REGS_RCX, -1);
3847 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3848 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3849 rc = jmp_rel(ctxt, ctxt->src.val);
3850
3851 return rc;
3852}
3853
3854static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3855{
3856 int rc = X86EMUL_CONTINUE;
3857
3858 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3859 rc = jmp_rel(ctxt, ctxt->src.val);
3860
3861 return rc;
3862}
3863
3864static int em_in(struct x86_emulate_ctxt *ctxt)
3865{
3866 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3867 &ctxt->dst.val))
3868 return X86EMUL_IO_NEEDED;
3869
3870 return X86EMUL_CONTINUE;
3871}
3872
3873static int em_out(struct x86_emulate_ctxt *ctxt)
3874{
3875 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3876 &ctxt->src.val, 1);
3877 /* Disable writeback. */
3878 ctxt->dst.type = OP_NONE;
3879 return X86EMUL_CONTINUE;
3880}
3881
3882static int em_cli(struct x86_emulate_ctxt *ctxt)
3883{
3884 if (emulator_bad_iopl(ctxt))
3885 return emulate_gp(ctxt, 0);
3886
3887 ctxt->eflags &= ~X86_EFLAGS_IF;
3888 return X86EMUL_CONTINUE;
3889}
3890
3891static int em_sti(struct x86_emulate_ctxt *ctxt)
3892{
3893 if (emulator_bad_iopl(ctxt))
3894 return emulate_gp(ctxt, 0);
3895
3896 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3897 ctxt->eflags |= X86_EFLAGS_IF;
3898 return X86EMUL_CONTINUE;
3899}
3900
3901static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3902{
3903 u32 eax, ebx, ecx, edx;
3904 u64 msr = 0;
3905
3906 ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
3907 if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
3908 ctxt->ops->cpl(ctxt)) {
3909 return emulate_gp(ctxt, 0);
3910 }
3911
3912 eax = reg_read(ctxt, VCPU_REGS_RAX);
3913 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3914 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
3915 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3916 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3917 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3918 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3919 return X86EMUL_CONTINUE;
3920}
3921
3922static int em_sahf(struct x86_emulate_ctxt *ctxt)
3923{
3924 u32 flags;
3925
3926 flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
3927 X86_EFLAGS_SF;
3928 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3929
3930 ctxt->eflags &= ~0xffUL;
3931 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3932 return X86EMUL_CONTINUE;
3933}
3934
3935static int em_lahf(struct x86_emulate_ctxt *ctxt)
3936{
3937 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3938 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3939 return X86EMUL_CONTINUE;
3940}
3941
3942static int em_bswap(struct x86_emulate_ctxt *ctxt)
3943{
3944 switch (ctxt->op_bytes) {
3945#ifdef CONFIG_X86_64
3946 case 8:
3947 asm("bswap %0" : "+r"(ctxt->dst.val));
3948 break;
3949#endif
3950 default:
3951 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3952 break;
3953 }
3954 return X86EMUL_CONTINUE;
3955}
3956
3957static int em_clflush(struct x86_emulate_ctxt *ctxt)
3958{
3959 /* emulating clflush regardless of cpuid */
3960 return X86EMUL_CONTINUE;
3961}
3962
3963static int em_clflushopt(struct x86_emulate_ctxt *ctxt)
3964{
3965 /* emulating clflushopt regardless of cpuid */
3966 return X86EMUL_CONTINUE;
3967}
3968
3969static int em_movsxd(struct x86_emulate_ctxt *ctxt)
3970{
3971 ctxt->dst.val = (s32) ctxt->src.val;
3972 return X86EMUL_CONTINUE;
3973}
3974
3975static int check_fxsr(struct x86_emulate_ctxt *ctxt)
3976{
3977 if (!ctxt->ops->guest_has_fxsr(ctxt))
3978 return emulate_ud(ctxt);
3979
3980 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
3981 return emulate_nm(ctxt);
3982
3983 /*
3984	 * Rather than work around the lack of fxsave64/fxrstor64 on old
3985	 * compilers, don't emulate a case that should never be hit.
3986 */
3987 if (ctxt->mode >= X86EMUL_MODE_PROT64)
3988 return X86EMUL_UNHANDLEABLE;
3989
3990 return X86EMUL_CONTINUE;
3991}
3992
3993/*
3994 * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but does save
3995 * and restore MXCSR.
3996 */
3997static size_t __fxstate_size(int nregs)
3998{
3999 return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
4000}
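/*
 * Per the SDM's FXSAVE layout, the legacy header plus x87 state occupy
 * the first 160 bytes of the image and each XMM register adds 16 bytes,
 * so this yields 160/288/416 bytes for 0/8/16 XMM registers.
 */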
4001
4002static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
4003{
4004 bool cr4_osfxsr;
4005 if (ctxt->mode == X86EMUL_MODE_PROT64)
4006 return __fxstate_size(16);
4007
4008 cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
4009 return __fxstate_size(cr4_osfxsr ? 8 : 0);
4010}
4011
4012/*
4013 * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
4014 * 1) 16 bit mode
4015 * 2) 32 bit mode
4016 *     - like (1), but FIP and FDP (the FPU instruction/data pointers)
4017 *       are only 16 bit. At least Intel CPUs preserve whole 32 bit
4018 *       values, though, so (1) and (2) are the same wrt. save and restore
4019 *  3) 64-bit mode without REX.W prefix
4020 *     - like (2), but XMM 8-15 are being saved and restored
4021 *  4) 64-bit mode with REX.W prefix
4022 *     - like (3), but FIP and FDP are 64 bit
4023 *
4024 * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
4025 * desired result. (4) is not emulated.
4026 *
4027 * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
4028 * and FPU DS) should match.
4029 */
4030static int em_fxsave(struct x86_emulate_ctxt *ctxt)
4031{
4032 struct fxregs_state fx_state;
4033 int rc;
4034
4035 rc = check_fxsr(ctxt);
4036 if (rc != X86EMUL_CONTINUE)
4037 return rc;
4038
4039 kvm_fpu_get();
4040
4041 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
4042
4043 kvm_fpu_put();
4044
4045 if (rc != X86EMUL_CONTINUE)
4046 return rc;
4047
4048 return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
4049 fxstate_size(ctxt));
4050}
4051
4052/*
4053 * FXRSTOR might restore XMM registers not provided by the guest. Fill
4054 * in the host registers (via FXSAVE) instead, so they won't be modified.
4055 * (preemption has to stay disabled until FXRSTOR).
4056 *
4057 * Use noinline so fx_tmp doesn't land in (and bloat) the callers' stack frames.
4058 */
4059static noinline int fxregs_fixup(struct fxregs_state *fx_state,
4060 const size_t used_size)
4061{
4062 struct fxregs_state fx_tmp;
4063 int rc;
4064
4065 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
4066 memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
4067 __fxstate_size(16) - used_size);
4068
4069 return rc;
4070}
4071
4072static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
4073{
4074 struct fxregs_state fx_state;
4075 int rc;
4076 size_t size;
4077
4078 rc = check_fxsr(ctxt);
4079 if (rc != X86EMUL_CONTINUE)
4080 return rc;
4081
4082 size = fxstate_size(ctxt);
4083 rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
4084 if (rc != X86EMUL_CONTINUE)
4085 return rc;
4086
4087 kvm_fpu_get();
4088
4089 if (size < __fxstate_size(16)) {
4090 rc = fxregs_fixup(&fx_state, size);
4091 if (rc != X86EMUL_CONTINUE)
4092 goto out;
4093 }
4094
4095 if (fx_state.mxcsr >> 16) {
4096 rc = emulate_gp(ctxt, 0);
4097 goto out;
4098 }
4099
4100 if (rc == X86EMUL_CONTINUE)
4101 rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
4102
4103out:
4104 kvm_fpu_put();
4105
4106 return rc;
4107}
4108
4109static int em_xsetbv(struct x86_emulate_ctxt *ctxt)
4110{
4111 u32 eax, ecx, edx;
4112
4113 eax = reg_read(ctxt, VCPU_REGS_RAX);
4114 edx = reg_read(ctxt, VCPU_REGS_RDX);
4115 ecx = reg_read(ctxt, VCPU_REGS_RCX);
4116
4117 if (ctxt->ops->set_xcr(ctxt, ecx, ((u64)edx << 32) | eax))
4118 return emulate_gp(ctxt, 0);
4119
4120 return X86EMUL_CONTINUE;
4121}
4122
4123static bool valid_cr(int nr)
4124{
4125 switch (nr) {
4126 case 0:
4127 case 2 ... 4:
4128 case 8:
4129 return true;
4130 default:
4131 return false;
4132 }
4133}
4134
4135static int check_cr_access(struct x86_emulate_ctxt *ctxt)
4136{
4137 if (!valid_cr(ctxt->modrm_reg))
4138 return emulate_ud(ctxt);
4139
4140 return X86EMUL_CONTINUE;
4141}
4142
4143static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
4144{
4145 unsigned long dr7;
4146
4147 ctxt->ops->get_dr(ctxt, 7, &dr7);
4148
4149	/* Check if DR7.GD (general detect) is set */
4150 return dr7 & (1 << 13);
4151}
4152
4153static int check_dr_read(struct x86_emulate_ctxt *ctxt)
4154{
4155 int dr = ctxt->modrm_reg;
4156 u64 cr4;
4157
4158 if (dr > 7)
4159 return emulate_ud(ctxt);
4160
4161 cr4 = ctxt->ops->get_cr(ctxt, 4);
4162 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
4163 return emulate_ud(ctxt);
4164
4165 if (check_dr7_gd(ctxt)) {
4166 ulong dr6;
4167
4168 ctxt->ops->get_dr(ctxt, 6, &dr6);
4169 dr6 &= ~DR_TRAP_BITS;
4170 dr6 |= DR6_BD | DR6_ACTIVE_LOW;
4171 ctxt->ops->set_dr(ctxt, 6, dr6);
4172 return emulate_db(ctxt);
4173 }
4174
4175 return X86EMUL_CONTINUE;
4176}
4177
4178static int check_dr_write(struct x86_emulate_ctxt *ctxt)
4179{
4180 u64 new_val = ctxt->src.val64;
4181 int dr = ctxt->modrm_reg;
4182
4183 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
4184 return emulate_gp(ctxt, 0);
4185
4186 return check_dr_read(ctxt);
4187}
4188
4189static int check_svme(struct x86_emulate_ctxt *ctxt)
4190{
4191 u64 efer = 0;
4192
4193 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4194
4195 if (!(efer & EFER_SVME))
4196 return emulate_ud(ctxt);
4197
4198 return X86EMUL_CONTINUE;
4199}
4200
4201static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
4202{
4203 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
4204
4205	/* Valid physical address (bits 63:48 must be clear)? */
4206 if (rax & 0xffff000000000000ULL)
4207 return emulate_gp(ctxt, 0);
4208
4209 return check_svme(ctxt);
4210}
4211
4212static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
4213{
4214 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4215
4216 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
4217 return emulate_gp(ctxt, 0);
4218
4219 return X86EMUL_CONTINUE;
4220}
4221
4222static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
4223{
4224 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4225 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
4226
4227 /*
4228	 * VMware allows access to these pseudo-PMCs even when read via RDPMC
4229 * in Ring3 when CR4.PCE=0.
4230 */
4231 if (enable_vmware_backdoor && is_vmware_backdoor_pmc(rcx))
4232 return X86EMUL_CONTINUE;
4233
4234 /*
4235	 * If CR4.PCE is clear, the SDM requires CPL=0 or CR0.PE=0. The CR0.PE
4236 * check however is unnecessary because CPL is always 0 outside
4237 * protected mode.
4238 */
4239 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
4240 ctxt->ops->check_pmc(ctxt, rcx))
4241 return emulate_gp(ctxt, 0);
4242
4243 return X86EMUL_CONTINUE;
4244}
4245
4246static int check_perm_in(struct x86_emulate_ctxt *ctxt)
4247{
4248 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
4249 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
4250 return emulate_gp(ctxt, 0);
4251
4252 return X86EMUL_CONTINUE;
4253}
4254
4255static int check_perm_out(struct x86_emulate_ctxt *ctxt)
4256{
4257 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
4258 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
4259 return emulate_gp(ctxt, 0);
4260
4261 return X86EMUL_CONTINUE;
4262}
4263
4264#define D(_y) { .flags = (_y) }
4265#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
4266#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
4267 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4268#define N D(NotImpl)
4269#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
4270#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
4271#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
4272#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
4273#define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
4274#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
4275#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
4276#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
4277#define II(_f, _e, _i) \
4278 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
4279#define IIP(_f, _e, _i, _p) \
4280 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
4281 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4282#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
4283
4284#define D2bv(_f) D((_f) | ByteOp), D(_f)
4285#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
4286#define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
4287#define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
4288#define I2bvIP(_f, _e, _i, _p) \
4289 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
4290
4291#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
4292 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
4293 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
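
/*
 * Example expansion: F6ALU(Lock, em_add) produces the six table entries
 * for opcodes 0x00-0x05, i.e. ADD r/m8,r8 / r/m,r / r8,r/m8 / r,r/m /
 * AL,imm8 / rAX,imm, with each F2bv() emitting a ByteOp entry followed
 * by its word/dword twin.
 */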
4294
4295static const struct opcode group7_rm0[] = {
4296 N,
4297 I(SrcNone | Priv | EmulateOnUD, em_hypercall),
4298 N, N, N, N, N, N,
4299};
4300
4301static const struct opcode group7_rm1[] = {
4302 DI(SrcNone | Priv, monitor),
4303 DI(SrcNone | Priv, mwait),
4304 N, N, N, N, N, N,
4305};
4306
4307static const struct opcode group7_rm2[] = {
4308 N,
4309 II(ImplicitOps | Priv, em_xsetbv, xsetbv),
4310 N, N, N, N, N, N,
4311};
4312
4313static const struct opcode group7_rm3[] = {
4314 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
4315 II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
4316 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
4317 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
4318 DIP(SrcNone | Prot | Priv, stgi, check_svme),
4319 DIP(SrcNone | Prot | Priv, clgi, check_svme),
4320 DIP(SrcNone | Prot | Priv, skinit, check_svme),
4321 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
4322};
4323
4324static const struct opcode group7_rm7[] = {
4325 N,
4326 DIP(SrcNone, rdtscp, check_rdtsc),
4327 N, N, N, N, N, N,
4328};
4329
4330static const struct opcode group1[] = {
4331 F(Lock, em_add),
4332 F(Lock | PageTable, em_or),
4333 F(Lock, em_adc),
4334 F(Lock, em_sbb),
4335 F(Lock | PageTable, em_and),
4336 F(Lock, em_sub),
4337 F(Lock, em_xor),
4338 F(NoWrite, em_cmp),
4339};
4340
4341static const struct opcode group1A[] = {
4342 I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
4343};
4344
4345static const struct opcode group2[] = {
4346 F(DstMem | ModRM, em_rol),
4347 F(DstMem | ModRM, em_ror),
4348 F(DstMem | ModRM, em_rcl),
4349 F(DstMem | ModRM, em_rcr),
4350 F(DstMem | ModRM, em_shl),
4351 F(DstMem | ModRM, em_shr),
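	/* /6 is an undocumented alias of SHL (/4) */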
4352 F(DstMem | ModRM, em_shl),
4353 F(DstMem | ModRM, em_sar),
4354};
4355
4356static const struct opcode group3[] = {
4357 F(DstMem | SrcImm | NoWrite, em_test),
4358 F(DstMem | SrcImm | NoWrite, em_test),
4359 F(DstMem | SrcNone | Lock, em_not),
4360 F(DstMem | SrcNone | Lock, em_neg),
4361 F(DstXacc | Src2Mem, em_mul_ex),
4362 F(DstXacc | Src2Mem, em_imul_ex),
4363 F(DstXacc | Src2Mem, em_div_ex),
4364 F(DstXacc | Src2Mem, em_idiv_ex),
4365};
4366
4367static const struct opcode group4[] = {
4368 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
4369 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
4370 N, N, N, N, N, N,
4371};
4372
4373static const struct opcode group5[] = {
4374 F(DstMem | SrcNone | Lock, em_inc),
4375 F(DstMem | SrcNone | Lock, em_dec),
4376 I(SrcMem | NearBranch | IsBranch, em_call_near_abs),
4377 I(SrcMemFAddr | ImplicitOps | IsBranch, em_call_far),
4378 I(SrcMem | NearBranch | IsBranch, em_jmp_abs),
4379 I(SrcMemFAddr | ImplicitOps | IsBranch, em_jmp_far),
4380 I(SrcMem | Stack | TwoMemOp, em_push), D(Undefined),
4381};
4382
4383static const struct opcode group6[] = {
4384 II(Prot | DstMem, em_sldt, sldt),
4385 II(Prot | DstMem, em_str, str),
4386 II(Prot | Priv | SrcMem16, em_lldt, lldt),
4387 II(Prot | Priv | SrcMem16, em_ltr, ltr),
4388 N, N, N, N,
4389};
4390
4391static const struct group_dual group7 = { {
4392 II(Mov | DstMem, em_sgdt, sgdt),
4393 II(Mov | DstMem, em_sidt, sidt),
4394 II(SrcMem | Priv, em_lgdt, lgdt),
4395 II(SrcMem | Priv, em_lidt, lidt),
4396 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4397 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4398 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
4399}, {
4400 EXT(0, group7_rm0),
4401 EXT(0, group7_rm1),
4402 EXT(0, group7_rm2),
4403 EXT(0, group7_rm3),
4404 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4405 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4406 EXT(0, group7_rm7),
4407} };
4408
4409static const struct opcode group8[] = {
4410 N, N, N, N,
4411 F(DstMem | SrcImmByte | NoWrite, em_bt),
4412 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
4413 F(DstMem | SrcImmByte | Lock, em_btr),
4414 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
4415};
4416
4417/*
4418 * The "memory" destination is actually always a register, since we come
4419 * from the register case of group9.
4420 */
4421static const struct gprefix pfx_0f_c7_7 = {
4422 N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdpid),
4423};
4424
4425
4426static const struct group_dual group9 = { {
4427 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
4428}, {
4429 N, N, N, N, N, N, N,
4430 GP(0, &pfx_0f_c7_7),
4431} };
4432
4433static const struct opcode group11[] = {
4434 I(DstMem | SrcImm | Mov | PageTable, em_mov),
4435 X7(D(Undefined)),
4436};
4437
4438static const struct gprefix pfx_0f_ae_7 = {
4439 I(SrcMem | ByteOp, em_clflush), I(SrcMem | ByteOp, em_clflushopt), N, N,
4440};
4441
4442static const struct group_dual group15 = { {
4443 I(ModRM | Aligned16, em_fxsave),
4444 I(ModRM | Aligned16, em_fxrstor),
4445 N, N, N, N, N, GP(0, &pfx_0f_ae_7),
4446}, {
4447 N, N, N, N, N, N, N, N,
4448} };
4449
4450static const struct gprefix pfx_0f_6f_0f_7f = {
4451 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
4452};
4453
4454static const struct instr_dual instr_dual_0f_2b = {
4455 I(0, em_mov), N
4456};
4457
4458static const struct gprefix pfx_0f_2b = {
4459 ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
4460};
4461
4462static const struct gprefix pfx_0f_10_0f_11 = {
4463 I(Unaligned, em_mov), I(Unaligned, em_mov), N, N,
4464};
4465
4466static const struct gprefix pfx_0f_28_0f_29 = {
4467 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
4468};
4469
4470static const struct gprefix pfx_0f_e7 = {
4471 N, I(Sse, em_mov), N, N,
4472};
4473
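/*
 * For an Escape entry, op[] is indexed by ModRM.reg when ModRM.mod != 3,
 * and high[] by (ModRM - 0xc0) when ModRM.mod == 3; see the Escape case
 * in x86_decode_insn().
 */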
4474static const struct escape escape_d9 = { {
4475 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
4476}, {
4477 /* 0xC0 - 0xC7 */
4478 N, N, N, N, N, N, N, N,
4479 /* 0xC8 - 0xCF */
4480 N, N, N, N, N, N, N, N,
4481	/* 0xD0 - 0xD7 */
4482 N, N, N, N, N, N, N, N,
4483 /* 0xD8 - 0xDF */
4484 N, N, N, N, N, N, N, N,
4485 /* 0xE0 - 0xE7 */
4486 N, N, N, N, N, N, N, N,
4487 /* 0xE8 - 0xEF */
4488 N, N, N, N, N, N, N, N,
4489 /* 0xF0 - 0xF7 */
4490 N, N, N, N, N, N, N, N,
4491 /* 0xF8 - 0xFF */
4492 N, N, N, N, N, N, N, N,
4493} };
4494
4495static const struct escape escape_db = { {
4496 N, N, N, N, N, N, N, N,
4497}, {
4498 /* 0xC0 - 0xC7 */
4499 N, N, N, N, N, N, N, N,
4500 /* 0xC8 - 0xCF */
4501 N, N, N, N, N, N, N, N,
4502	/* 0xD0 - 0xD7 */
4503 N, N, N, N, N, N, N, N,
4504 /* 0xD8 - 0xDF */
4505 N, N, N, N, N, N, N, N,
4506 /* 0xE0 - 0xE7 */
4507 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
4508 /* 0xE8 - 0xEF */
4509 N, N, N, N, N, N, N, N,
4510 /* 0xF0 - 0xF7 */
4511 N, N, N, N, N, N, N, N,
4512 /* 0xF8 - 0xFF */
4513 N, N, N, N, N, N, N, N,
4514} };
4515
4516static const struct escape escape_dd = { {
4517 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
4518}, {
4519 /* 0xC0 - 0xC7 */
4520 N, N, N, N, N, N, N, N,
4521 /* 0xC8 - 0xCF */
4522 N, N, N, N, N, N, N, N,
4523	/* 0xD0 - 0xD7 */
4524 N, N, N, N, N, N, N, N,
4525 /* 0xD8 - 0xDF */
4526 N, N, N, N, N, N, N, N,
4527 /* 0xE0 - 0xE7 */
4528 N, N, N, N, N, N, N, N,
4529 /* 0xE8 - 0xEF */
4530 N, N, N, N, N, N, N, N,
4531 /* 0xF0 - 0xF7 */
4532 N, N, N, N, N, N, N, N,
4533 /* 0xF8 - 0xFF */
4534 N, N, N, N, N, N, N, N,
4535} };
4536
4537static const struct instr_dual instr_dual_0f_c3 = {
4538 I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
4539};
4540
4541static const struct mode_dual mode_dual_63 = {
4542 N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
4543};
4544
4545static const struct opcode opcode_table[256] = {
4546 /* 0x00 - 0x07 */
4547 F6ALU(Lock, em_add),
4548 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
4549 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
4550 /* 0x08 - 0x0F */
4551 F6ALU(Lock | PageTable, em_or),
4552 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
4553 N,
4554 /* 0x10 - 0x17 */
4555 F6ALU(Lock, em_adc),
4556 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
4557 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
4558 /* 0x18 - 0x1F */
4559 F6ALU(Lock, em_sbb),
4560 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
4561 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
4562 /* 0x20 - 0x27 */
4563 F6ALU(Lock | PageTable, em_and), N, N,
4564 /* 0x28 - 0x2F */
4565 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
4566 /* 0x30 - 0x37 */
4567 F6ALU(Lock, em_xor), N, N,
4568 /* 0x38 - 0x3F */
4569 F6ALU(NoWrite, em_cmp), N, N,
4570 /* 0x40 - 0x4F */
4571 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
4572 /* 0x50 - 0x57 */
4573 X8(I(SrcReg | Stack, em_push)),
4574 /* 0x58 - 0x5F */
4575 X8(I(DstReg | Stack, em_pop)),
4576 /* 0x60 - 0x67 */
4577 I(ImplicitOps | Stack | No64, em_pusha),
4578 I(ImplicitOps | Stack | No64, em_popa),
4579 N, MD(ModRM, &mode_dual_63),
4580 N, N, N, N,
4581 /* 0x68 - 0x6F */
4582 I(SrcImm | Mov | Stack, em_push),
4583 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
4584 I(SrcImmByte | Mov | Stack, em_push),
4585 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
4586 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
4587 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
4588 /* 0x70 - 0x7F */
4589 X16(D(SrcImmByte | NearBranch | IsBranch)),
4590 /* 0x80 - 0x87 */
4591 G(ByteOp | DstMem | SrcImm, group1),
4592 G(DstMem | SrcImm, group1),
4593 G(ByteOp | DstMem | SrcImm | No64, group1),
4594 G(DstMem | SrcImmByte, group1),
4595 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
4596 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
4597 /* 0x88 - 0x8F */
4598 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
4599 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
4600 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
4601 D(ModRM | SrcMem | NoAccess | DstReg),
4602 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
4603 G(0, group1A),
4604 /* 0x90 - 0x97 */
4605 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
4606 /* 0x98 - 0x9F */
4607 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
4608 I(SrcImmFAddr | No64 | IsBranch, em_call_far), N,
4609 II(ImplicitOps | Stack, em_pushf, pushf),
4610 II(ImplicitOps | Stack, em_popf, popf),
4611 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
4612 /* 0xA0 - 0xA7 */
4613 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
4614 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4615 I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
4616 F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
4617 /* 0xA8 - 0xAF */
4618 F2bv(DstAcc | SrcImm | NoWrite, em_test),
4619 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4620 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4621 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4622 /* 0xB0 - 0xB7 */
4623 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4624 /* 0xB8 - 0xBF */
4625 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4626 /* 0xC0 - 0xC7 */
4627 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4628 I(ImplicitOps | NearBranch | SrcImmU16 | IsBranch, em_ret_near_imm),
4629 I(ImplicitOps | NearBranch | IsBranch, em_ret),
4630 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4631 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4632 G(ByteOp, group11), G(0, group11),
4633 /* 0xC8 - 0xCF */
4634 I(Stack | SrcImmU16 | Src2ImmByte | IsBranch, em_enter),
4635 I(Stack | IsBranch, em_leave),
4636 I(ImplicitOps | SrcImmU16 | IsBranch, em_ret_far_imm),
4637 I(ImplicitOps | IsBranch, em_ret_far),
4638 D(ImplicitOps | IsBranch), DI(SrcImmByte | IsBranch, intn),
4639 D(ImplicitOps | No64 | IsBranch),
4640 II(ImplicitOps | IsBranch, em_iret, iret),
4641 /* 0xD0 - 0xD7 */
4642 G(Src2One | ByteOp, group2), G(Src2One, group2),
4643 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4644 I(DstAcc | SrcImmUByte | No64, em_aam),
4645 I(DstAcc | SrcImmUByte | No64, em_aad),
4646 F(DstAcc | ByteOp | No64, em_salc),
4647 I(DstAcc | SrcXLat | ByteOp, em_mov),
4648 /* 0xD8 - 0xDF */
4649 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4650 /* 0xE0 - 0xE7 */
4651 X3(I(SrcImmByte | NearBranch | IsBranch, em_loop)),
4652 I(SrcImmByte | NearBranch | IsBranch, em_jcxz),
4653 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4654 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4655 /* 0xE8 - 0xEF */
4656 I(SrcImm | NearBranch | IsBranch, em_call),
4657 D(SrcImm | ImplicitOps | NearBranch | IsBranch),
4658 I(SrcImmFAddr | No64 | IsBranch, em_jmp_far),
4659 D(SrcImmByte | ImplicitOps | NearBranch | IsBranch),
4660 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4661 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4662 /* 0xF0 - 0xF7 */
4663 N, DI(ImplicitOps, icebp), N, N,
4664 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4665 G(ByteOp, group3), G(0, group3),
4666 /* 0xF8 - 0xFF */
4667 D(ImplicitOps), D(ImplicitOps),
4668 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4669 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4670};
4671
4672static const struct opcode twobyte_table[256] = {
4673 /* 0x00 - 0x0F */
4674 G(0, group6), GD(0, &group7), N, N,
4675 N, I(ImplicitOps | EmulateOnUD | IsBranch, em_syscall),
4676 II(ImplicitOps | Priv, em_clts, clts), N,
4677 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4678 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4679 /* 0x10 - 0x1F */
4680 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_10_0f_11),
4681 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_10_0f_11),
4682 N, N, N, N, N, N,
4683 D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 4 * prefetch + 4 * reserved NOP */
4684 D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4685 D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
4686 D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
4687 D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
4688 D(ImplicitOps | ModRM | SrcMem | NoAccess), /* NOP + 7 * reserved NOP */
4689 /* 0x20 - 0x2F */
4690 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_access),
4691 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4692 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4693 check_cr_access),
4694 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4695 check_dr_write),
4696 N, N, N, N,
4697 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4698 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4699 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4700 N, N, N, N,
4701 /* 0x30 - 0x3F */
4702 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4703 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4704 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4705 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4706 I(ImplicitOps | EmulateOnUD | IsBranch, em_sysenter),
4707 I(ImplicitOps | Priv | EmulateOnUD | IsBranch, em_sysexit),
4708 N, N,
4709 N, N, N, N, N, N, N, N,
4710 /* 0x40 - 0x4F */
4711 X16(D(DstReg | SrcMem | ModRM)),
4712 /* 0x50 - 0x5F */
4713 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4714 /* 0x60 - 0x6F */
4715 N, N, N, N,
4716 N, N, N, N,
4717 N, N, N, N,
4718 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4719 /* 0x70 - 0x7F */
4720 N, N, N, N,
4721 N, N, N, N,
4722 N, N, N, N,
4723 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4724 /* 0x80 - 0x8F */
4725 X16(D(SrcImm | NearBranch | IsBranch)),
4726 /* 0x90 - 0x9F */
4727 X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
4728 /* 0xA0 - 0xA7 */
4729 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4730 II(ImplicitOps, em_cpuid, cpuid),
4731 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4732 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4733 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4734 /* 0xA8 - 0xAF */
4735 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4736 II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
4737 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4738 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4739 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4740 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4741 /* 0xB0 - 0xB7 */
4742 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
4743 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4744 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4745 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4746 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4747 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4748 /* 0xB8 - 0xBF */
4749 N, N,
4750 G(BitOp, group8),
4751 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4752 I(DstReg | SrcMem | ModRM, em_bsf_c),
4753 I(DstReg | SrcMem | ModRM, em_bsr_c),
4754 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4755 /* 0xC0 - 0xC7 */
4756 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4757 N, ID(0, &instr_dual_0f_c3),
4758 N, N, N, GD(0, &group9),
4759 /* 0xC8 - 0xCF */
4760 X8(I(DstReg, em_bswap)),
4761 /* 0xD0 - 0xDF */
4762 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4763 /* 0xE0 - 0xEF */
4764 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4765 N, N, N, N, N, N, N, N,
4766 /* 0xF0 - 0xFF */
4767 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4768};
4769
4770static const struct instr_dual instr_dual_0f_38_f0 = {
4771 I(DstReg | SrcMem | Mov, em_movbe), N
4772};
4773
4774static const struct instr_dual instr_dual_0f_38_f1 = {
4775 I(DstMem | SrcReg | Mov, em_movbe), N
4776};
4777
4778static const struct gprefix three_byte_0f_38_f0 = {
4779 ID(0, &instr_dual_0f_38_f0), N, N, N
4780};
4781
4782static const struct gprefix three_byte_0f_38_f1 = {
4783 ID(0, &instr_dual_0f_38_f1), N, N, N
4784};
4785
4786/*
4787 * The insns below are indexed by the third opcode byte; the GP()
4788 * entries then dispatch on the mandatory (66/F2/F3) prefix.
4789 */
4790static const struct opcode opcode_map_0f_38[256] = {
4791 /* 0x00 - 0x7f */
4792 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4793 /* 0x80 - 0xef */
4794 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4795 /* 0xf0 - 0xf1 */
4796 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
4797 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
4798 /* 0xf2 - 0xff */
4799 N, N, X4(N), X8(N)
4800};
4801
4802#undef D
4803#undef N
4804#undef G
4805#undef GD
4806#undef I
4807#undef GP
4808#undef EXT
4809#undef MD
4810#undef ID
4811
4812#undef D2bv
4813#undef D2bvIP
4814#undef I2bv
4815#undef I2bvIP
4816#undef F6ALU
4817
4818static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4819{
4820 unsigned size;
4821
4822 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
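	/*
	 * 64-bit ops normally take a 32-bit immediate, sign extended;
	 * OpImm64 passes ctxt->op_bytes to decode_imm() directly.
	 */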
4823 if (size == 8)
4824 size = 4;
4825 return size;
4826}
4827
4828static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4829 unsigned size, bool sign_extension)
4830{
4831 int rc = X86EMUL_CONTINUE;
4832
4833 op->type = OP_IMM;
4834 op->bytes = size;
4835 op->addr.mem.ea = ctxt->_eip;
4836 /* NB. Immediates are sign-extended as necessary. */
4837 switch (op->bytes) {
4838 case 1:
4839 op->val = insn_fetch(s8, ctxt);
4840 break;
4841 case 2:
4842 op->val = insn_fetch(s16, ctxt);
4843 break;
4844 case 4:
4845 op->val = insn_fetch(s32, ctxt);
4846 break;
4847 case 8:
4848 op->val = insn_fetch(s64, ctxt);
4849 break;
4850 }
4851 if (!sign_extension) {
4852 switch (op->bytes) {
4853 case 1:
4854 op->val &= 0xff;
4855 break;
4856 case 2:
4857 op->val &= 0xffff;
4858 break;
4859 case 4:
4860 op->val &= 0xffffffff;
4861 break;
4862 }
4863 }
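	/* insn_fetch() jumps here, with rc already set, on a failed fetch. */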
4864done:
4865 return rc;
4866}
4867
4868static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4869 unsigned d)
4870{
4871 int rc = X86EMUL_CONTINUE;
4872
4873 switch (d) {
4874 case OpReg:
4875 decode_register_operand(ctxt, op);
4876 break;
4877 case OpImmUByte:
4878 rc = decode_imm(ctxt, op, 1, false);
4879 break;
4880 case OpMem:
4881 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4882 mem_common:
4883 *op = ctxt->memop;
4884 ctxt->memopp = op;
4885 if (ctxt->d & BitOp)
4886 fetch_bit_operand(ctxt);
4887 op->orig_val = op->val;
4888 break;
4889 case OpMem64:
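		/* 16 bytes for CMPXCHG16B (REX.W set), else 8 for CMPXCHG8B. */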
4890 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4891 goto mem_common;
4892 case OpAcc:
4893 op->type = OP_REG;
4894 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4895 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4896 fetch_register_operand(op);
4897 op->orig_val = op->val;
4898 break;
4899 case OpAccLo:
4900 op->type = OP_REG;
4901 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4902 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4903 fetch_register_operand(op);
4904 op->orig_val = op->val;
4905 break;
4906 case OpAccHi:
4907 if (ctxt->d & ByteOp) {
4908 op->type = OP_NONE;
4909 break;
4910 }
4911 op->type = OP_REG;
4912 op->bytes = ctxt->op_bytes;
4913 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4914 fetch_register_operand(op);
4915 op->orig_val = op->val;
4916 break;
4917 case OpDI:
4918 op->type = OP_MEM;
4919 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4920 op->addr.mem.ea =
4921 register_address(ctxt, VCPU_REGS_RDI);
4922 op->addr.mem.seg = VCPU_SREG_ES;
4923 op->val = 0;
4924 op->count = 1;
4925 break;
4926 case OpDX:
4927 op->type = OP_REG;
4928 op->bytes = 2;
4929 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4930 fetch_register_operand(op);
4931 break;
4932 case OpCL:
4933 op->type = OP_IMM;
4934 op->bytes = 1;
4935 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4936 break;
4937 case OpImmByte:
4938 rc = decode_imm(ctxt, op, 1, true);
4939 break;
4940 case OpOne:
4941 op->type = OP_IMM;
4942 op->bytes = 1;
4943 op->val = 1;
4944 break;
4945 case OpImm:
4946 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4947 break;
4948 case OpImm64:
4949 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4950 break;
4951 case OpMem8:
4952 ctxt->memop.bytes = 1;
4953 if (ctxt->memop.type == OP_REG) {
4954 ctxt->memop.addr.reg = decode_register(ctxt,
4955 ctxt->modrm_rm, true);
4956 fetch_register_operand(&ctxt->memop);
4957 }
4958 goto mem_common;
4959 case OpMem16:
4960 ctxt->memop.bytes = 2;
4961 goto mem_common;
4962 case OpMem32:
4963 ctxt->memop.bytes = 4;
4964 goto mem_common;
4965 case OpImmU16:
4966 rc = decode_imm(ctxt, op, 2, false);
4967 break;
4968 case OpImmU:
4969 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4970 break;
4971 case OpSI:
4972 op->type = OP_MEM;
4973 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4974 op->addr.mem.ea =
4975 register_address(ctxt, VCPU_REGS_RSI);
4976 op->addr.mem.seg = ctxt->seg_override;
4977 op->val = 0;
4978 op->count = 1;
4979 break;
4980 case OpXLat:
4981 op->type = OP_MEM;
4982 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4983 op->addr.mem.ea =
4984 address_mask(ctxt,
4985 reg_read(ctxt, VCPU_REGS_RBX) +
4986 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
4987 op->addr.mem.seg = ctxt->seg_override;
4988 op->val = 0;
4989 break;
4990 case OpImmFAddr:
4991 op->type = OP_IMM;
4992 op->addr.mem.ea = ctxt->_eip;
4993 op->bytes = ctxt->op_bytes + 2;
4994 insn_fetch_arr(op->valptr, op->bytes, ctxt);
4995 break;
4996 case OpMemFAddr:
4997 ctxt->memop.bytes = ctxt->op_bytes + 2;
4998 goto mem_common;
4999 case OpES:
5000 op->type = OP_IMM;
5001 op->val = VCPU_SREG_ES;
5002 break;
5003 case OpCS:
5004 op->type = OP_IMM;
5005 op->val = VCPU_SREG_CS;
5006 break;
5007 case OpSS:
5008 op->type = OP_IMM;
5009 op->val = VCPU_SREG_SS;
5010 break;
5011 case OpDS:
5012 op->type = OP_IMM;
5013 op->val = VCPU_SREG_DS;
5014 break;
5015 case OpFS:
5016 op->type = OP_IMM;
5017 op->val = VCPU_SREG_FS;
5018 break;
5019 case OpGS:
5020 op->type = OP_IMM;
5021 op->val = VCPU_SREG_GS;
5022 break;
5023 case OpImplicit:
5024 /* Special instructions do their own operand decoding. */
5025 default:
5026 op->type = OP_NONE; /* Disable writeback. */
5027 break;
5028 }
5029
5030done:
5031 return rc;
5032}
5033
5034int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len, int emulation_type)
5035{
5036 int rc = X86EMUL_CONTINUE;
5037 int mode = ctxt->mode;
5038 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
5039 bool op_prefix = false;
5040 bool has_seg_override = false;
5041 struct opcode opcode;
5042 u16 dummy;
5043 struct desc_struct desc;
5044
5045 ctxt->memop.type = OP_NONE;
5046 ctxt->memopp = NULL;
5047 ctxt->_eip = ctxt->eip;
5048 ctxt->fetch.ptr = ctxt->fetch.data;
5049 ctxt->fetch.end = ctxt->fetch.data + insn_len;
5050 ctxt->opcode_len = 1;
5051 ctxt->intercept = x86_intercept_none;
5052 if (insn_len > 0)
5053 memcpy(ctxt->fetch.data, insn, insn_len);
5054 else {
5055 rc = __do_insn_fetch_bytes(ctxt, 1);
5056 if (rc != X86EMUL_CONTINUE)
5057 goto done;
5058 }
5059
5060 switch (mode) {
5061 case X86EMUL_MODE_REAL:
5062 case X86EMUL_MODE_VM86:
5063 def_op_bytes = def_ad_bytes = 2;
5064 ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
5065 if (desc.d)
5066 def_op_bytes = def_ad_bytes = 4;
5067 break;
5068 case X86EMUL_MODE_PROT16:
5069 def_op_bytes = def_ad_bytes = 2;
5070 break;
5071 case X86EMUL_MODE_PROT32:
5072 def_op_bytes = def_ad_bytes = 4;
5073 break;
5074#ifdef CONFIG_X86_64
5075 case X86EMUL_MODE_PROT64:
5076 def_op_bytes = 4;
5077 def_ad_bytes = 8;
5078 break;
5079#endif
5080 default:
5081 return EMULATION_FAILED;
5082 }
5083
5084 ctxt->op_bytes = def_op_bytes;
5085 ctxt->ad_bytes = def_ad_bytes;
5086
5087 /* Legacy prefixes. */
5088 for (;;) {
5089 switch (ctxt->b = insn_fetch(u8, ctxt)) {
5090 case 0x66: /* operand-size override */
5091 op_prefix = true;
5092 /* switch between 2/4 bytes */
5093 ctxt->op_bytes = def_op_bytes ^ 6;
5094 break;
5095 case 0x67: /* address-size override */
5096 if (mode == X86EMUL_MODE_PROT64)
5097 /* switch between 4/8 bytes */
5098 ctxt->ad_bytes = def_ad_bytes ^ 12;
5099 else
5100 /* switch between 2/4 bytes */
5101 ctxt->ad_bytes = def_ad_bytes ^ 6;
5102 break;
5103 case 0x26: /* ES override */
5104 has_seg_override = true;
5105 ctxt->seg_override = VCPU_SREG_ES;
5106 break;
5107 case 0x2e: /* CS override */
5108 has_seg_override = true;
5109 ctxt->seg_override = VCPU_SREG_CS;
5110 break;
5111 case 0x36: /* SS override */
5112 has_seg_override = true;
5113 ctxt->seg_override = VCPU_SREG_SS;
5114 break;
5115 case 0x3e: /* DS override */
5116 has_seg_override = true;
5117 ctxt->seg_override = VCPU_SREG_DS;
5118 break;
5119 case 0x64: /* FS override */
5120 has_seg_override = true;
5121 ctxt->seg_override = VCPU_SREG_FS;
5122 break;
5123 case 0x65: /* GS override */
5124 has_seg_override = true;
5125 ctxt->seg_override = VCPU_SREG_GS;
5126 break;
5127 case 0x40 ... 0x4f: /* REX */
5128 if (mode != X86EMUL_MODE_PROT64)
5129 goto done_prefixes;
5130 ctxt->rex_prefix = ctxt->b;
5131 continue;
5132 case 0xf0: /* LOCK */
5133 ctxt->lock_prefix = 1;
5134 break;
5135 case 0xf2: /* REPNE/REPNZ */
5136 case 0xf3: /* REP/REPE/REPZ */
5137 ctxt->rep_prefix = ctxt->b;
5138 break;
5139 default:
5140 goto done_prefixes;
5141 }
5142
5143 /* Any legacy prefix after a REX prefix nullifies its effect. */
5144
5145 ctxt->rex_prefix = 0;
5146 }
5147
5148done_prefixes:
5149
5150 /* REX prefix. */
5151 if (ctxt->rex_prefix & 8)
5152 ctxt->op_bytes = 8; /* REX.W */
5153
5154 /* Opcode byte(s). */
5155 opcode = opcode_table[ctxt->b];
5156 /* Two-byte opcode? */
5157 if (ctxt->b == 0x0f) {
5158 ctxt->opcode_len = 2;
5159 ctxt->b = insn_fetch(u8, ctxt);
5160 opcode = twobyte_table[ctxt->b];
5161
5162 /* 0F_38 opcode map */
5163 if (ctxt->b == 0x38) {
5164 ctxt->opcode_len = 3;
5165 ctxt->b = insn_fetch(u8, ctxt);
5166 opcode = opcode_map_0f_38[ctxt->b];
5167 }
5168 }
5169 ctxt->d = opcode.flags;
5170
5171 if (ctxt->d & ModRM)
5172 ctxt->modrm = insn_fetch(u8, ctxt);
5173
5174 /* vex-prefix instructions are not implemented */
5175 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
5176 (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
5177 ctxt->d = NotImpl;
5178 }
5179
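	/*
	 * Walk the group/dual/prefix/escape indirections until the opcode
	 * resolves to a concrete entry, folding each entry's flags into
	 * ctxt->d.
	 */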
5180 while (ctxt->d & GroupMask) {
5181 switch (ctxt->d & GroupMask) {
5182 case Group:
5183 goffset = (ctxt->modrm >> 3) & 7;
5184 opcode = opcode.u.group[goffset];
5185 break;
5186 case GroupDual:
5187 goffset = (ctxt->modrm >> 3) & 7;
5188 if ((ctxt->modrm >> 6) == 3)
5189 opcode = opcode.u.gdual->mod3[goffset];
5190 else
5191 opcode = opcode.u.gdual->mod012[goffset];
5192 break;
5193 case RMExt:
5194 goffset = ctxt->modrm & 7;
5195 opcode = opcode.u.group[goffset];
5196 break;
5197 case Prefix:
5198 if (ctxt->rep_prefix && op_prefix)
5199 return EMULATION_FAILED;
5200 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
5201 switch (simd_prefix) {
5202 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
5203 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
5204 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
5205 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
5206 }
5207 break;
5208 case Escape:
5209 if (ctxt->modrm > 0xbf) {
5210 size_t size = ARRAY_SIZE(opcode.u.esc->high);
5211 u32 index = array_index_nospec(
5212 ctxt->modrm - 0xc0, size);
5213
5214 opcode = opcode.u.esc->high[index];
5215 } else {
5216 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
5217 }
5218 break;
5219 case InstrDual:
5220 if ((ctxt->modrm >> 6) == 3)
5221 opcode = opcode.u.idual->mod3;
5222 else
5223 opcode = opcode.u.idual->mod012;
5224 break;
5225 case ModeDual:
5226 if (ctxt->mode == X86EMUL_MODE_PROT64)
5227 opcode = opcode.u.mdual->mode64;
5228 else
5229 opcode = opcode.u.mdual->mode32;
5230 break;
5231 default:
5232 return EMULATION_FAILED;
5233 }
5234
5235 ctxt->d &= ~(u64)GroupMask;
5236 ctxt->d |= opcode.flags;
5237 }
5238
5239 ctxt->is_branch = opcode.flags & IsBranch;
5240
5241 /* Unrecognised? */
5242 if (ctxt->d == 0)
5243 return EMULATION_FAILED;
5244
5245 ctxt->execute = opcode.u.execute;
5246
5247 if (unlikely(emulation_type & EMULTYPE_TRAP_UD) &&
5248 likely(!(ctxt->d & EmulateOnUD)))
5249 return EMULATION_FAILED;
5250
5251 if (unlikely(ctxt->d &
5252 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
5253 No16))) {
5254 /*
5255 * These are copied unconditionally here, and checked unconditionally
5256 * in x86_emulate_insn.
5257 */
5258 ctxt->check_perm = opcode.check_perm;
5259 ctxt->intercept = opcode.intercept;
5260
5261 if (ctxt->d & NotImpl)
5262 return EMULATION_FAILED;
5263
5264 if (mode == X86EMUL_MODE_PROT64) {
5265 if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
5266 ctxt->op_bytes = 8;
5267 else if (ctxt->d & NearBranch)
5268 ctxt->op_bytes = 8;
5269 }
5270
5271 if (ctxt->d & Op3264) {
5272 if (mode == X86EMUL_MODE_PROT64)
5273 ctxt->op_bytes = 8;
5274 else
5275 ctxt->op_bytes = 4;
5276 }
5277
5278 if ((ctxt->d & No16) && ctxt->op_bytes == 2)
5279 ctxt->op_bytes = 4;
5280
5281 if (ctxt->d & Sse)
5282 ctxt->op_bytes = 16;
5283 else if (ctxt->d & Mmx)
5284 ctxt->op_bytes = 8;
5285 }
5286
5287 /* ModRM and SIB bytes. */
5288 if (ctxt->d & ModRM) {
5289 rc = decode_modrm(ctxt, &ctxt->memop);
5290 if (!has_seg_override) {
5291 has_seg_override = true;
5292 ctxt->seg_override = ctxt->modrm_seg;
5293 }
5294 } else if (ctxt->d & MemAbs)
5295 rc = decode_abs(ctxt, &ctxt->memop);
5296 if (rc != X86EMUL_CONTINUE)
5297 goto done;
5298
5299 if (!has_seg_override)
5300 ctxt->seg_override = VCPU_SREG_DS;
5301
5302 ctxt->memop.addr.mem.seg = ctxt->seg_override;
5303
5304 /*
5305 * Decode and fetch the source operand: register, memory
5306 * or immediate.
5307 */
5308 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
5309 if (rc != X86EMUL_CONTINUE)
5310 goto done;
5311
5312 /*
5313 * Decode and fetch the second source operand: register, memory
5314 * or immediate.
5315 */
5316 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
5317 if (rc != X86EMUL_CONTINUE)
5318 goto done;
5319
5320 /* Decode and fetch the destination operand: register or memory. */
5321 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
5322
5323 if (ctxt->rip_relative && likely(ctxt->memopp))
5324 ctxt->memopp->addr.mem.ea = address_mask(ctxt,
5325 ctxt->memopp->addr.mem.ea + ctxt->_eip);
5326
5327done:
5328 if (rc == X86EMUL_PROPAGATE_FAULT)
5329 ctxt->have_exception = true;
5330 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
5331}
5332
5333bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
5334{
5335 return ctxt->d & PageTable;
5336}
5337
5338static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
5339{
5340	/* The second termination condition applies only to REPE/REPZ and
5341	 * REPNE/REPNZ, i.e. to CMPS (0xa6/0xa7) and SCAS (0xae/0xaf).
5342	 * If such a prefix is present, test the corresponding termination
5343	 * condition:
5344 * - if REPE/REPZ and ZF = 0 then done
5345 * - if REPNE/REPNZ and ZF = 1 then done
5346 */
5347 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
5348 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
5349 && (((ctxt->rep_prefix == REPE_PREFIX) &&
5350 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
5351 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
5352 ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
5353 return true;
5354
5355 return false;
5356}
5357
5358static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
5359{
5360 int rc;
5361
5362 kvm_fpu_get();
5363 rc = asm_safe("fwait");
5364 kvm_fpu_put();
5365
5366 if (unlikely(rc != X86EMUL_CONTINUE))
5367 return emulate_exception(ctxt, MF_VECTOR, 0, false);
5368
5369 return X86EMUL_CONTINUE;
5370}
5371
5372static void fetch_possible_mmx_operand(struct operand *op)
5373{
5374 if (op->type == OP_MM)
5375 kvm_read_mmx_reg(op->addr.mm, &op->mm_val);
5376}
5377
5378static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop)
5379{
5380 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
5381
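	/*
	 * Each fastop handler is a table of four stubs, one per operand
	 * size (1/2/4/8 bytes), spaced FASTOP_SIZE bytes apart;
	 * __ffs(bytes) selects the right stub for non-byte ops.
	 */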
5382 if (!(ctxt->d & ByteOp))
5383 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
5384
5385 asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
5386 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
5387 [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT
5388 : "c"(ctxt->src2.val));
5389
5390 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
5391 if (!fop) /* exception is returned in fop variable */
5392 return emulate_de(ctxt);
5393 return X86EMUL_CONTINUE;
5394}
5395
5396void init_decode_cache(struct x86_emulate_ctxt *ctxt)
5397{
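	/*
	 * Zero everything from rip_relative up to (but not including)
	 * modrm; fields in that range must start out clean for each newly
	 * decoded instruction.
	 */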
5398 memset(&ctxt->rip_relative, 0,
5399 (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
5400
5401 ctxt->io_read.pos = 0;
5402 ctxt->io_read.end = 0;
5403 ctxt->mem_read.end = 0;
5404}
5405
5406int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
5407{
5408 const struct x86_emulate_ops *ops = ctxt->ops;
5409 int rc = X86EMUL_CONTINUE;
5410 int saved_dst_type = ctxt->dst.type;
5411 unsigned emul_flags;
5412
5413 ctxt->mem_read.pos = 0;
5414
5415 /* LOCK prefix is allowed only with some instructions */
5416 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
5417 rc = emulate_ud(ctxt);
5418 goto done;
5419 }
5420
5421 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
5422 rc = emulate_ud(ctxt);
5423 goto done;
5424 }
5425
5426 emul_flags = ctxt->ops->get_hflags(ctxt);
5427 if (unlikely(ctxt->d &
5428 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
5429 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
5430 (ctxt->d & Undefined)) {
5431 rc = emulate_ud(ctxt);
5432 goto done;
5433 }
5434
5435 if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
5436 || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
5437 rc = emulate_ud(ctxt);
5438 goto done;
5439 }
5440
5441 if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
5442 rc = emulate_nm(ctxt);
5443 goto done;
5444 }
5445
5446 if (ctxt->d & Mmx) {
5447 rc = flush_pending_x87_faults(ctxt);
5448 if (rc != X86EMUL_CONTINUE)
5449 goto done;
5450 /*
5451 * Now that we know the fpu is exception safe, we can fetch
5452 * operands from it.
5453 */
5454 fetch_possible_mmx_operand(&ctxt->src);
5455 fetch_possible_mmx_operand(&ctxt->src2);
5456 if (!(ctxt->d & Mov))
5457 fetch_possible_mmx_operand(&ctxt->dst);
5458 }
5459
5460 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
5461 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5462 X86_ICPT_PRE_EXCEPT);
5463 if (rc != X86EMUL_CONTINUE)
5464 goto done;
5465 }
5466
5467 /* Instruction can only be executed in protected mode */
5468 if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
5469 rc = emulate_ud(ctxt);
5470 goto done;
5471 }
5472
5473 /* Privileged instruction can be executed only in CPL=0 */
5474 if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
5475 if (ctxt->d & PrivUD)
5476 rc = emulate_ud(ctxt);
5477 else
5478 rc = emulate_gp(ctxt, 0);
5479 goto done;
5480 }
5481
5482 /* Do instruction specific permission checks */
5483 if (ctxt->d & CheckPerm) {
5484 rc = ctxt->check_perm(ctxt);
5485 if (rc != X86EMUL_CONTINUE)
5486 goto done;
5487 }
5488
5489 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5490 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5491 X86_ICPT_POST_EXCEPT);
5492 if (rc != X86EMUL_CONTINUE)
5493 goto done;
5494 }
5495
5496 if (ctxt->rep_prefix && (ctxt->d & String)) {
5497 /* All REP prefixes have the same first termination condition */
5498 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
5499 string_registers_quirk(ctxt);
5500 ctxt->eip = ctxt->_eip;
5501 ctxt->eflags &= ~X86_EFLAGS_RF;
5502 goto done;
5503 }
5504 }
5505 }
5506
5507 if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
5508 rc = segmented_read(ctxt, ctxt->src.addr.mem,
5509 ctxt->src.valptr, ctxt->src.bytes);
5510 if (rc != X86EMUL_CONTINUE)
5511 goto done;
5512 ctxt->src.orig_val64 = ctxt->src.val64;
5513 }
5514
5515 if (ctxt->src2.type == OP_MEM) {
5516 rc = segmented_read(ctxt, ctxt->src2.addr.mem,
5517 &ctxt->src2.val, ctxt->src2.bytes);
5518 if (rc != X86EMUL_CONTINUE)
5519 goto done;
5520 }
5521
5522 if ((ctxt->d & DstMask) == ImplicitOps)
5523 goto special_insn;
5524
5526 if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
5527 /* optimisation - avoid slow emulated read if Mov */
5528 rc = segmented_read(ctxt, ctxt->dst.addr.mem,
5529 &ctxt->dst.val, ctxt->dst.bytes);
5530 if (rc != X86EMUL_CONTINUE) {
5531 if (!(ctxt->d & NoWrite) &&
5532 rc == X86EMUL_PROPAGATE_FAULT &&
5533 ctxt->exception.vector == PF_VECTOR)
5534 ctxt->exception.error_code |= PFERR_WRITE_MASK;
5535 goto done;
5536 }
5537 }
5538 /* Copy full 64-bit value for CMPXCHG8B. */
5539 ctxt->dst.orig_val64 = ctxt->dst.val64;
5540
5541special_insn:
5542
5543 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5544 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5545 X86_ICPT_POST_MEMACCESS);
5546 if (rc != X86EMUL_CONTINUE)
5547 goto done;
5548 }
5549
5550 if (ctxt->rep_prefix && (ctxt->d & String))
5551 ctxt->eflags |= X86_EFLAGS_RF;
5552 else
5553 ctxt->eflags &= ~X86_EFLAGS_RF;
5554
5555 if (ctxt->execute) {
5556 if (ctxt->d & Fastop)
5557 rc = fastop(ctxt, ctxt->fop);
5558 else
5559 rc = ctxt->execute(ctxt);
5560 if (rc != X86EMUL_CONTINUE)
5561 goto done;
5562 goto writeback;
5563 }
5564
5565 if (ctxt->opcode_len == 2)
5566 goto twobyte_insn;
5567 else if (ctxt->opcode_len == 3)
5568 goto threebyte_insn;
5569
5570 switch (ctxt->b) {
5571 case 0x70 ... 0x7f: /* jcc (short) */
5572 if (test_cc(ctxt->b, ctxt->eflags))
5573 rc = jmp_rel(ctxt, ctxt->src.val);
5574 break;
5575 case 0x8d: /* lea r16/r32, m */
5576 ctxt->dst.val = ctxt->src.addr.mem.ea;
5577 break;
5578 case 0x90 ... 0x97: /* nop / xchg reg, rax */
5579 if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
5580 ctxt->dst.type = OP_NONE;
5581 else
5582 rc = em_xchg(ctxt);
5583 break;
5584 case 0x98: /* cbw/cwde/cdqe */
5585 switch (ctxt->op_bytes) {
5586 case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
5587 case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
5588 case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
5589 }
5590 break;
5591 case 0xcc: /* int3 */
5592 rc = emulate_int(ctxt, 3);
5593 break;
5594 case 0xcd: /* int n */
5595 rc = emulate_int(ctxt, ctxt->src.val);
5596 break;
5597 case 0xce: /* into */
5598 if (ctxt->eflags & X86_EFLAGS_OF)
5599 rc = emulate_int(ctxt, 4);
5600 break;
5601 case 0xe9: /* jmp rel */
5602 case 0xeb: /* jmp rel short */
5603 rc = jmp_rel(ctxt, ctxt->src.val);
5604 ctxt->dst.type = OP_NONE; /* Disable writeback. */
5605 break;
5606 case 0xf4: /* hlt */
5607 ctxt->ops->halt(ctxt);
5608 break;
5609 case 0xf5: /* cmc */
5610 /* complement carry flag from eflags reg */
5611 ctxt->eflags ^= X86_EFLAGS_CF;
5612 break;
5613 case 0xf8: /* clc */
5614 ctxt->eflags &= ~X86_EFLAGS_CF;
5615 break;
5616 case 0xf9: /* stc */
5617 ctxt->eflags |= X86_EFLAGS_CF;
5618 break;
5619 case 0xfc: /* cld */
5620 ctxt->eflags &= ~X86_EFLAGS_DF;
5621 break;
5622 case 0xfd: /* std */
5623 ctxt->eflags |= X86_EFLAGS_DF;
5624 break;
5625 default:
5626 goto cannot_emulate;
5627 }
5628
5629 if (rc != X86EMUL_CONTINUE)
5630 goto done;
5631
5632writeback:
5633 if (ctxt->d & SrcWrite) {
5634 BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
5635 rc = writeback(ctxt, &ctxt->src);
5636 if (rc != X86EMUL_CONTINUE)
5637 goto done;
5638 }
5639 if (!(ctxt->d & NoWrite)) {
5640 rc = writeback(ctxt, &ctxt->dst);
5641 if (rc != X86EMUL_CONTINUE)
5642 goto done;
5643 }
5644
5645 /*
5646	 * Restore dst type in case the decoding is reused
5647	 * (happens for string instructions).
5648 */
5649 ctxt->dst.type = saved_dst_type;
5650
5651 if ((ctxt->d & SrcMask) == SrcSI)
5652 string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
5653
5654 if ((ctxt->d & DstMask) == DstDI)
5655 string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
5656
5657 if (ctxt->rep_prefix && (ctxt->d & String)) {
5658 unsigned int count;
5659 struct read_cache *r = &ctxt->io_read;
5660 if ((ctxt->d & SrcMask) == SrcSI)
5661 count = ctxt->src.count;
5662 else
5663 count = ctxt->dst.count;
5664 register_address_increment(ctxt, VCPU_REGS_RCX, -count);
5665
5666 if (!string_insn_completed(ctxt)) {
5667 /*
5668	 * Re-enter the guest when the pio read ahead buffer is empty
5669	 * or, if it is not used, after every 1024 iterations.
5670 */
5671 if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
5672 (r->end == 0 || r->end != r->pos)) {
5673 /*
5674 * Reset read cache. Usually happens before
5675 * decode, but since instruction is restarted
5676 * we have to do it here.
5677 */
5678 ctxt->mem_read.end = 0;
5679 writeback_registers(ctxt);
5680 return EMULATION_RESTART;
5681 }
5682 goto done; /* skip rip writeback */
5683 }
5684 ctxt->eflags &= ~X86_EFLAGS_RF;
5685 }
5686
5687 ctxt->eip = ctxt->_eip;
5688 if (ctxt->mode != X86EMUL_MODE_PROT64)
5689 ctxt->eip = (u32)ctxt->_eip;
5690
5691done:
5692 if (rc == X86EMUL_PROPAGATE_FAULT) {
5693 WARN_ON(ctxt->exception.vector > 0x1f);
5694 ctxt->have_exception = true;
5695 }
5696 if (rc == X86EMUL_INTERCEPTED)
5697 return EMULATION_INTERCEPTED;
5698
5699 if (rc == X86EMUL_CONTINUE)
5700 writeback_registers(ctxt);
5701
5702 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
5703
5704twobyte_insn:
5705 switch (ctxt->b) {
5706 case 0x09: /* wbinvd */
5707 (ctxt->ops->wbinvd)(ctxt);
5708 break;
5709 case 0x08: /* invd */
5710 case 0x0d: /* GrpP (prefetch) */
5711 case 0x18: /* Grp16 (prefetch/nop) */
5712 case 0x1f: /* nop */
5713 break;
5714 case 0x20: /* mov cr, reg */
5715 ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
5716 break;
5717 case 0x21: /* mov from dr to reg */
5718 ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
5719 break;
5720 case 0x40 ... 0x4f: /* cmov */
5721 if (test_cc(ctxt->b, ctxt->eflags))
5722 ctxt->dst.val = ctxt->src.val;
5723 else if (ctxt->op_bytes != 4)
5724 ctxt->dst.type = OP_NONE; /* no writeback */
5725 break;
5726	case 0x80 ... 0x8f: /* jnz rel, etc */
5727 if (test_cc(ctxt->b, ctxt->eflags))
5728 rc = jmp_rel(ctxt, ctxt->src.val);
5729 break;
5730 case 0x90 ... 0x9f: /* setcc r/m8 */
5731 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
5732 break;
5733 case 0xb6 ... 0xb7: /* movzx */
5734 ctxt->dst.bytes = ctxt->op_bytes;
5735 ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
5736 : (u16) ctxt->src.val;
5737 break;
5738 case 0xbe ... 0xbf: /* movsx */
5739 ctxt->dst.bytes = ctxt->op_bytes;
5740 ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
5741 (s16) ctxt->src.val;
5742 break;
5743 default:
5744 goto cannot_emulate;
5745 }
5746
5747threebyte_insn:
5748
5749 if (rc != X86EMUL_CONTINUE)
5750 goto done;
5751
5752 goto writeback;
5753
5754cannot_emulate:
5755 return EMULATION_FAILED;
5756}
5757
5758void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
5759{
5760 invalidate_registers(ctxt);
5761}
5762
5763void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
5764{
5765 writeback_registers(ctxt);
5766}
5767
5768bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
5769{
5770 if (ctxt->rep_prefix && (ctxt->d & String))
5771 return false;
5772
5773 if (ctxt->d & TwoMemOp)
5774 return false;
5775
5776 return true;
5777}