Source-Changes-HG archive
[src/trunk]: src/lib/libnvmm Improvements and fixes:
details: https://anonhg.NetBSD.org/src/rev/f345713e1424
branches: trunk
changeset: 838209:f345713e1424
user: maxv <maxv%NetBSD.org@localhost>
date: Mon Jan 07 13:47:33 2019 +0000
description:
Improvements and fixes:
* Decode AND/OR/XOR from Group1.
* Sign-extend the immediates and displacements in 64bit mode (a standalone
  sketch of these two points follows this description).
* Fix the storage used by {read,write}_guest_memory: now that we batch certain
  IO operations we can copy more than 8 bytes, which overruns the old fixed
  8-byte buffer (a sketch of this follows the diff).
* Remove the CR4_PSE check in the 64bit MMU. This bit is actually ignored
  in long mode, and some systems (like FreeBSD) don't set it (a sketch of the
  remaining large-page handling also follows the diff).
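
Below, for illustration, is a minimal standalone sketch of the first two points,
built only on libc and not taken from libnvmm itself: a Group1-style dispatch
table indexed by the ModRM reg field (mirroring the group1[8] table added in the
diff), together with the same sign-extension logic as the sign_extend() helper,
applied to an 8-bit immediate. The instruction encoding in the comments is an
illustrative example, not code from the change.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the x86_emul_* callbacks; only the dispatch matters here. */
static void emul_or(void)  { puts("OR");  }
static void emul_and(void) { puts("AND"); }
static void emul_xor(void) { puts("XOR"); }

/* Same shape as the new group1[8] table: indexed by the ModRM reg field
 * (bits 5:3); slots this change does not decode stay NULL. */
static void (*const group1[8])(void) = {
	[1] = emul_or,
	[4] = emul_and,
	[6] = emul_xor,
};

/* Same logic as the sign_extend() helper in the diff: widen a 1/2/4-byte
 * immediate or displacement to 64 bits, replicating its sign bit. */
static uint64_t
sign_extend(uint64_t val, int size)
{
	if (size == 1 && (val & 0x80))
		val |= 0xFFFFFFFFFFFFFF00;
	else if (size == 2 && (val & 0x8000))
		val |= 0xFFFFFFFFFFFF0000;
	else if (size == 4 && (val & 0x80000000))
		val |= 0xFFFFFFFF00000000;
	return val;
}

int
main(void)
{
	/* "and $-2, %rax" encodes as 48 83 E0 FE: opcode 0x83 (Ev, Ib),
	 * ModRM 0xE0 with reg field 4 -> AND, and imm8 0xFE sign-extended
	 * to the 64-bit operand size. */
	uint8_t modrm = 0xE0, imm8 = 0xFE;

	printf("imm = %#llx\n", (unsigned long long)sign_extend(imm8, 1));
	group1[(modrm >> 3) & 7]();
	return 0;
}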
diffstat:
lib/libnvmm/libnvmm_x86.c | 138 +++++++++++++++++++++++++++++-----------------
1 files changed, 87 insertions(+), 51 deletions(-)
diffs (truncated from 380 to 300 lines):
diff -r 752c4c40d898 -r f345713e1424 lib/libnvmm/libnvmm_x86.c
--- a/lib/libnvmm/libnvmm_x86.c Mon Jan 07 13:10:44 2019 +0000
+++ b/lib/libnvmm/libnvmm_x86.c Mon Jan 07 13:47:33 2019 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: libnvmm_x86.c,v 1.10 2019/01/06 16:10:51 maxv Exp $ */
+/* $NetBSD: libnvmm_x86.c,v 1.11 2019/01/07 13:47:33 maxv Exp $ */
/*
* Copyright (c) 2018 The NetBSD Foundation, Inc.
@@ -282,7 +282,7 @@
static int
x86_gva_to_gpa_64bit(struct nvmm_machine *mach, uint64_t cr3,
- gvaddr_t gva, gpaddr_t *gpa, bool has_pse, nvmm_prot_t *prot)
+ gvaddr_t gva, gpaddr_t *gpa, nvmm_prot_t *prot)
{
gpaddr_t L4gpa, L3gpa, L2gpa, L1gpa;
uintptr_t L4hva, L3hva, L2hva, L1hva;
@@ -325,8 +325,6 @@
*prot &= ~NVMM_PROT_WRITE;
if (pte & PG_NX)
*prot &= ~NVMM_PROT_EXEC;
- if ((pte & PG_PS) && !has_pse)
- return -1;
if (pte & PG_PS) {
*gpa = (pte & PTE64_L3_FRAME);
*gpa = *gpa + (gva & (PTE64_L2_MASK|PTE64_L1_MASK));
@@ -347,8 +345,6 @@
*prot &= ~NVMM_PROT_WRITE;
if (pte & PG_NX)
*prot &= ~NVMM_PROT_EXEC;
- if ((pte & PG_PS) && !has_pse)
- return -1;
if (pte & PG_PS) {
*gpa = (pte & PTE64_L2_FRAME);
*gpa = *gpa + (gva & PTE64_L1_MASK);
@@ -402,7 +398,7 @@
if (is_pae && is_lng) {
/* 64bit */
- ret = x86_gva_to_gpa_64bit(mach, cr3, gva, gpa, has_pse, prot);
+ ret = x86_gva_to_gpa_64bit(mach, cr3, gva, gpa, prot);
} else if (is_pae && !is_lng) {
/* 32bit PAE */
ret = x86_gva_to_gpa_32bit_pae(mach, cr3, gva, gpa, has_pse,
@@ -553,7 +549,6 @@
gvaddr_t gva, uint8_t *data, size_t size)
{
struct nvmm_mem mem;
- uint8_t membuf[8];
nvmm_prot_t prot;
gpaddr_t gpa;
uintptr_t hva;
@@ -580,13 +575,12 @@
is_mmio = (ret == -1);
if (is_mmio) {
- mem.data = membuf;
+ mem.data = data;
mem.gva = gva;
mem.gpa = gpa;
mem.write = false;
mem.size = size;
(*__callbacks.mem)(&mem);
- memcpy(data, mem.data, size);
} else {
memcpy(data, (uint8_t *)hva, size);
}
@@ -606,7 +600,6 @@
gvaddr_t gva, uint8_t *data, size_t size)
{
struct nvmm_mem mem;
- uint8_t membuf[8];
nvmm_prot_t prot;
gpaddr_t gpa;
uintptr_t hva;
@@ -633,11 +626,10 @@
is_mmio = (ret == -1);
if (is_mmio) {
- mem.data = membuf;
+ mem.data = data;
mem.gva = gva;
mem.gpa = gpa;
mem.write = true;
- memcpy(mem.data, data, size);
mem.size = size;
(*__callbacks.mem)(&mem);
} else {
@@ -878,7 +870,7 @@
struct x86_disp {
enum x86_disp_type type;
- uint8_t data[4];
+ uint64_t data; /* 4 bytes, but can be sign-extended */
};
enum REGMODRM__Mod {
@@ -919,7 +911,7 @@
struct x86_immediate {
size_t size; /* 1/2/4/8 */
- uint8_t data[8];
+ uint64_t data;
};
struct x86_sib {
@@ -992,9 +984,9 @@
bool szoverride;
int defsize;
int allsize;
+ bool group1;
bool group11;
bool immediate;
- int immsize;
int flags;
void (*emul)(struct nvmm_mem *, void (*)(struct nvmm_mem *), uint64_t *);
};
@@ -1008,8 +1000,15 @@
#define OPSIZE_DOUB 0x04 /* 4 bytes */
#define OPSIZE_QUAD 0x08 /* 8 bytes */
-#define FLAG_z 0x02
-#define FLAG_e 0x10
+#define FLAG_imm8 0x01
+#define FLAG_immz 0x02
+#define FLAG_ze 0x04
+
+static const struct x86_group_entry group1[8] = {
+ [1] = { .emul = x86_emul_or },
+ [4] = { .emul = x86_emul_and },
+ [6] = { .emul = x86_emul_xor }
+};
static const struct x86_group_entry group11[8] = {
[0] = { .emul = x86_emul_mov }
@@ -1017,9 +1016,27 @@
static const struct x86_opcode primary_opcode_table[] = {
/*
+ * Group1
+ */
+ {
+ /* Ev, Ib */
+ .byte = 0x83,
+ .regmodrm = true,
+ .regtorm = true,
+ .szoverride = true,
+ .defsize = -1,
+ .allsize = OPSIZE_WORD|OPSIZE_DOUB|OPSIZE_QUAD,
+ .group1 = true,
+ .immediate = true,
+ .flags = FLAG_imm8,
+ .emul = NULL /* group1 */
+ },
+
+ /*
* Group11
*/
{
+ /* Eb, Ib */
.byte = 0xC6,
.regmodrm = true,
.regtorm = true,
@@ -1028,10 +1045,10 @@
.allsize = -1,
.group11 = true,
.immediate = true,
- .immsize = OPSIZE_BYTE,
.emul = NULL /* group11 */
},
{
+ /* Ev, Iz */
.byte = 0xC7,
.regmodrm = true,
.regtorm = true,
@@ -1040,8 +1057,7 @@
.allsize = OPSIZE_WORD|OPSIZE_DOUB|OPSIZE_QUAD,
.group11 = true,
.immediate = true,
- .immsize = -1, /* special, Z */
- .flags = FLAG_z,
+ .flags = FLAG_immz,
.emul = NULL /* group11 */
},
@@ -1340,7 +1356,7 @@
.szoverride = true,
.defsize = OPSIZE_BYTE,
.allsize = OPSIZE_WORD|OPSIZE_DOUB|OPSIZE_QUAD,
- .flags = FLAG_e,
+ .flags = FLAG_ze,
.emul = x86_emul_mov
},
{
@@ -1351,7 +1367,7 @@
.szoverride = true,
.defsize = OPSIZE_WORD,
.allsize = OPSIZE_WORD|OPSIZE_DOUB|OPSIZE_QUAD,
- .flags = FLAG_e,
+ .flags = FLAG_ze,
.emul = x86_emul_mov
},
};
@@ -1756,34 +1772,54 @@
return 0;
}
+static uint64_t
+sign_extend(uint64_t val, int size)
+{
+ if (size == 1) {
+ if (val & __BIT(7))
+ val |= 0xFFFFFFFFFFFFFF00;
+ } else if (size == 2) {
+ if (val & __BIT(15))
+ val |= 0xFFFFFFFFFFFF0000;
+ } else if (size == 4) {
+ if (val & __BIT(31))
+ val |= 0xFFFFFFFF00000000;
+ }
+ return val;
+}
+
static int
node_immediate(struct x86_decode_fsm *fsm, struct x86_instr *instr)
{
const struct x86_opcode *opcode = instr->opcode;
struct x86_store *store;
- uint8_t flags;
uint8_t immsize;
+ size_t sesize = 0;
/* The immediate is the source */
store = &instr->src;
immsize = instr->operand_size;
- /* Get the correct flags */
- flags = opcode->flags;
- if ((flags & FLAG_z) && (immsize == 8)) {
- /* 'z' operates here */
+ if (opcode->flags & FLAG_imm8) {
+ sesize = immsize;
+ immsize = 1;
+ } else if ((opcode->flags & FLAG_immz) && (immsize == 8)) {
+ sesize = immsize;
immsize = 4;
}
store->type = STORE_IMM;
store->u.imm.size = immsize;
-
- if (fsm_read(fsm, store->u.imm.data, store->u.imm.size) == -1) {
+ if (fsm_read(fsm, (uint8_t *)&store->u.imm.data, immsize) == -1) {
return -1;
}
-
fsm_advance(fsm, store->u.imm.size, NULL);
+ if (sesize != 0) {
+ store->u.imm.data = sign_extend(store->u.imm.data, sesize);
+ store->u.imm.size = sesize;
+ }
+
return 0;
}
@@ -1791,6 +1827,7 @@
node_disp(struct x86_decode_fsm *fsm, struct x86_instr *instr)
{
const struct x86_opcode *opcode = instr->opcode;
+ uint64_t data = 0;
size_t n;
if (instr->strm->disp.type == DISP_1) {
@@ -1799,10 +1836,16 @@
n = 4;
}
- if (fsm_read(fsm, instr->strm->disp.data, n) == -1) {
+ if (fsm_read(fsm, (uint8_t *)&data, n) == -1) {
return -1;
}
+ if (__predict_true(fsm->is64bit)) {
+ data = sign_extend(data, n);
+ }
+
+ instr->strm->disp.data = data;
+
if (opcode->immediate) {
fsm_advance(fsm, n, node_immediate);
} else {
@@ -1903,12 +1946,7 @@
const struct x86_reg *reg;
size_t regsize;
- if ((opcode->flags & FLAG_z) && (instr->operand_size == 8)) {
- /* 'z' operates here */
- regsize = 4;
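
On the {read,write}_guest_memory fix: the old code bounced MMIO data through a
fixed 8-byte on-stack buffer, which is overrun once a batched transfer moves
more than 8 bytes; the change points mem.data at the caller's buffer instead.
A simplified sketch of the two patterns, using a cut-down stand-in for struct
nvmm_mem and a fake callback rather than the real libnvmm types:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Cut-down stand-in for struct nvmm_mem: only the fields used here. */
struct mem_xfer {
	uint8_t *data;
	size_t size;
};

/* Old pattern: the MMIO callback fills a fixed 8-byte bounce buffer, then
 * the result is copied out. Any transfer larger than 8 bytes overruns
 * membuf. */
static void
read_guest_old(struct mem_xfer *mem, void (*cb)(struct mem_xfer *),
    uint8_t *data, size_t size)
{
	uint8_t membuf[8];

	mem->data = membuf;
	mem->size = size;
	(*cb)(mem);			/* writes size bytes into membuf */
	memcpy(data, mem->data, size);	/* overflow when size > 8 */
}

/* New pattern: hand the caller's buffer to the callback directly, so there
 * is no intermediate copy and no 8-byte limit. */
static void
read_guest_new(struct mem_xfer *mem, void (*cb)(struct mem_xfer *),
    uint8_t *data, size_t size)
{
	mem->data = data;
	mem->size = size;
	(*cb)(mem);			/* fills the caller's buffer in place */
}

/* Trivial callback for illustration: pretend the device returns 0xAB bytes. */
static void
fake_mmio(struct mem_xfer *mem)
{
	memset(mem->data, 0xAB, mem->size);
}

int
main(void)
{
	struct mem_xfer mem;
	uint8_t buf[32];

	read_guest_old(&mem, fake_mmio, buf, 8);	   /* <= 8 bytes: old code was fine */
	read_guest_new(&mem, fake_mmio, buf, sizeof(buf)); /* 32 > 8: only safe with the new pattern */
	return buf[0] == 0xAB ? 0 : 1;
}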
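
On the CR4_PSE point: in long mode the PSE bit of %cr4 is ignored, and 2MB/1GB
mappings are selected purely by the PS bit of the paging-structure entry, which
is why the has_pse parameter could be dropped from the 64-bit walker. A sketch
of the large-page address assembly that remains, with illustrative constants
standing in for the PTE64_* macros rather than the real libnvmm definitions:

#include <stdint.h>
#include <stdio.h>

#define PG_PS		0x0000000000000080ULL	/* "page size" bit in a PDE */
#define L2_FRAME	0x000FFFFFFFE00000ULL	/* 2MB-aligned frame (illustrative) */
#define L1_MASK		0x00000000001FF000ULL	/* bits normally resolved at L1 */
#define PAGE_MASK	0x0000000000000FFFULL

/* If the L2 (PDE) entry has PS set, it maps a 2MB page: the guest-physical
 * address is the large frame plus the low 21 bits of the virtual address.
 * There is no CR4.PSE test -- the bit is ignored in long mode. */
static int
l2_translate(uint64_t pde, uint64_t gva, uint64_t *gpa)
{
	if (pde & PG_PS) {
		*gpa = (pde & L2_FRAME) + (gva & (L1_MASK | PAGE_MASK));
		return 0;
	}
	return 1;	/* not a large page: continue the walk at L1 */
}

int
main(void)
{
	uint64_t gpa;

	/* A PDE mapping a 2MB page at 0x40000000, with PS set. */
	if (l2_translate(0x40000000ULL | PG_PS, 0xffff800040123456ULL, &gpa) == 0)
		printf("gpa = %#llx\n", (unsigned long long)gpa);
	return 0;
}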