pkgsrc-WIP-changes archive


qemu-nvmm: update to 4.1.0
Module Name:	pkgsrc-wip
Committed By:	Maya Rashish <maya%NetBSD.org@localhost>
Pushed By:	coypu
Date:		Sat Oct 19 13:59:23 2019 +0300
Changeset:	ddb55b59a82f12ca6a957ecaa4c3007226eb26cf

Modified Files:
	qemu-nvmm/DESCR
	qemu-nvmm/Makefile
	qemu-nvmm/PLIST
	qemu-nvmm/distinfo
	qemu-nvmm/patches/patch-Makefile
	qemu-nvmm/patches/patch-hw_core_uboot__image.h
	qemu-nvmm/patches/patch-hw_usb_dev-mtp.c
	qemu-nvmm/patches/patch-nvmm-support
Added Files:
	qemu-nvmm/files/accel_stubs_nvmm-stub.c
	qemu-nvmm/files/include_sysemu_nvmm.h
	qemu-nvmm/files/target_i386_nvmm-all.c
Removed Files:
	qemu-nvmm/patches/patch-block.c
	qemu-nvmm/patches/patch-hw_arm_boot.c
	qemu-nvmm/patches/patch-hw_core_loader.c
	qemu-nvmm/patches/patch-target_arm_cpu.h
	qemu-nvmm/patches/patch-target_arm_helper.c
	qemu-nvmm/patches/patch-tests_Makefile.include

Log Message:
qemu-nvmm: update to 4.1.0

Based on emulators/qemu's update.
- Moved new files from patch-nvmm-support to files/; I have had
  trouble maintaining files created from a patch in the past.
- Changed PKGNAME to be distinct from emulators/qemu.
- Described the difference from emulators/qemu in DESCR.
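
For quick reference, a minimal sketch of the two pkgsrc idioms behind the
first two items, copied (with comments added) from the Makefile hunk in the
diff below: PKGNAME is derived from DISTNAME with a :S substitution, giving
qemu-nvmm-4.1.0, and the new NVMM sources are staged from files/ into the
extracted tree by a pre-configure target instead of being carried inside
patch-nvmm-support.

# Package name distinct from emulators/qemu: qemu-4.1.0 -> qemu-nvmm-4.1.0
DISTNAME=	qemu-4.1.0
PKGNAME=	${DISTNAME:S/qemu/qemu-nvmm/}

# Copy the new NVMM sources from files/ rather than creating them via patch
pre-configure:
	${CP} ${FILESDIR}/accel_stubs_nvmm-stub.c ${WRKSRC}/accel/stubs/nvmm-stub.c
	${CP} ${FILESDIR}/include_sysemu_nvmm.h ${WRKSRC}/include/sysemu/nvmm.h
	${CP} ${FILESDIR}/target_i386_nvmm-all.c ${WRKSRC}/target/i386/nvmm-all.c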

To see a diff of this commit:
https://wip.pkgsrc.org/cgi-bin/gitweb.cgi?p=pkgsrc-wip.git;a=commitdiff;h=ddb55b59a82f12ca6a957ecaa4c3007226eb26cf

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

diffstat:
 qemu-nvmm/DESCR                                |    2 +
 qemu-nvmm/Makefile                             |   27 +-
 qemu-nvmm/PLIST                                |  105 +-
 qemu-nvmm/distinfo                             |   22 +-
 qemu-nvmm/files/accel_stubs_nvmm-stub.c        |   43 +
 qemu-nvmm/files/include_sysemu_nvmm.h          |   35 +
 qemu-nvmm/files/target_i386_nvmm-all.c         | 1168 ++++++++++++++++++++
 qemu-nvmm/patches/patch-Makefile               |   10 +-
 qemu-nvmm/patches/patch-block.c                |   42 -
 qemu-nvmm/patches/patch-hw_arm_boot.c          |   26 -
 qemu-nvmm/patches/patch-hw_core_loader.c       |   27 -
 qemu-nvmm/patches/patch-hw_core_uboot__image.h |   12 +-
 qemu-nvmm/patches/patch-hw_usb_dev-mtp.c       |   18 +-
 qemu-nvmm/patches/patch-nvmm-support           | 1375 +-----------------------
 qemu-nvmm/patches/patch-target_arm_cpu.h       |   20 -
 qemu-nvmm/patches/patch-target_arm_helper.c    |   20 -
 qemu-nvmm/patches/patch-tests_Makefile.include |   17 -
 17 files changed, 1449 insertions(+), 1520 deletions(-)

diffs:
diff --git a/qemu-nvmm/DESCR b/qemu-nvmm/DESCR
index d939711a26..6aac387b36 100644
--- a/qemu-nvmm/DESCR
+++ b/qemu-nvmm/DESCR
@@ -9,3 +9,5 @@ good emulation speed, QEMU has two operating modes:
       Linux processes compiled for one CPU on another CPU. It can be used
       to launch the Wine Windows API emulator or to ease cross-compilation
       and cross-debugging.
+
+This variant is patched to use NetBSD's NVMM acceleration.
diff --git a/qemu-nvmm/Makefile b/qemu-nvmm/Makefile
index d6dad791e9..8b96a9ad28 100644
--- a/qemu-nvmm/Makefile
+++ b/qemu-nvmm/Makefile
@@ -1,7 +1,7 @@
 # $NetBSD: Makefile,v 1.200 2019/01/31 13:39:10 martin Exp $
 
-DISTNAME=	qemu-3.1.0
-PKGREVISION=	4
+DISTNAME=	qemu-4.1.0
+PKGNAME=	${DISTNAME:S/qemu/qemu-nvmm/}
 CATEGORIES=	emulators
 MASTER_SITES=	https://download.qemu.org/
 EXTRACT_SUFX=	.tar.xz
@@ -13,12 +13,15 @@ LICENSE=	gnu-gpl-v2 AND gnu-lgpl-v2.1 AND mit AND modified-bsd
 
 USE_CURSES=		resize_term wide
 USE_LANGUAGES+=		c c++
-USE_TOOLS+=		bison flex gmake makeinfo perl:build pkg-config
+USE_TOOLS+=		bison flex gmake makeinfo perl:build pod2man pkg-config
 FAKE_NCURSES=		yes
 UNLIMIT_RESOURCES=	datasize
 HAS_CONFIGURE=		yes
 
-BUILD_DEPENDS+=		texi2html-[0-9]*:../../textproc/texi2html
+GMAKE_REQD=		4.1 # needed for docs
+TOOL_DEPENDS+=		${PYPKGPREFIX}-sphinx-[0-9]*:../../textproc/py-sphinx
+
+CHECK_PORTABILITY_SKIP+=	roms/u-boot/tools/imx8m_image.sh
 
 SUBST_CLASSES+=			prefix
 SUBST_STAGE.prefix=		pre-configure
@@ -26,6 +29,14 @@ SUBST_MESSAGE.prefix=		Setting PREFIX
 SUBST_FILES.prefix+=		configure
 SUBST_VARS.prefix+=		PREFIX
 
+SUBST_CLASSES+=			sphinx-build
+SUBST_STAGE.sphinx-build=	pre-configure
+SUBST_MESSAGE.sphinx-build=	Fix hardcoded sphinx-build
+SUBST_FILES.sphinx-build+=	Makefile configure
+SUBST_FILES.sphinx-build+=	roms/skiboot/doc/Makefile
+SUBST_FILES.sphinx-build+=	roms/u-boot/Documentation/Makefile
+SUBST_SED.sphinx-build+=	-e 's/sphinx-build/sphinx-build${PYVERSSUFFIX}/g'
+
 .include "options.mk"
 
 .include "../../mk/bsd.prefs.mk"
@@ -37,6 +48,7 @@ CONFIGURE_ARGS+=	--python=${PYTHONBIN}
 CONFIGURE_ARGS+=	--smbd=${PREFIX}/sbin/smbd
 CONFIGURE_ARGS+=	--mandir=${PREFIX}/${PKGMANDIR}
 CONFIGURE_ARGS+=	--enable-curses
+CONFIGURE_ARGS+=	--enable-docs
 CONFIGURE_ARGS+=	--enable-jemalloc
 CONFIGURE_ARGS+=	--disable-opengl
 CONFIGURE_ARGS+=	--target-list=x86_64-softmmu
@@ -55,8 +67,6 @@ PKG_SYSCONFSUBDIR=	qemu
 
 REPLACE_PERL+=		scripts/texi2pod.pl
 
-PYTHON_VERSIONS_ACCEPTED=	27 # not yet ported yet as of 2.10.0
-
 INSTALLATION_DIRS=	${PKGMANDIR}/man1 share/doc/qemu
 
 UE_ARCHS+=		x86_64
@@ -87,6 +97,11 @@ PLIST.${pvar}=		YES
 
 TEST_TARGET=		check
 
+pre-configure:
+	${CP} ${FILESDIR}/accel_stubs_nvmm-stub.c ${WRKSRC}/accel/stubs/nvmm-stub.c
+	${CP} ${FILESDIR}/include_sysemu_nvmm.h ${WRKSRC}/include/sysemu/nvmm.h
+	${CP} ${FILESDIR}/target_i386_nvmm-all.c ${WRKSRC}/target/i386/nvmm-all.c
+
 post-install:
 	${INSTALL_DATA} ${FILESDIR}/Makefile.multinode-NetBSD \
 		${DESTDIR}${PREFIX}/share/doc/qemu/
diff --git a/qemu-nvmm/PLIST b/qemu-nvmm/PLIST
index 8b2c3b73ab..de4a8bf04f 100644
--- a/qemu-nvmm/PLIST
+++ b/qemu-nvmm/PLIST
@@ -1,4 +1,5 @@
 @comment $NetBSD$
+bin/elf2dmp
 bin/qemu-edid
 bin/qemu-ga
 bin/qemu-img
@@ -13,19 +14,106 @@ man/man7/qemu-ga-ref.7
 man/man7/qemu-qmp-ref.7
 man/man8/qemu-ga.8
 man/man8/qemu-nbd.8
+share/applications/qemu.desktop
 share/doc/qemu/Makefile.multinode-NetBSD
+share/doc/qemu/interop/.buildinfo
+share/doc/qemu/interop/_static/ajax-loader.gif
+share/doc/qemu/interop/_static/alabaster.css
+share/doc/qemu/interop/_static/basic.css
+share/doc/qemu/interop/_static/comment-bright.png
+share/doc/qemu/interop/_static/comment-close.png
+share/doc/qemu/interop/_static/comment.png
+share/doc/qemu/interop/_static/custom.css
+share/doc/qemu/interop/_static/doctools.js
+share/doc/qemu/interop/_static/documentation_options.js
+share/doc/qemu/interop/_static/down-pressed.png
+share/doc/qemu/interop/_static/down.png
+share/doc/qemu/interop/_static/file.png
+share/doc/qemu/interop/_static/jquery-3.2.1.js
+share/doc/qemu/interop/_static/jquery.js
+share/doc/qemu/interop/_static/language_data.js
+share/doc/qemu/interop/_static/minus.png
+share/doc/qemu/interop/_static/plus.png
+share/doc/qemu/interop/_static/pygments.css
+share/doc/qemu/interop/_static/searchtools.js
+share/doc/qemu/interop/_static/underscore-1.3.1.js
+share/doc/qemu/interop/_static/underscore.js
+share/doc/qemu/interop/_static/up-pressed.png
+share/doc/qemu/interop/_static/up.png
+share/doc/qemu/interop/_static/websupport.js
+share/doc/qemu/interop/bitmaps.html
+share/doc/qemu/interop/genindex.html
+share/doc/qemu/interop/index.html
+share/doc/qemu/interop/live-block-operations.html
+share/doc/qemu/interop/objects.inv
+share/doc/qemu/interop/pr-helper.html
+share/doc/qemu/interop/search.html
+share/doc/qemu/interop/searchindex.js
+share/doc/qemu/interop/vhost-user-gpu.html
+share/doc/qemu/interop/vhost-user.html
 share/doc/qemu/qemu-doc.html
 share/doc/qemu/qemu-doc.txt
 share/doc/qemu/qemu-ga-ref.html
 share/doc/qemu/qemu-ga-ref.txt
 share/doc/qemu/qemu-qmp-ref.html
 share/doc/qemu/qemu-qmp-ref.txt
+share/doc/qemu/specs/.buildinfo
+share/doc/qemu/specs/_static/ajax-loader.gif
+share/doc/qemu/specs/_static/alabaster.css
+share/doc/qemu/specs/_static/basic.css
+share/doc/qemu/specs/_static/comment-bright.png
+share/doc/qemu/specs/_static/comment-close.png
+share/doc/qemu/specs/_static/comment.png
+share/doc/qemu/specs/_static/custom.css
+share/doc/qemu/specs/_static/doctools.js
+share/doc/qemu/specs/_static/documentation_options.js
+share/doc/qemu/specs/_static/down-pressed.png
+share/doc/qemu/specs/_static/down.png
+share/doc/qemu/specs/_static/file.png
+share/doc/qemu/specs/_static/jquery-3.2.1.js
+share/doc/qemu/specs/_static/jquery.js
+share/doc/qemu/specs/_static/language_data.js
+share/doc/qemu/specs/_static/minus.png
+share/doc/qemu/specs/_static/plus.png
+share/doc/qemu/specs/_static/pygments.css
+share/doc/qemu/specs/_static/searchtools.js
+share/doc/qemu/specs/_static/underscore-1.3.1.js
+share/doc/qemu/specs/_static/underscore.js
+share/doc/qemu/specs/_static/up-pressed.png
+share/doc/qemu/specs/_static/up.png
+share/doc/qemu/specs/_static/websupport.js
+share/doc/qemu/specs/genindex.html
+share/doc/qemu/specs/index.html
+share/doc/qemu/specs/objects.inv
+share/doc/qemu/specs/ppc-spapr-xive.html
+share/doc/qemu/specs/ppc-xive.html
+share/doc/qemu/specs/search.html
+share/doc/qemu/specs/searchindex.js
+share/icons/hicolor/128x128/apps/qemu.png
+share/icons/hicolor/16x16/apps/qemu.png
+share/icons/hicolor/24x24/apps/qemu.png
+share/icons/hicolor/256x256/apps/qemu.png
+share/icons/hicolor/32x32/apps/qemu.bmp
+share/icons/hicolor/32x32/apps/qemu.png
+share/icons/hicolor/48x48/apps/qemu.png
+share/icons/hicolor/512x512/apps/qemu.png
+share/icons/hicolor/64x64/apps/qemu.png
+share/icons/hicolor/scalable/apps/qemu.svg
 share/qemu/QEMU,cgthree.bin
 share/qemu/QEMU,tcx.bin
 share/qemu/bamboo.dtb
 share/qemu/bios-256k.bin
 share/qemu/bios.bin
 share/qemu/canyonlands.dtb
+share/qemu/edk2-aarch64-code.fd
+share/qemu/edk2-arm-code.fd
+share/qemu/edk2-arm-vars.fd
+share/qemu/edk2-i386-code.fd
+share/qemu/edk2-i386-secure-code.fd
+share/qemu/edk2-i386-vars.fd
+share/qemu/edk2-licenses.txt
+share/qemu/edk2-x86_64-code.fd
+share/qemu/edk2-x86_64-secure-code.fd
 share/qemu/efi-e1000.rom
 share/qemu/efi-e1000e.rom
 share/qemu/efi-eepro100.rom
@@ -34,10 +122,15 @@ share/qemu/efi-pcnet.rom
 share/qemu/efi-rtl8139.rom
 share/qemu/efi-virtio.rom
 share/qemu/efi-vmxnet3.rom
+share/qemu/firmware/50-edk2-i386-secure.json
+share/qemu/firmware/50-edk2-x86_64-secure.json
+share/qemu/firmware/60-edk2-aarch64.json
+share/qemu/firmware/60-edk2-arm.json
+share/qemu/firmware/60-edk2-i386.json
+share/qemu/firmware/60-edk2-x86_64.json
 share/qemu/hppa-firmware.img
 share/qemu/keymaps/ar
 share/qemu/keymaps/bepo
-share/qemu/keymaps/common
 share/qemu/keymaps/cz
 share/qemu/keymaps/da
 share/qemu/keymaps/de
@@ -60,9 +153,7 @@ share/qemu/keymaps/ja
 share/qemu/keymaps/lt
 share/qemu/keymaps/lv
 share/qemu/keymaps/mk
-share/qemu/keymaps/modifiers
 share/qemu/keymaps/nl
-share/qemu/keymaps/nl-be
 share/qemu/keymaps/no
 share/qemu/keymaps/pl
 share/qemu/keymaps/pt
@@ -79,18 +170,21 @@ share/qemu/multiboot.bin
 share/qemu/openbios-ppc
 share/qemu/openbios-sparc32
 share/qemu/openbios-sparc64
+share/qemu/opensbi-riscv32-virt-fw_jump.bin
+share/qemu/opensbi-riscv64-sifive_u-fw_jump.bin
+share/qemu/opensbi-riscv64-virt-fw_jump.bin
 share/qemu/palcode-clipper
 share/qemu/petalogix-ml605.dtb
 share/qemu/petalogix-s3adsp1800.dtb
 share/qemu/ppc_rom.bin
+share/qemu/pvh.bin
 share/qemu/pxe-e1000.rom
 share/qemu/pxe-eepro100.rom
 share/qemu/pxe-ne2k_pci.rom
 share/qemu/pxe-pcnet.rom
 share/qemu/pxe-rtl8139.rom
 share/qemu/pxe-virtio.rom
-share/qemu/qemu-icon.bmp
-share/qemu/qemu_logo_no_text.svg
+share/qemu/qemu-nsis.bmp
 share/qemu/qemu_vga.ndrv
 share/qemu/s390-ccw.img
 share/qemu/s390-netboot.img
@@ -101,6 +195,7 @@ share/qemu/spapr-rtas.bin
 share/qemu/trace-events-all
 share/qemu/u-boot-sam460-20100605.bin
 share/qemu/u-boot.e500
+share/qemu/vgabios-ati.bin
 share/qemu/vgabios-bochs-display.bin
 share/qemu/vgabios-cirrus.bin
 share/qemu/vgabios-qxl.bin
diff --git a/qemu-nvmm/distinfo b/qemu-nvmm/distinfo
index 9de323c90c..a9c08acd66 100644
--- a/qemu-nvmm/distinfo
+++ b/qemu-nvmm/distinfo
@@ -1,22 +1,16 @@
 $NetBSD: distinfo,v 1.141 2019/01/31 13:39:10 martin Exp $
 
-SHA1 (qemu-3.1.0.tar.xz) = 3ed63c0c05abc8c8ec075dac2688c229f139a5da
-RMD160 (qemu-3.1.0.tar.xz) = 7650d76b8578ee2c31cef048c7929b30c607b83d
-SHA512 (qemu-3.1.0.tar.xz) = 7e8dae823937cfac2f0c60406bd3bdcb89df40313dab2a4bed327d5198f7fcc68ac8b31e44692caa09299cc71256ee0b8c17e4f49f78ada8043d424f5daf82fe
-Size (qemu-3.1.0.tar.xz) = 36070104 bytes
-SHA1 (patch-Makefile) = b3899fb8d0dd2f29bf3edd843836612e6e6c019c
+SHA1 (qemu-4.1.0.tar.xz) = 29c99be326cd8f3b2b75d7fec9066ca24854df1e
+RMD160 (qemu-4.1.0.tar.xz) = 7f95536777579cec1793c894089a2f72bb7f09d8
+SHA512 (qemu-4.1.0.tar.xz) = 82fd51702a7b9b1b00b2f1bd3b4a832b80249018dbba1add0b0a73e7d4bee452afd45574b4d8df7ce4477d8711f3bda4ca072a1a6de25895c93eb21cf78fc4b2
+Size (qemu-4.1.0.tar.xz) = 54001708 bytes
+SHA1 (patch-Makefile) = 85d24d842ad2f7e1e2ec6f0e0e3268c21ef9bf0d
 SHA1 (patch-audio_audio.c) = 98a1de2fd48638886b5d16f6a61dc72910e98b41
-SHA1 (patch-block.c) = 5eb15a87d6646719bf1e9277fbe73a99e4905481
 SHA1 (patch-contrib_ivshmem-client_ivshmem-client.c) = 40c8751607cbf66a37e4c4e08f2664b864e2e984
 SHA1 (patch-contrib_ivshmem-server_ivshmem-server.c) = d8f53432b5752f4263dc4ef96108a976a05147a3
-SHA1 (patch-hw_arm_boot.c) = bd28e4b8e8732a2b01ba1d0e8a727e8e7bc5227a
-SHA1 (patch-hw_core_loader.c) = 06ff8bfa5be720e428668987598d55b6799202e7
-SHA1 (patch-hw_core_uboot__image.h) = 26a656310d991747b7080b9f28042afd536e4c28
+SHA1 (patch-hw_core_uboot__image.h) = 17eef02349343c5fcfb7a4069cb6f8fd11efcb59
 SHA1 (patch-hw_display_omap__dss.c) = 6b13242f28e32346bc70548c216c578d98fd3420
 SHA1 (patch-hw_net_etraxfs__eth.c) = e5dd1661d60dbcd27b332403e0843500ba9544bc
 SHA1 (patch-hw_net_xilinx__axienet.c) = ebcd2676d64ce6f31e4a8c976d4fdf530ad5e8b7
-SHA1 (patch-hw_usb_dev-mtp.c) = 66543b5559d92f8e2fa9a6eb85e5dfe7c1ad3339
-SHA1 (patch-nvmm-support) = af8920e486439e106fe3ffce43738c124f1c4d86
-SHA1 (patch-target_arm_cpu.h) = 0f70a35900c7cc3124dc11969643e0eef6ad6af5
-SHA1 (patch-target_arm_helper.c) = 08f9425422080442a2c90bb252423bab38651ae4
-SHA1 (patch-tests_Makefile.include) = 42345d697cb2e324dccf1d68bd8d61e8001c6162
+SHA1 (patch-hw_usb_dev-mtp.c) = 0f9034fb3904e5d5e3b98d24b94e054181687d95
+SHA1 (patch-nvmm-support) = bca46ceb13fb74d4d299c8740b920d06b44a19de
diff --git a/qemu-nvmm/files/accel_stubs_nvmm-stub.c b/qemu-nvmm/files/accel_stubs_nvmm-stub.c
new file mode 100644
index 0000000000..c2208b84a3
--- /dev/null
+++ b/qemu-nvmm/files/accel_stubs_nvmm-stub.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2018-2019 Maxime Villard, All rights reserved.
+ *
+ * NetBSD Virtual Machine Monitor (NVMM) accelerator stub.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "sysemu/nvmm.h"
+
+int nvmm_init_vcpu(CPUState *cpu)
+{
+    return -1;
+}
+
+int nvmm_vcpu_exec(CPUState *cpu)
+{
+    return -1;
+}
+
+void nvmm_destroy_vcpu(CPUState *cpu)
+{
+}
+
+void nvmm_cpu_synchronize_state(CPUState *cpu)
+{
+}
+
+void nvmm_cpu_synchronize_post_reset(CPUState *cpu)
+{
+}
+
+void nvmm_cpu_synchronize_post_init(CPUState *cpu)
+{
+}
+
+void nvmm_cpu_synchronize_pre_loadvm(CPUState *cpu)
+{
+}
diff --git a/qemu-nvmm/files/include_sysemu_nvmm.h b/qemu-nvmm/files/include_sysemu_nvmm.h
new file mode 100644
index 0000000000..10496f3980
--- /dev/null
+++ b/qemu-nvmm/files/include_sysemu_nvmm.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2018-2019 Maxime Villard, All rights reserved.
+ *
+ * NetBSD Virtual Machine Monitor (NVMM) accelerator support.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_NVMM_H
+#define QEMU_NVMM_H
+
+#include "config-host.h"
+#include "qemu-common.h"
+
+int nvmm_init_vcpu(CPUState *);
+int nvmm_vcpu_exec(CPUState *);
+void nvmm_destroy_vcpu(CPUState *);
+
+void nvmm_cpu_synchronize_state(CPUState *);
+void nvmm_cpu_synchronize_post_reset(CPUState *);
+void nvmm_cpu_synchronize_post_init(CPUState *);
+void nvmm_cpu_synchronize_pre_loadvm(CPUState *);
+
+#ifdef CONFIG_NVMM
+
+int nvmm_enabled(void);
+
+#else /* CONFIG_NVMM */
+
+#define nvmm_enabled() (0)
+
+#endif /* CONFIG_NVMM */
+
+#endif /* CONFIG_NVMM */
diff --git a/qemu-nvmm/files/target_i386_nvmm-all.c b/qemu-nvmm/files/target_i386_nvmm-all.c
new file mode 100644
index 0000000000..4ac3b6610e
--- /dev/null
+++ b/qemu-nvmm/files/target_i386_nvmm-all.c
@@ -0,0 +1,1168 @@
+/*
+ * Copyright (c) 2018-2019 Maxime Villard, All rights reserved.
+ *
+ * NetBSD Virtual Machine Monitor (NVMM) accelerator for QEMU.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/address-spaces.h"
+#include "exec/ioport.h"
+#include "qemu-common.h"
+#include "strings.h"
+#include "sysemu/accel.h"
+#include "sysemu/nvmm.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/cpus.h"
+#include "qemu/main-loop.h"
+#include "hw/boards.h"
+#include "qemu/error-report.h"
+#include "qemu/queue.h"
+#include "qapi/error.h"
+#include "migration/blocker.h"
+
+#include <nvmm.h>
+
+struct qemu_vcpu {
+    struct nvmm_vcpu vcpu;
+    uint8_t tpr;
+    bool stop;
+
+    /* Window-exiting for INTs/NMIs. */
+    bool int_window_exit;
+    bool nmi_window_exit;
+
+    /* The guest is in an interrupt shadow (POP SS, etc). */
+    bool int_shadow;
+};
+
+struct qemu_machine {
+    struct nvmm_machine mach;
+};
+
+/* -------------------------------------------------------------------------- */
+
+static bool nvmm_allowed = false;
+static struct qemu_machine qemu_mach;
+
+static struct qemu_vcpu *
+get_qemu_vcpu(CPUState *cpu)
+{
+    return (struct qemu_vcpu *)cpu->hax_vcpu;
+}
+
+static struct nvmm_machine *
+get_nvmm_mach(void)
+{
+    return &qemu_mach.mach;
+}
+
+/* -------------------------------------------------------------------------- */
+
+static void
+nvmm_set_segment(struct nvmm_x64_state_seg *nseg, const SegmentCache *qseg)
+{
+    uint32_t attrib = qseg->flags;
+
+    nseg->selector = qseg->selector;
+    nseg->limit = qseg->limit;
+    nseg->base = qseg->base;
+    nseg->attrib.type = __SHIFTOUT(attrib, DESC_TYPE_MASK);
+    nseg->attrib.s = __SHIFTOUT(attrib, DESC_S_MASK);
+    nseg->attrib.dpl = __SHIFTOUT(attrib, DESC_DPL_MASK);
+    nseg->attrib.p = __SHIFTOUT(attrib, DESC_P_MASK);
+    nseg->attrib.avl = __SHIFTOUT(attrib, DESC_AVL_MASK);
+    nseg->attrib.l = __SHIFTOUT(attrib, DESC_L_MASK);
+    nseg->attrib.def = __SHIFTOUT(attrib, DESC_B_MASK);
+    nseg->attrib.g = __SHIFTOUT(attrib, DESC_G_MASK);
+}
+
+static void
+nvmm_set_registers(CPUState *cpu)
+{
+    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
+    struct nvmm_machine *mach = get_nvmm_mach();
+    struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
+    struct nvmm_vcpu *vcpu = &qcpu->vcpu;
+    struct nvmm_x64_state *state = vcpu->state;
+    uint64_t bitmap;
+    size_t i;
+    int ret;
+
+    assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
+
+    /* GPRs. */
+    state->gprs[NVMM_X64_GPR_RAX] = env->regs[R_EAX];
+    state->gprs[NVMM_X64_GPR_RCX] = env->regs[R_ECX];
+    state->gprs[NVMM_X64_GPR_RDX] = env->regs[R_EDX];
+    state->gprs[NVMM_X64_GPR_RBX] = env->regs[R_EBX];
+    state->gprs[NVMM_X64_GPR_RSP] = env->regs[R_ESP];
+    state->gprs[NVMM_X64_GPR_RBP] = env->regs[R_EBP];
+    state->gprs[NVMM_X64_GPR_RSI] = env->regs[R_ESI];
+    state->gprs[NVMM_X64_GPR_RDI] = env->regs[R_EDI];
+    state->gprs[NVMM_X64_GPR_R8]  = env->regs[R_R8];
+    state->gprs[NVMM_X64_GPR_R9]  = env->regs[R_R9];
+    state->gprs[NVMM_X64_GPR_R10] = env->regs[R_R10];
+    state->gprs[NVMM_X64_GPR_R11] = env->regs[R_R11];
+    state->gprs[NVMM_X64_GPR_R12] = env->regs[R_R12];
+    state->gprs[NVMM_X64_GPR_R13] = env->regs[R_R13];
+    state->gprs[NVMM_X64_GPR_R14] = env->regs[R_R14];
+    state->gprs[NVMM_X64_GPR_R15] = env->regs[R_R15];
+
+    /* RIP and RFLAGS. */
+    state->gprs[NVMM_X64_GPR_RIP] = env->eip;
+    state->gprs[NVMM_X64_GPR_RFLAGS] = env->eflags;
+
+    /* Segments. */
+    nvmm_set_segment(&state->segs[NVMM_X64_SEG_CS], &env->segs[R_CS]);
+    nvmm_set_segment(&state->segs[NVMM_X64_SEG_DS], &env->segs[R_DS]);
+    nvmm_set_segment(&state->segs[NVMM_X64_SEG_ES], &env->segs[R_ES]);
+    nvmm_set_segment(&state->segs[NVMM_X64_SEG_FS], &env->segs[R_FS]);
+    nvmm_set_segment(&state->segs[NVMM_X64_SEG_GS], &env->segs[R_GS]);
+    nvmm_set_segment(&state->segs[NVMM_X64_SEG_SS], &env->segs[R_SS]);
+
+    /* Special segments. */
+    nvmm_set_segment(&state->segs[NVMM_X64_SEG_GDT], &env->gdt);
+    nvmm_set_segment(&state->segs[NVMM_X64_SEG_LDT], &env->ldt);
+    nvmm_set_segment(&state->segs[NVMM_X64_SEG_TR], &env->tr);
+    nvmm_set_segment(&state->segs[NVMM_X64_SEG_IDT], &env->idt);
+
+    /* Control registers. */
+    state->crs[NVMM_X64_CR_CR0] = env->cr[0];
+    state->crs[NVMM_X64_CR_CR2] = env->cr[2];
+    state->crs[NVMM_X64_CR_CR3] = env->cr[3];
+    state->crs[NVMM_X64_CR_CR4] = env->cr[4];
+    state->crs[NVMM_X64_CR_CR8] = qcpu->tpr;
+    state->crs[NVMM_X64_CR_XCR0] = env->xcr0;
+
+    /* Debug registers. */
+    state->drs[NVMM_X64_DR_DR0] = env->dr[0];
+    state->drs[NVMM_X64_DR_DR1] = env->dr[1];
+    state->drs[NVMM_X64_DR_DR2] = env->dr[2];
+    state->drs[NVMM_X64_DR_DR3] = env->dr[3];
+    state->drs[NVMM_X64_DR_DR6] = env->dr[6];
+    state->drs[NVMM_X64_DR_DR7] = env->dr[7];
+
+    /* FPU. */
+    state->fpu.fx_cw = env->fpuc;
+    state->fpu.fx_sw = (env->fpus & ~0x3800) | ((env->fpstt & 0x7) << 11);
+    state->fpu.fx_tw = 0;
+    for (i = 0; i < 8; i++) {
+        state->fpu.fx_tw |= (!env->fptags[i]) << i;
+    }
+    state->fpu.fx_opcode = env->fpop;
+    state->fpu.fx_ip.fa_64 = env->fpip;
+    state->fpu.fx_dp.fa_64 = env->fpdp;
+    state->fpu.fx_mxcsr = env->mxcsr;
+    state->fpu.fx_mxcsr_mask = 0x0000FFFF;
+    assert(sizeof(state->fpu.fx_87_ac) == sizeof(env->fpregs));
+    memcpy(state->fpu.fx_87_ac, env->fpregs, sizeof(env->fpregs));
+    for (i = 0; i < 16; i++) {
+        memcpy(&state->fpu.fx_xmm[i].xmm_bytes[0],
+            &env->xmm_regs[i].ZMM_Q(0), 8);
+        memcpy(&state->fpu.fx_xmm[i].xmm_bytes[8],
+            &env->xmm_regs[i].ZMM_Q(1), 8);
+    }
+
+    /* MSRs. */
+    state->msrs[NVMM_X64_MSR_EFER] = env->efer;
+    state->msrs[NVMM_X64_MSR_STAR] = env->star;
+#ifdef TARGET_X86_64
+    state->msrs[NVMM_X64_MSR_LSTAR] = env->lstar;
+    state->msrs[NVMM_X64_MSR_CSTAR] = env->cstar;
+    state->msrs[NVMM_X64_MSR_SFMASK] = env->fmask;
+    state->msrs[NVMM_X64_MSR_KERNELGSBASE] = env->kernelgsbase;
+#endif
+    state->msrs[NVMM_X64_MSR_SYSENTER_CS]  = env->sysenter_cs;
+    state->msrs[NVMM_X64_MSR_SYSENTER_ESP] = env->sysenter_esp;
+    state->msrs[NVMM_X64_MSR_SYSENTER_EIP] = env->sysenter_eip;
+    state->msrs[NVMM_X64_MSR_PAT] = env->pat;
+    state->msrs[NVMM_X64_MSR_TSC] = env->tsc;
+
+    bitmap =
+        NVMM_X64_STATE_SEGS |
+        NVMM_X64_STATE_GPRS |
+        NVMM_X64_STATE_CRS  |
+        NVMM_X64_STATE_DRS  |
+        NVMM_X64_STATE_MSRS |
+        NVMM_X64_STATE_FPU;
+
+    ret = nvmm_vcpu_setstate(mach, vcpu, bitmap);
+    if (ret == -1) {
+        error_report("NVMM: Failed to set virtual processor context,"
+            " error=%d", errno);
+    }
+}
+
+static void
+nvmm_get_segment(SegmentCache *qseg, const struct nvmm_x64_state_seg *nseg)
+{
+    qseg->selector = nseg->selector;
+    qseg->limit = nseg->limit;
+    qseg->base = nseg->base;
+
+    qseg->flags =
+        __SHIFTIN((uint32_t)nseg->attrib.type, DESC_TYPE_MASK) |
+        __SHIFTIN((uint32_t)nseg->attrib.s, DESC_S_MASK) |
+        __SHIFTIN((uint32_t)nseg->attrib.dpl, DESC_DPL_MASK) |
+        __SHIFTIN((uint32_t)nseg->attrib.p, DESC_P_MASK) |
+        __SHIFTIN((uint32_t)nseg->attrib.avl, DESC_AVL_MASK) |
+        __SHIFTIN((uint32_t)nseg->attrib.l, DESC_L_MASK) |
+        __SHIFTIN((uint32_t)nseg->attrib.def, DESC_B_MASK) |
+        __SHIFTIN((uint32_t)nseg->attrib.g, DESC_G_MASK);
+}
+
+static void
+nvmm_get_registers(CPUState *cpu)
+{
+    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
+    struct nvmm_machine *mach = get_nvmm_mach();
+    struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
+    struct nvmm_vcpu *vcpu = &qcpu->vcpu;
+    X86CPU *x86_cpu = X86_CPU(cpu);
+    struct nvmm_x64_state *state = vcpu->state;
+    uint64_t bitmap, tpr;
+    size_t i;
+    int ret;
+
+    assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
+
+    bitmap =
+        NVMM_X64_STATE_SEGS |
+        NVMM_X64_STATE_GPRS |
+        NVMM_X64_STATE_CRS  |
+        NVMM_X64_STATE_DRS  |
+        NVMM_X64_STATE_MSRS |
+        NVMM_X64_STATE_FPU;
+
+    ret = nvmm_vcpu_getstate(mach, vcpu, bitmap);
+    if (ret == -1) {
+        error_report("NVMM: Failed to get virtual processor context,"
+            " error=%d", errno);
+    }
+
+    /* GPRs. */
+    env->regs[R_EAX] = state->gprs[NVMM_X64_GPR_RAX];
+    env->regs[R_ECX] = state->gprs[NVMM_X64_GPR_RCX];
+    env->regs[R_EDX] = state->gprs[NVMM_X64_GPR_RDX];
+    env->regs[R_EBX] = state->gprs[NVMM_X64_GPR_RBX];
+    env->regs[R_ESP] = state->gprs[NVMM_X64_GPR_RSP];
+    env->regs[R_EBP] = state->gprs[NVMM_X64_GPR_RBP];
+    env->regs[R_ESI] = state->gprs[NVMM_X64_GPR_RSI];
+    env->regs[R_EDI] = state->gprs[NVMM_X64_GPR_RDI];
+    env->regs[R_R8]  = state->gprs[NVMM_X64_GPR_R8];
+    env->regs[R_R9]  = state->gprs[NVMM_X64_GPR_R9];
+    env->regs[R_R10] = state->gprs[NVMM_X64_GPR_R10];
+    env->regs[R_R11] = state->gprs[NVMM_X64_GPR_R11];
+    env->regs[R_R12] = state->gprs[NVMM_X64_GPR_R12];
+    env->regs[R_R13] = state->gprs[NVMM_X64_GPR_R13];
+    env->regs[R_R14] = state->gprs[NVMM_X64_GPR_R14];
+    env->regs[R_R15] = state->gprs[NVMM_X64_GPR_R15];
+
+    /* RIP and RFLAGS. */
+    env->eip = state->gprs[NVMM_X64_GPR_RIP];
+    env->eflags = state->gprs[NVMM_X64_GPR_RFLAGS];
+
+    /* Segments. */
+    nvmm_get_segment(&env->segs[R_ES], &state->segs[NVMM_X64_SEG_ES]);
+    nvmm_get_segment(&env->segs[R_CS], &state->segs[NVMM_X64_SEG_CS]);
+    nvmm_get_segment(&env->segs[R_SS], &state->segs[NVMM_X64_SEG_SS]);
+    nvmm_get_segment(&env->segs[R_DS], &state->segs[NVMM_X64_SEG_DS]);
+    nvmm_get_segment(&env->segs[R_FS], &state->segs[NVMM_X64_SEG_FS]);
+    nvmm_get_segment(&env->segs[R_GS], &state->segs[NVMM_X64_SEG_GS]);
+
+    /* Special segments. */
+    nvmm_get_segment(&env->gdt, &state->segs[NVMM_X64_SEG_GDT]);
+    nvmm_get_segment(&env->ldt, &state->segs[NVMM_X64_SEG_LDT]);
+    nvmm_get_segment(&env->tr, &state->segs[NVMM_X64_SEG_TR]);
+    nvmm_get_segment(&env->idt, &state->segs[NVMM_X64_SEG_IDT]);
+
+    /* Control registers. */
+    env->cr[0] = state->crs[NVMM_X64_CR_CR0];
+    env->cr[2] = state->crs[NVMM_X64_CR_CR2];
+    env->cr[3] = state->crs[NVMM_X64_CR_CR3];
+    env->cr[4] = state->crs[NVMM_X64_CR_CR4];
+    tpr = state->crs[NVMM_X64_CR_CR8];
+    if (tpr != qcpu->tpr) {
+        qcpu->tpr = tpr;
+        cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
+    }
+    env->xcr0 = state->crs[NVMM_X64_CR_XCR0];
+
+    /* Debug registers. */
+    env->dr[0] = state->drs[NVMM_X64_DR_DR0];
+    env->dr[1] = state->drs[NVMM_X64_DR_DR1];
+    env->dr[2] = state->drs[NVMM_X64_DR_DR2];
+    env->dr[3] = state->drs[NVMM_X64_DR_DR3];
+    env->dr[6] = state->drs[NVMM_X64_DR_DR6];
+    env->dr[7] = state->drs[NVMM_X64_DR_DR7];
+
+    /* FPU. */
+    env->fpuc = state->fpu.fx_cw;
+    env->fpstt = (state->fpu.fx_sw >> 11) & 0x7;
+    env->fpus = state->fpu.fx_sw & ~0x3800;
+    for (i = 0; i < 8; i++) {
+        env->fptags[i] = !((state->fpu.fx_tw >> i) & 1);
+    }
+    env->fpop = state->fpu.fx_opcode;
+    env->fpip = state->fpu.fx_ip.fa_64;
+    env->fpdp = state->fpu.fx_dp.fa_64;
+    env->mxcsr = state->fpu.fx_mxcsr;
+    assert(sizeof(state->fpu.fx_87_ac) == sizeof(env->fpregs));
+    memcpy(env->fpregs, state->fpu.fx_87_ac, sizeof(env->fpregs));
+    for (i = 0; i < 16; i++) {
+        memcpy(&env->xmm_regs[i].ZMM_Q(0),
+            &state->fpu.fx_xmm[i].xmm_bytes[0], 8);
+        memcpy(&env->xmm_regs[i].ZMM_Q(1),
+            &state->fpu.fx_xmm[i].xmm_bytes[8], 8);
+    }
+
+    /* MSRs. */
+    env->efer = state->msrs[NVMM_X64_MSR_EFER];
+    env->star = state->msrs[NVMM_X64_MSR_STAR];
+#ifdef TARGET_X86_64
+    env->lstar = state->msrs[NVMM_X64_MSR_LSTAR];
+    env->cstar = state->msrs[NVMM_X64_MSR_CSTAR];
+    env->fmask = state->msrs[NVMM_X64_MSR_SFMASK];
+    env->kernelgsbase = state->msrs[NVMM_X64_MSR_KERNELGSBASE];
+#endif
+    env->sysenter_cs  = state->msrs[NVMM_X64_MSR_SYSENTER_CS];
+    env->sysenter_esp = state->msrs[NVMM_X64_MSR_SYSENTER_ESP];
+    env->sysenter_eip = state->msrs[NVMM_X64_MSR_SYSENTER_EIP];
+    env->pat = state->msrs[NVMM_X64_MSR_PAT];
+    env->tsc = state->msrs[NVMM_X64_MSR_TSC];
+
+    x86_update_hflags(env);
+}
+
+static bool
+nvmm_can_take_int(CPUState *cpu)
+{
+    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
+    struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
+    struct nvmm_vcpu *vcpu = &qcpu->vcpu;
+    struct nvmm_machine *mach = get_nvmm_mach();
+
+    if (qcpu->int_window_exit) {
+        return false;
+    }
+
+    if (qcpu->int_shadow || (!(env->eflags & IF_MASK))) {
+        struct nvmm_x64_state *state = vcpu->state;
+
+        /* Exit on interrupt window. */
+        nvmm_vcpu_getstate(mach, vcpu, NVMM_X64_STATE_INTR);
+        state->intr.int_window_exiting = 1;
+        nvmm_vcpu_setstate(mach, vcpu, NVMM_X64_STATE_INTR);
+
+        return false;
+    }
+
+    return true;
+}
+
+static bool
+nvmm_can_take_nmi(CPUState *cpu)
+{
+    struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
+
+    /*
+     * Contrary to INTs, NMIs always schedule an exit when they are
+     * completed. Therefore, if window-exiting is enabled, it means
+     * NMIs are blocked.
+     */
+    if (qcpu->nmi_window_exit) {
+        return false;
+    }
+
+    return true;
+}
+
+/*
+ * Called before the VCPU is run. We inject events generated by the I/O
+ * thread, and synchronize the guest TPR.
+ */
+static void
+nvmm_vcpu_pre_run(CPUState *cpu)
+{
+    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
+    struct nvmm_machine *mach = get_nvmm_mach();
+    struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
+    struct nvmm_vcpu *vcpu = &qcpu->vcpu;
+    X86CPU *x86_cpu = X86_CPU(cpu);
+    struct nvmm_x64_state *state = vcpu->state;
+    struct nvmm_event *event = vcpu->event;
+    bool has_event = false;
+    bool sync_tpr = false;
+    uint8_t tpr;
+    int ret;
+
+    qemu_mutex_lock_iothread();
+
+    tpr = cpu_get_apic_tpr(x86_cpu->apic_state);
+    if (tpr != qcpu->tpr) {
+        qcpu->tpr = tpr;
+        sync_tpr = true;
+    }
+
+    /*
+     * Force the VCPU out of its inner loop to process any INIT requests
+     * or commit pending TPR access.
+     */
+    if (cpu->interrupt_request & (CPU_INTERRUPT_INIT|CPU_INTERRUPT_TPR)) {
+        cpu->exit_request = 1;
+    }
+
+    if (!has_event && (cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
+        if (nvmm_can_take_nmi(cpu)) {
+            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
+            event->type = NVMM_EVENT_INTERRUPT_HW;
+            event->vector = 2;
+            has_event = true;
+        }
+    }
+
+    if (!has_event && (cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
+        if (nvmm_can_take_int(cpu)) {
+            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
+            event->type = NVMM_EVENT_INTERRUPT_HW;
+            event->vector = cpu_get_pic_interrupt(env);
+            has_event = true;
+        }
+    }
+
+    /* Don't want SMIs. */
+    if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
+        cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
+    }
+
+    if (sync_tpr) {
+        ret = nvmm_vcpu_getstate(mach, vcpu, NVMM_X64_STATE_CRS);
+        if (ret == -1) {
+            error_report("NVMM: Failed to get CPU state,"
+                " error=%d", errno);
+        }
+
+        state->crs[NVMM_X64_CR_CR8] = qcpu->tpr;
+
+        ret = nvmm_vcpu_setstate(mach, vcpu, NVMM_X64_STATE_CRS);
+        if (ret == -1) {
+            error_report("NVMM: Failed to set CPU state,"
+                " error=%d", errno);
+        }
+    }
+
+    if (has_event) {
+        ret = nvmm_vcpu_inject(mach, vcpu);
+        if (ret == -1) {
+            error_report("NVMM: Failed to inject event,"
+                " error=%d", errno);
+        }
+    }
+
+    qemu_mutex_unlock_iothread();
+}
+
+/*
+ * Called after the VCPU ran. We synchronize the host view of the TPR and
+ * RFLAGS.
+ */
+static void
+nvmm_vcpu_post_run(CPUState *cpu, struct nvmm_exit *exit)
+{
+    struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
+    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
+    X86CPU *x86_cpu = X86_CPU(cpu);
+    uint64_t tpr;
+
+    env->eflags = exit->exitstate[NVMM_X64_EXITSTATE_RFLAGS];
+
+    qcpu->int_shadow =
+        exit->exitstate[NVMM_X64_EXITSTATE_INT_SHADOW];
+    qcpu->int_window_exit =
+        exit->exitstate[NVMM_X64_EXITSTATE_INT_WINDOW_EXIT];
+    qcpu->nmi_window_exit =
+        exit->exitstate[NVMM_X64_EXITSTATE_NMI_WINDOW_EXIT];
+
+    tpr = exit->exitstate[NVMM_X64_EXITSTATE_CR8];
+    if (qcpu->tpr != tpr) {
+        qcpu->tpr = tpr;
+        qemu_mutex_lock_iothread();
+        cpu_set_apic_tpr(x86_cpu->apic_state, qcpu->tpr);
+        qemu_mutex_unlock_iothread();
+    }
+}
+
+/* -------------------------------------------------------------------------- */
+
+static void
+nvmm_io_callback(struct nvmm_io *io)
+{
+    MemTxAttrs attrs = { 0 };
+    int ret;
+
+    ret = address_space_rw(&address_space_io, io->port, attrs, io->data,
+        io->size, !io->in);
+    if (ret != MEMTX_OK) {
+        error_report("NVMM: I/O Transaction Failed "
+            "[%s, port=%lu, size=%zu]", (io->in ? "in" : "out"),
+            io->port, io->size);
+    }
+
+    /* XXX Needed, otherwise infinite loop. */
+    current_cpu->vcpu_dirty = false;
+}
+
+static void
+nvmm_mem_callback(struct nvmm_mem *mem)
+{
+    cpu_physical_memory_rw(mem->gpa, mem->data, mem->size, mem->write);
+
+    /* XXX Needed, otherwise infinite loop. */
+    current_cpu->vcpu_dirty = false;
+}
+
+static struct nvmm_callbacks nvmm_callbacks = {
+    .io = nvmm_io_callback,
+    .mem = nvmm_mem_callback
+};
+
+/* -------------------------------------------------------------------------- */
+
+static int
+nvmm_handle_mem(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
+{
+    int ret;
+
+    ret = nvmm_assist_mem(mach, vcpu);
+    if (ret == -1) {
+        error_report("NVMM: Mem Assist Failed [gpa=%p]",
+            (void *)vcpu->exit->u.mem.gpa);
+    }
+
+    return ret;
+}
+
+static int
+nvmm_handle_io(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
+{
+    int ret;
+
+    ret = nvmm_assist_io(mach, vcpu);
+    if (ret == -1) {
+        error_report("NVMM: I/O Assist Failed [port=%d]",
+            (int)vcpu->exit->u.io.port);
+    }
+
+    return ret;
+}
+
+static int
+nvmm_handle_msr(struct nvmm_machine *mach, CPUState *cpu,
+    struct nvmm_exit *exit)
+{
+    struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
+    struct nvmm_vcpu *vcpu = &qcpu->vcpu;
+    X86CPU *x86_cpu = X86_CPU(cpu);
+    struct nvmm_x64_state *state = vcpu->state;
+    uint64_t val;
+    int ret;
+
+    val = exit->u.msr.val;
+
+    switch (exit->u.msr.msr) {
+    case MSR_IA32_APICBASE:
+        if (exit->u.msr.type == NVMM_EXIT_MSR_RDMSR) {
+            val = cpu_get_apic_base(x86_cpu->apic_state);
+        } else {
+            cpu_set_apic_base(x86_cpu->apic_state, val);
+        }
+        break;
+    default:
+        // TODO: more MSRs to add?
+        if (exit->u.msr.type == NVMM_EXIT_MSR_RDMSR) {
+            val = 0;
+        }
+        error_report("NVMM: Unexpected %sMSR 0x%lx [val=0x%lx], ignored",
+            (exit->u.msr.type == NVMM_EXIT_MSR_RDMSR) ? "RD" : "WR",
+            exit->u.msr.msr, val);
+        break;
+    }
+
+    ret = nvmm_vcpu_getstate(mach, vcpu, NVMM_X64_STATE_GPRS);
+    if (ret == -1) {
+        return -1;
+    }
+
+    if (exit->u.msr.type == NVMM_EXIT_MSR_RDMSR) {
+        state->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
+        state->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
+    }
+    state->gprs[NVMM_X64_GPR_RIP] = exit->u.msr.npc;
+
+    ret = nvmm_vcpu_setstate(mach, vcpu, NVMM_X64_STATE_GPRS);
+    if (ret == -1) {
+        return -1;
+    }
+
+    return 0;
+}
+
+static int
+nvmm_handle_halted(struct nvmm_machine *mach, CPUState *cpu,
+    struct nvmm_exit *exit)
+{
+    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
+    int ret = 0;
+
+    qemu_mutex_lock_iothread();
+
+    if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
+          (env->eflags & IF_MASK)) &&
+        !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
+        cpu->exception_index = EXCP_HLT;
+        cpu->halted = true;
+        ret = 1;
+    }
+
+    qemu_mutex_unlock_iothread();
+
+    return ret;
+}
+
+static int
+nvmm_inject_ud(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
+{
+    struct nvmm_event *event = vcpu->event;
+
+    event->type = NVMM_EVENT_EXCEPTION;
+    event->vector = 6;
+    event->u.error = 0;
+
+    return nvmm_vcpu_inject(mach, vcpu);
+}
+
+static int
+nvmm_vcpu_loop(CPUState *cpu)
+{
+    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
+    struct nvmm_machine *mach = get_nvmm_mach();
+    struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
+    struct nvmm_vcpu *vcpu = &qcpu->vcpu;
+    X86CPU *x86_cpu = X86_CPU(cpu);
+    struct nvmm_exit *exit = vcpu->exit;
+    int ret;
+
+    /*
+     * Some asynchronous events must be handled outside of the inner
+     * VCPU loop. They are handled here.
+     */
+    if (cpu->interrupt_request & CPU_INTERRUPT_INIT) {
+        nvmm_cpu_synchronize_state(cpu);
+        do_cpu_init(x86_cpu);
+        /* XXX: reset the INT/NMI windows */
+    }
+    if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
+        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
+        apic_poll_irq(x86_cpu->apic_state);
+    }
+    if (((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
+         (env->eflags & IF_MASK)) ||
+        (cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
+        cpu->halted = false;
+    }
+    if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) {
+        nvmm_cpu_synchronize_state(cpu);
+        do_cpu_sipi(x86_cpu);
+    }
+    if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
+        cpu->interrupt_request &= ~CPU_INTERRUPT_TPR;
+        nvmm_cpu_synchronize_state(cpu);
+        apic_handle_tpr_access_report(x86_cpu->apic_state, env->eip,
+            env->tpr_access_type);
+    }
+
+    if (cpu->halted) {
+        cpu->exception_index = EXCP_HLT;
+        atomic_set(&cpu->exit_request, false);
+        return 0;
+    }
+
+    qemu_mutex_unlock_iothread();
+    cpu_exec_start(cpu);
+
+    /*
+     * Inner VCPU loop.
+     */
+    do {
+        if (cpu->vcpu_dirty) {
+            nvmm_set_registers(cpu);
+            cpu->vcpu_dirty = false;
+        }
+
+        if (qcpu->stop) {
+            cpu->exception_index = EXCP_INTERRUPT;
+            qcpu->stop = false;
+            ret = 1;
+            break;
+        }
+
+        nvmm_vcpu_pre_run(cpu);
+
+        if (atomic_read(&cpu->exit_request)) {
+            qemu_cpu_kick_self();
+        }
+
+        ret = nvmm_vcpu_run(mach, vcpu);
+        if (ret == -1) {
+            error_report("NVMM: Failed to exec a virtual processor,"
+                " error=%d", errno);
+            break;
+        }
+
+        nvmm_vcpu_post_run(cpu, exit);
+
+        switch (exit->reason) {
+        case NVMM_EXIT_NONE:
+            break;
+        case NVMM_EXIT_MEMORY:
+            ret = nvmm_handle_mem(mach, vcpu);
+            break;
+        case NVMM_EXIT_IO:
+            ret = nvmm_handle_io(mach, vcpu);
+            break;
+        case NVMM_EXIT_MSR:
+            ret = nvmm_handle_msr(mach, cpu, exit);
+            break;
+        case NVMM_EXIT_INT_READY:
+        case NVMM_EXIT_NMI_READY:
+            break;
+        case NVMM_EXIT_MONITOR:
+        case NVMM_EXIT_MWAIT:
+        case NVMM_EXIT_MWAIT_COND:
+            ret = nvmm_inject_ud(mach, vcpu);
+            break;
+        case NVMM_EXIT_HALTED:
+            ret = nvmm_handle_halted(mach, cpu, exit);
+            break;
+        case NVMM_EXIT_SHUTDOWN:
+            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
+            cpu->exception_index = EXCP_INTERRUPT;
+            ret = 1;
+            break;
+        default:
+            error_report("NVMM: Unexpected VM exit code 0x%lx [hw=0x%lx]",
+                exit->reason, exit->u.inv.hwcode);
+            nvmm_get_registers(cpu);
+            qemu_mutex_lock_iothread();
+            qemu_system_guest_panicked(cpu_get_crash_info(cpu));
+            qemu_mutex_unlock_iothread();
+            ret = -1;
+            break;
+        }
+    } while (ret == 0);
+
+    cpu_exec_end(cpu);
+    qemu_mutex_lock_iothread();
+    current_cpu = cpu;
+
+    atomic_set(&cpu->exit_request, false);
+
+    return ret < 0;
+}
+
+/* -------------------------------------------------------------------------- */
+
+static void
+do_nvmm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
+{
+    nvmm_get_registers(cpu);
+    cpu->vcpu_dirty = true;
+}
+
+static void
+do_nvmm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
+{
+    nvmm_set_registers(cpu);
+    cpu->vcpu_dirty = false;
+}
+
+static void
+do_nvmm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
+{
+    nvmm_set_registers(cpu);
+    cpu->vcpu_dirty = false;
+}
+
+static void
+do_nvmm_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
+{
+    cpu->vcpu_dirty = true;
+}
+
+void nvmm_cpu_synchronize_state(CPUState *cpu)
+{
+    if (!cpu->vcpu_dirty) {
+        run_on_cpu(cpu, do_nvmm_cpu_synchronize_state, RUN_ON_CPU_NULL);
+    }
+}
+
+void nvmm_cpu_synchronize_post_reset(CPUState *cpu)
+{
+    run_on_cpu(cpu, do_nvmm_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
+}
+
+void nvmm_cpu_synchronize_post_init(CPUState *cpu)
+{
+    run_on_cpu(cpu, do_nvmm_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
+}
+
+void nvmm_cpu_synchronize_pre_loadvm(CPUState *cpu)
+{
+    run_on_cpu(cpu, do_nvmm_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
+}
+
+/* -------------------------------------------------------------------------- */
+
+static Error *nvmm_migration_blocker;
+
+static void
+nvmm_ipi_signal(int sigcpu)
+{
+    struct qemu_vcpu *qcpu;
+
+    if (current_cpu) {
+        qcpu = get_qemu_vcpu(current_cpu);
+        qcpu->stop = true;
+    }
+}
+
+static void
+nvmm_init_cpu_signals(void)
+{
+    struct sigaction sigact;
+    sigset_t set;
+
+    /* Install the IPI handler. */
+    memset(&sigact, 0, sizeof(sigact));
+    sigact.sa_handler = nvmm_ipi_signal;
+    sigaction(SIG_IPI, &sigact, NULL);
+
+    /* Allow IPIs on the current thread. */
+    sigprocmask(SIG_BLOCK, NULL, &set);
+    sigdelset(&set, SIG_IPI);
+    pthread_sigmask(SIG_SETMASK, &set, NULL);
+}
+
+int
+nvmm_init_vcpu(CPUState *cpu)
+{
+    struct nvmm_machine *mach = get_nvmm_mach();
+    Error *local_error = NULL;
+    struct qemu_vcpu *qcpu;
+    int ret;
+
+    nvmm_init_cpu_signals();
+
+    if (nvmm_migration_blocker == NULL) {
+        error_setg(&nvmm_migration_blocker,
+            "NVMM: Migration not supported");
+
+        (void)migrate_add_blocker(nvmm_migration_blocker, &local_error);
+        if (local_error) {
+            error_report_err(local_error);
+            migrate_del_blocker(nvmm_migration_blocker);
+            error_free(nvmm_migration_blocker);
+            return -EINVAL;
+        }
+    }
+
+    qcpu = g_malloc0(sizeof(*qcpu));
+    if (qcpu == NULL) {
+        error_report("NVMM: Failed to allocate VCPU context.");
+        return -ENOMEM;
+    }
+
+    ret = nvmm_vcpu_create(mach, cpu->cpu_index, &qcpu->vcpu);
+    if (ret == -1) {
+        error_report("NVMM: Failed to create a virtual processor,"
+            " error=%d", errno);
+        g_free(qcpu);
+        return -EINVAL;
+    }
+
+    cpu->vcpu_dirty = true;
+    cpu->hax_vcpu = (struct hax_vcpu_state *)qcpu;
+
+    return 0;
+}
+
+int
+nvmm_vcpu_exec(CPUState *cpu)
+{
+    int ret, fatal;
+
+    while (1) {
+        if (cpu->exception_index >= EXCP_INTERRUPT) {
+            ret = cpu->exception_index;
+            cpu->exception_index = -1;
+            break;
+        }
+
+        fatal = nvmm_vcpu_loop(cpu);
+
+        if (fatal) {
+            error_report("NVMM: Failed to execute a VCPU.");
+            abort();
+        }
+    }
+
+    return ret;
+}
+
+void
+nvmm_destroy_vcpu(CPUState *cpu)
+{
+    struct nvmm_machine *mach = get_nvmm_mach();
+    struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
+
+    nvmm_vcpu_destroy(mach, &qcpu->vcpu);
+    g_free(cpu->hax_vcpu);
+}
+
+/* -------------------------------------------------------------------------- */
+
+static void
+nvmm_update_mapping(hwaddr start_pa, ram_addr_t size, uintptr_t hva,
+    bool add, bool rom, const char *name)
+{
+    struct nvmm_machine *mach = get_nvmm_mach();
+    int ret, prot;
+
+    if (add) {
+        prot = PROT_READ | PROT_EXEC;
+        if (!rom) {
+            prot |= PROT_WRITE;
+        }
+        ret = nvmm_gpa_map(mach, hva, start_pa, size, prot);
+    } else {
+        ret = nvmm_gpa_unmap(mach, hva, start_pa, size);
+    }
+
+    if (ret == -1) {
+        error_report("NVMM: Failed to %s GPA range '%s' PA:%p, "
+            "Size:%p bytes, HostVA:%p, error=%d",
+            (add ? "map" : "unmap"), name, (void *)(uintptr_t)start_pa,
+            (void *)size, (void *)hva, errno);
+    }
+}
+
+static void
+nvmm_process_section(MemoryRegionSection *section, int add)
+{
+    MemoryRegion *mr = section->mr;
+    hwaddr start_pa = section->offset_within_address_space;
+    ram_addr_t size = int128_get64(section->size);
+    unsigned int delta;
+    uintptr_t hva;
+
+    if (!memory_region_is_ram(mr)) {
+        return;
+    }
+
+    /* Adjust start_pa and size so that they are page-aligned. */
+    delta = qemu_real_host_page_size - (start_pa & ~qemu_real_host_page_mask);
+    delta &= ~qemu_real_host_page_mask;
+    if (delta > size) {
+        return;
+    }
+    start_pa += delta;
+    size -= delta;
+    size &= qemu_real_host_page_mask;
+    if (!size || (start_pa & ~qemu_real_host_page_mask)) {
+        return;
+    }
+
+    hva = (uintptr_t)memory_region_get_ram_ptr(mr) +
+        section->offset_within_region + delta;
+
+    nvmm_update_mapping(start_pa, size, hva, add,
+        memory_region_is_rom(mr), mr->name);
+}
+
+static void
+nvmm_region_add(MemoryListener *listener, MemoryRegionSection *section)
+{
+    memory_region_ref(section->mr);
+    nvmm_process_section(section, 1);
+}
+
+static void
+nvmm_region_del(MemoryListener *listener, MemoryRegionSection *section)
+{
+    nvmm_process_section(section, 0);
+    memory_region_unref(section->mr);
+}
+
+static void
+nvmm_transaction_begin(MemoryListener *listener)
+{
+    /* nothing */
+}
+
+static void
+nvmm_transaction_commit(MemoryListener *listener)
+{
+    /* nothing */
+}
+
+static void
+nvmm_log_sync(MemoryListener *listener, MemoryRegionSection *section)
+{
+    MemoryRegion *mr = section->mr;
+
+    if (!memory_region_is_ram(mr)) {
+        return;
+    }
+
+    memory_region_set_dirty(mr, 0, int128_get64(section->size));
+}
+
+static MemoryListener nvmm_memory_listener = {
+    .begin = nvmm_transaction_begin,
+    .commit = nvmm_transaction_commit,
+    .region_add = nvmm_region_add,
+    .region_del = nvmm_region_del,
+    .log_sync = nvmm_log_sync,
+    .priority = 10,
+};
+
+static void
+nvmm_ram_block_added(RAMBlockNotifier *n, void *host, size_t size)
+{
+    struct nvmm_machine *mach = get_nvmm_mach();
+    uintptr_t hva = (uintptr_t)host;
+    int ret;
+
+    ret = nvmm_hva_map(mach, hva, size);
+
+    if (ret == -1) {
+        error_report("NVMM: Failed to map HVA, HostVA:%p "
+            "Size:%p bytes, error=%d",
+            (void *)hva, (void *)size, errno);
+    }
+}
+
+static struct RAMBlockNotifier nvmm_ram_notifier = {
+    .ram_block_added = nvmm_ram_block_added
+};
+
+/* -------------------------------------------------------------------------- */
+
+static void
+nvmm_handle_interrupt(CPUState *cpu, int mask)
+{
+    cpu->interrupt_request |= mask;
+
+    if (!qemu_cpu_is_self(cpu)) {
+        qemu_cpu_kick(cpu);
+    }
+}
+
+/* -------------------------------------------------------------------------- */
+
+static int
+nvmm_accel_configure(struct nvmm_machine *mach)
+{
+    struct nvmm_mach_conf_x86_cpuid cpuid;
+    int ret;
+
+    memset(&cpuid, 0, sizeof(cpuid));
+    cpuid.leaf = 0x00000001;
+    cpuid.del.edx = CPUID_MCE | CPUID_MCA | CPUID_MTRR;
+
+    ret = nvmm_machine_configure(mach, NVMM_MACH_CONF_X86_CPUID, &cpuid);
+    if (ret == -1)
+        return -1;
+
+    ret = nvmm_machine_configure(mach, NVMM_MACH_CONF_CALLBACKS,
+        &nvmm_callbacks);
+    if (ret == -1)
+        return -1;
+
+    return 0;
+}
+
+static int
+nvmm_accel_init(MachineState *ms)
+{
+    struct nvmm_capability cap;
+    int ret;
+
+    ret = nvmm_capability(&cap);
+    if (ret == -1) {
+        error_report("NVMM: No accelerator found, error=%d", errno);
+        return -ENOSPC;
+    }
+    if (cap.version != 1) {
+        error_report("NVMM: Unsupported version %lu", cap.version);
+        return -ENOSPC;
+    }
+    if (cap.state_size != sizeof(struct nvmm_x64_state)) {
+        error_report("NVMM: Wrong state size %zu", cap.state_size);
+        return -ENOSPC;
+    }
+
+    ret = nvmm_machine_create(&qemu_mach.mach);
+    if (ret == -1) {
+        error_report("NVMM: Machine creation failed, error=%d", errno);
+        return -ENOSPC;
+    }
+
+    ret = nvmm_accel_configure(&qemu_mach.mach);
+    if (ret == -1) {
+        error_report("NVMM: Machine configuration failed, error=%d",
+            errno);
+        return -ENOSPC;
+    }
+
+    memory_listener_register(&nvmm_memory_listener, &address_space_memory);
+    ram_block_notifier_add(&nvmm_ram_notifier);
+
+    cpu_interrupt_handler = nvmm_handle_interrupt;
+
+    printf("NetBSD Virtual Machine Monitor accelerator is operational\n");
+    return 0;
+}
+
+int
+nvmm_enabled(void)
+{
+    return nvmm_allowed;
+}
+
+static void
+nvmm_accel_class_init(ObjectClass *oc, void *data)
+{
+    AccelClass *ac = ACCEL_CLASS(oc);
+    ac->name = "NVMM";
+    ac->init_machine = nvmm_accel_init;
+    ac->allowed = &nvmm_allowed;
+}
+
+static const TypeInfo nvmm_accel_type = {
+    .name = ACCEL_CLASS_NAME("nvmm"),
+    .parent = TYPE_ACCEL,
+    .class_init = nvmm_accel_class_init,
+};
+
+static void
+nvmm_type_init(void)
+{
+    type_register_static(&nvmm_accel_type);
+}
+
+type_init(nvmm_type_init);
diff --git a/qemu-nvmm/patches/patch-Makefile b/qemu-nvmm/patches/patch-Makefile
index 76fa15ebb6..5053850250 100644
--- a/qemu-nvmm/patches/patch-Makefile
+++ b/qemu-nvmm/patches/patch-Makefile
@@ -1,12 +1,12 @@
-$NetBSD: patch-Makefile,v 1.1 2017/12/14 02:03:53 kamil Exp $
+$NetBSD: patch-Makefile,v 1.2 2019/08/16 15:12:19 adam Exp $
 
---- Makefile.orig	2017-12-13 17:27:20.000000000 +0000
+--- Makefile.orig	2019-08-15 19:01:42.000000000 +0000
 +++ Makefile
-@@ -385,6 +385,7 @@ subdir-%:
+@@ -474,6 +474,7 @@ $(TARGET_DIRS_RULES):
  DTC_MAKE_ARGS=-I$(SRC_PATH)/dtc VPATH=$(SRC_PATH)/dtc -C dtc V="$(V)" LIBFDT_srcdir=$(SRC_PATH)/dtc/libfdt
  DTC_CFLAGS=$(CFLAGS) $(QEMU_CFLAGS)
  DTC_CPPFLAGS=-I$(BUILD_DIR)/dtc -I$(SRC_PATH)/dtc -I$(SRC_PATH)/dtc/libfdt
 +ARFLAGS=	-rcs
  
- subdir-dtc: .git-submodule-status dtc/libfdt dtc/tests
- 	$(call quiet-command,$(MAKE) $(DTC_MAKE_ARGS) CPPFLAGS="$(DTC_CPPFLAGS)" CFLAGS="$(DTC_CFLAGS)" LDFLAGS="$(LDFLAGS)" ARFLAGS="$(ARFLAGS)" CC="$(CC)" AR="$(AR)" LD="$(LD)" $(SUBDIR_MAKEFLAGS) libfdt/libfdt.a,)
+ .PHONY: dtc/all
+ dtc/all: .git-submodule-status dtc/libfdt dtc/tests
diff --git a/qemu-nvmm/patches/patch-block.c b/qemu-nvmm/patches/patch-block.c
deleted file mode 100644
index c32359b910..0000000000
--- a/qemu-nvmm/patches/patch-block.c
+++ /dev/null
@@ -1,42 +0,0 @@
-$NetBSD: patch-block.c,v 1.2 2018/04/25 07:56:05 adam Exp $
-
-Remove block driver whitelisting logic; reasons being:
-- PkgSrc does not configure Qemu to use whitelisting
-- sometimes CONFIG...WHITELIST macros contain ["", NULL],
-  and bdrv_is_whitelisted() fails.
-
---- block.c.orig	2018-03-27 22:29:23.000000000 +0000
-+++ block.c
-@@ -373,31 +373,7 @@ BlockDriver *bdrv_find_format(const char
- 
- int bdrv_is_whitelisted(BlockDriver *drv, bool read_only)
- {
--    static const char *whitelist_rw[] = {
--        CONFIG_BDRV_RW_WHITELIST
--    };
--    static const char *whitelist_ro[] = {
--        CONFIG_BDRV_RO_WHITELIST
--    };
--    const char **p;
--
--    if (!whitelist_rw[0] && !whitelist_ro[0]) {
--        return 1;               /* no whitelist, anything goes */
--    }
--
--    for (p = whitelist_rw; *p; p++) {
--        if (!strcmp(drv->format_name, *p)) {
--            return 1;
--        }
--    }
--    if (read_only) {
--        for (p = whitelist_ro; *p; p++) {
--            if (!strcmp(drv->format_name, *p)) {
--                return 1;
--            }
--        }
--    }
--    return 0;
-+    return 1;
- }
- 
- bool bdrv_uses_whitelist(void)
diff --git a/qemu-nvmm/patches/patch-hw_arm_boot.c b/qemu-nvmm/patches/patch-hw_arm_boot.c
deleted file mode 100644
index 60b69f66a8..0000000000
--- a/qemu-nvmm/patches/patch-hw_arm_boot.c
+++ /dev/null
@@ -1,26 +0,0 @@
-$NetBSD: patch-hw_arm_boot.c,v 1.1 2018/11/05 07:27:59 skrll Exp $
-
---- hw/arm/boot.c.orig	2018-11-04 17:27:47.000000000 +0000
-+++ hw/arm/boot.c
-@@ -29,8 +29,9 @@
-  * Documentation/arm/Booting and Documentation/arm64/booting.txt
-  * They have different preferred image load offsets from system RAM base.
-  */
--#define KERNEL_ARGS_ADDR 0x100
--#define KERNEL_LOAD_ADDR 0x00010000
-+#define KERNEL_ARGS_ADDR   0x100
-+#define KERNEL_NOLOAD_ADDR 0x00000000
-+#define KERNEL_LOAD_ADDR   0x00010000
- #define KERNEL64_LOAD_ADDR 0x00080000
- 
- #define ARM64_TEXT_OFFSET_OFFSET    8
-@@ -1049,7 +1050,8 @@ void arm_load_kernel(ARMCPU *cpu, struct
-     }
-     entry = elf_entry;
-     if (kernel_size < 0) {
--        kernel_size = load_uimage_as(info->kernel_filename, &entry, NULL,
-+        uint64_t loadaddr = info->loader_start + KERNEL_NOLOAD_ADDR;
-+        kernel_size = load_uimage_as(info->kernel_filename, &entry, &loadaddr,
-                                      &is_linux, NULL, NULL, as);
-     }
-     if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64) && kernel_size < 0) {
diff --git a/qemu-nvmm/patches/patch-hw_core_loader.c b/qemu-nvmm/patches/patch-hw_core_loader.c
deleted file mode 100644
index 700792064e..0000000000
--- a/qemu-nvmm/patches/patch-hw_core_loader.c
+++ /dev/null
@@ -1,27 +0,0 @@
-$NetBSD: patch-hw_core_loader.c,v 1.1 2018/11/05 07:27:59 skrll Exp $
-
---- hw/core/loader.c.orig	2018-08-14 19:10:34.000000000 +0000
-+++ hw/core/loader.c
-@@ -637,13 +637,19 @@ static int load_uboot_image(const char *
-         goto out;
- 
-     if (hdr->ih_type != image_type) {
--        fprintf(stderr, "Wrong image type %d, expected %d\n", hdr->ih_type,
--                image_type);
--        goto out;
-+        if (image_type != IH_TYPE_KERNEL && hdr->ih_type != IH_TYPE_KERNEL_NOLOAD) {
-+            fprintf(stderr, "Wrong image type %d, expected %d\n", hdr->ih_type,
-+                    image_type);
-+            goto out;
-+        }
-     }
- 
-     /* TODO: Implement other image types.  */
-     switch (hdr->ih_type) {
-+    case IH_TYPE_KERNEL_NOLOAD:
-+        hdr->ih_load = *loadaddr + sizeof(*hdr);
-+        hdr->ih_ep += hdr->ih_load;
-+
-     case IH_TYPE_KERNEL:
-         address = hdr->ih_load;
-         if (translate_fn) {
diff --git a/qemu-nvmm/patches/patch-hw_core_uboot__image.h b/qemu-nvmm/patches/patch-hw_core_uboot__image.h
index a9a005762b..60c80cba3d 100644
--- a/qemu-nvmm/patches/patch-hw_core_uboot__image.h
+++ b/qemu-nvmm/patches/patch-hw_core_uboot__image.h
@@ -1,6 +1,6 @@
-$NetBSD: patch-hw_core_uboot__image.h,v 1.1 2018/11/05 07:27:59 skrll Exp $
+$NetBSD: patch-hw_core_uboot__image.h,v 1.2 2019/04/24 13:59:32 ryoon Exp $
 
---- hw/core/uboot_image.h.orig	2018-08-14 19:10:34.000000000 +0000
+--- hw/core/uboot_image.h.orig	2019-04-23 18:14:45.000000000 +0000
 +++ hw/core/uboot_image.h
 @@ -75,6 +75,7 @@
  #define IH_CPU_NIOS2		15	/* Nios-II	*/
@@ -10,11 +10,3 @@ $NetBSD: patch-hw_core_uboot__image.h,v 1.1 2018/11/05 07:27:59 skrll Exp $
  
  /*
   * Image Types
-@@ -124,6 +125,7 @@
- #define IH_TYPE_SCRIPT		6	/* Script file			*/
- #define IH_TYPE_FILESYSTEM	7	/* Filesystem Image (any type)	*/
- #define IH_TYPE_FLATDT		8	/* Binary Flat Device Tree Blob	*/
-+#define IH_TYPE_KERNEL_NOLOAD  14	/* OS Kernel Image (noload)	*/
- 
- /*
-  * Compression Types
diff --git a/qemu-nvmm/patches/patch-hw_usb_dev-mtp.c b/qemu-nvmm/patches/patch-hw_usb_dev-mtp.c
index 6358e81a2b..ef27d99303 100644
--- a/qemu-nvmm/patches/patch-hw_usb_dev-mtp.c
+++ b/qemu-nvmm/patches/patch-hw_usb_dev-mtp.c
@@ -1,12 +1,12 @@
-$NetBSD: patch-hw_usb_dev-mtp.c,v 1.2 2018/08/16 10:15:09 adam Exp $
+$NetBSD: patch-hw_usb_dev-mtp.c,v 1.4 2019/06/28 17:11:14 jperkin Exp $
 
-Support NAME_MAX.
+Support NAME_MAX and compat for O_DIRECTORY.
 
---- hw/usb/dev-mtp.c.orig	2018-08-14 19:10:34.000000000 +0000
+--- hw/usb/dev-mtp.c.orig	2019-04-23 18:14:46.000000000 +0000
 +++ hw/usb/dev-mtp.c
 @@ -26,6 +26,10 @@
- #include "hw/usb.h"
  #include "desc.h"
+ #include "qemu/units.h"
  
 +#ifndef NAME_MAX
 +#define NAME_MAX 255
@@ -15,3 +15,13 @@ Support NAME_MAX.
  /* ----------------------------------------------------------------------- */
  
  enum mtp_container_type {
+@@ -614,6 +618,9 @@ static void usb_mtp_object_readdir(MTPSt
+     }
+     o->have_children = true;
+ 
++#ifndef O_DIRECTORY
++#define O_DIRECTORY	0
++#endif
+     fd = open(o->path, O_DIRECTORY | O_CLOEXEC | O_NOFOLLOW);
+     if (fd < 0) {
+         return;
diff --git a/qemu-nvmm/patches/patch-nvmm-support b/qemu-nvmm/patches/patch-nvmm-support
index 852e054eee..e67fac5dc5 100644
--- a/qemu-nvmm/patches/patch-nvmm-support
+++ b/qemu-nvmm/patches/patch-nvmm-support
@@ -1,9 +1,9 @@
-$NetBSD: patch-nvmm_support,v 1.1 2018/10/29 00:00:00 maxv Exp $
+$NetBSD$
 
 Add NVMM support.
 
---- accel/stubs/Makefile.objs	2018-12-11 18:44:34.000000000 +0100
-+++ accel/stubs/Makefile.objs	2019-05-01 11:53:33.068579985 +0200
+--- accel/stubs/Makefile.objs.orig	2019-08-15 19:01:42.000000000 +0000
++++ accel/stubs/Makefile.objs
 @@ -1,5 +1,6 @@
  obj-$(call lnot,$(CONFIG_HAX))  += hax-stub.o
  obj-$(call lnot,$(CONFIG_HVF))  += hvf-stub.o
@@ -11,55 +11,9 @@ Add NVMM support.
 +obj-$(call lnot,$(CONFIG_NVMM)) += nvmm-stub.o
  obj-$(call lnot,$(CONFIG_KVM))  += kvm-stub.o
  obj-$(call lnot,$(CONFIG_TCG))  += tcg-stub.o
---- accel/stubs/nvmm-stub.c	1970-01-01 01:00:00.000000000 +0100
-+++ accel/stubs/nvmm-stub.c	2019-05-01 11:53:33.087579596 +0200
-@@ -0,0 +1,43 @@
-+/*
-+ * Copyright (c) 2018-2019 Maxime Villard, All rights reserved.
-+ *
-+ * NetBSD Virtual Machine Monitor (NVMM) accelerator stub.
-+ *
-+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
-+ * See the COPYING file in the top-level directory.
-+ */
-+
-+#include "qemu/osdep.h"
-+#include "qemu-common.h"
-+#include "cpu.h"
-+#include "sysemu/nvmm.h"
-+
-+int nvmm_init_vcpu(CPUState *cpu)
-+{
-+    return -1;
-+}
-+
-+int nvmm_vcpu_exec(CPUState *cpu)
-+{
-+    return -1;
-+}
-+
-+void nvmm_destroy_vcpu(CPUState *cpu)
-+{
-+}
-+
-+void nvmm_cpu_synchronize_state(CPUState *cpu)
-+{
-+}
-+
-+void nvmm_cpu_synchronize_post_reset(CPUState *cpu)
-+{
-+}
-+
-+void nvmm_cpu_synchronize_post_init(CPUState *cpu)
-+{
-+}
-+
-+void nvmm_cpu_synchronize_pre_loadvm(CPUState *cpu)
-+{
-+}
---- configure	2018-12-11 18:44:34.000000000 +0100
-+++ configure	2019-05-01 11:53:33.090579534 +0200
-@@ -237,6 +237,17 @@
+--- configure.orig	2019-08-15 19:01:42.000000000 +0000
++++ configure
+@@ -240,6 +240,17 @@ supported_whpx_target() {
      return 1
  }
  
@@ -67,7 +21,7 @@ Add NVMM support.
 +    test "$nvmm" = "yes" || return 1
 +    glob "$1" "*-softmmu" || return 1
 +    case "${1%-softmmu}" in
-+        x86_64)
++        i386|x86_64)
 +            return 0
 +        ;;
 +    esac
@@ -77,7 +31,7 @@ Add NVMM support.
  supported_target() {
      case "$1" in
          *-softmmu)
-@@ -264,6 +275,7 @@
+@@ -267,6 +278,7 @@ supported_target() {
      supported_hax_target "$1" && return 0
      supported_hvf_target "$1" && return 0
      supported_whpx_target "$1" && return 0
@@ -85,7 +39,7 @@ Add NVMM support.
      print_error "TCG disabled, but hardware accelerator not available for '$target'"
      return 1
  }
-@@ -375,6 +387,7 @@
+@@ -386,6 +398,7 @@ kvm="no"
  hax="no"
  hvf="no"
  whpx="no"
@@ -93,7 +47,7 @@ Add NVMM support.
  rdma=""
  pvrdma=""
  gprof="no"
-@@ -1143,6 +1156,10 @@
+@@ -1187,6 +1200,10 @@ for opt do
    ;;
    --enable-whpx) whpx="yes"
    ;;
@@ -104,7 +58,7 @@ Add NVMM support.
    --disable-tcg-interpreter) tcg_interpreter="no"
    ;;
    --enable-tcg-interpreter) tcg_interpreter="yes"
-@@ -1724,6 +1741,7 @@
+@@ -1785,6 +1802,7 @@ disabled with --disable-FEATURE, default
    hax             HAX acceleration support
    hvf             Hypervisor.framework acceleration support
    whpx            Windows Hypervisor Platform acceleration support
@@ -112,15 +66,15 @@ Add NVMM support.
    rdma            Enable RDMA-based migration
    pvrdma          Enable PVRDMA support
    vde             support for vde network
-@@ -2659,6 +2677,20 @@
+@@ -2757,6 +2775,20 @@ if test "$whpx" != "no" ; then
  fi
  
  ##########################################
 +# NetBSD Virtual Machine Monitor (NVMM) accelerator check
 +if test "$nvmm" != "no" ; then
-+    if check_include "nvmm.h"; then
++    if check_include "nvmm.h" ; then
 +        nvmm="yes"
-+        LIBS="-lnvmm $LIBS"
++	LIBS="-lnvmm $LIBS"
 +    else
 +        if test "$nvmm" = "yes"; then
 +            feature_not_found "NVMM" "NVMM is not available"
@@ -133,7 +87,7 @@ Add NVMM support.
  # Sparse probe
  if test "$sparse" != "no" ; then
    if has cgcc; then
-@@ -6033,6 +6065,7 @@
+@@ -6404,6 +6436,7 @@ echo "KVM support       $kvm"
  echo "HAX support       $hax"
  echo "HVF support       $hvf"
  echo "WHPX support      $whpx"
@@ -141,7 +95,7 @@ Add NVMM support.
  echo "TCG support       $tcg"
  if test "$tcg" = "yes" ; then
      echo "TCG debug enabled $debug_tcg"
-@@ -7291,6 +7324,9 @@
+@@ -7717,6 +7750,9 @@ fi
  if supported_whpx_target $target; then
      echo "CONFIG_WHPX=y" >> $config_target_mak
  fi
@@ -151,9 +105,9 @@ Add NVMM support.
  if test "$target_bigendian" = "yes" ; then
    echo "TARGET_WORDS_BIGENDIAN=y" >> $config_target_mak
  fi
---- cpus.c	2018-12-11 18:44:34.000000000 +0100
-+++ cpus.c	2019-05-01 11:53:33.092579493 +0200
-@@ -40,6 +40,7 @@
+--- cpus.c.orig	2019-08-15 19:01:42.000000000 +0000
++++ cpus.c
+@@ -41,6 +41,7 @@
  #include "sysemu/hax.h"
  #include "sysemu/hvf.h"
  #include "sysemu/whpx.h"
@@ -161,7 +115,7 @@ Add NVMM support.
  #include "exec/exec-all.h"
  
  #include "qemu/thread.h"
-@@ -1691,6 +1692,48 @@
+@@ -1700,6 +1701,48 @@ static void *qemu_whpx_cpu_thread_fn(voi
      return NULL;
  }
  
@@ -210,7 +164,7 @@ Add NVMM support.
  #ifdef _WIN32
  static void CALLBACK dummy_apc_func(ULONG_PTR unused)
  {
-@@ -2051,6 +2094,19 @@
+@@ -2061,6 +2104,19 @@ static void qemu_whpx_start_vcpu(CPUStat
  #endif
  }
  
@@ -230,7 +184,7 @@ Add NVMM support.
  static void qemu_dummy_start_vcpu(CPUState *cpu)
  {
      char thread_name[VCPU_THREAD_NAME_SIZE];
-@@ -2088,6 +2144,8 @@
+@@ -2101,6 +2157,8 @@ void qemu_init_vcpu(CPUState *cpu)
          qemu_tcg_init_vcpu(cpu);
      } else if (whpx_enabled()) {
          qemu_whpx_start_vcpu(cpu);
@@ -239,8 +193,8 @@ Add NVMM support.
      } else {
          qemu_dummy_start_vcpu(cpu);
      }
---- include/sysemu/hw_accel.h	2018-12-11 18:44:34.000000000 +0100
-+++ include/sysemu/hw_accel.h	2019-05-01 11:53:33.092579493 +0200
+--- include/sysemu/hw_accel.h.orig	2019-08-15 19:01:42.000000000 +0000
++++ include/sysemu/hw_accel.h
 @@ -15,6 +15,7 @@
  #include "sysemu/hax.h"
  #include "sysemu/kvm.h"
@@ -249,7 +203,7 @@ Add NVMM support.
  
  static inline void cpu_synchronize_state(CPUState *cpu)
  {
-@@ -27,6 +28,9 @@
+@@ -27,6 +28,9 @@ static inline void cpu_synchronize_state
      if (whpx_enabled()) {
          whpx_cpu_synchronize_state(cpu);
      }
@@ -259,17 +213,18 @@ Add NVMM support.
  }
  
  static inline void cpu_synchronize_post_reset(CPUState *cpu)
-@@ -40,6 +44,9 @@
+@@ -40,6 +44,10 @@ static inline void cpu_synchronize_post_
      if (whpx_enabled()) {
          whpx_cpu_synchronize_post_reset(cpu);
      }
 +    if (nvmm_enabled()) {
 +        nvmm_cpu_synchronize_post_reset(cpu);
 +    }
++
  }
  
  static inline void cpu_synchronize_post_init(CPUState *cpu)
-@@ -53,6 +60,9 @@
+@@ -53,6 +61,9 @@ static inline void cpu_synchronize_post_
      if (whpx_enabled()) {
          whpx_cpu_synchronize_post_init(cpu);
      }
@@ -279,7 +234,7 @@ Add NVMM support.
  }
  
  static inline void cpu_synchronize_pre_loadvm(CPUState *cpu)
-@@ -66,6 +76,9 @@
+@@ -66,6 +77,9 @@ static inline void cpu_synchronize_pre_l
      if (whpx_enabled()) {
          whpx_cpu_synchronize_pre_loadvm(cpu);
      }
@@ -289,1272 +244,44 @@ Add NVMM support.
  }
  
  #endif /* QEMU_HW_ACCEL_H */
---- include/sysemu/nvmm.h	1970-01-01 01:00:00.000000000 +0100
-+++ include/sysemu/nvmm.h	2019-05-01 11:53:33.093579472 +0200
-@@ -0,0 +1,35 @@
-+/*
-+ * Copyright (c) 2018-2019 Maxime Villard, All rights reserved.
-+ *
-+ * NetBSD Virtual Machine Monitor (NVMM) accelerator support.
-+ *
-+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
-+ * See the COPYING file in the top-level directory.
-+ */
-+
-+#ifndef QEMU_NVMM_H
-+#define QEMU_NVMM_H
-+
-+#include "config-host.h"
-+#include "qemu-common.h"
-+
-+int nvmm_init_vcpu(CPUState *);
-+int nvmm_vcpu_exec(CPUState *);
-+void nvmm_destroy_vcpu(CPUState *);
-+
-+void nvmm_cpu_synchronize_state(CPUState *);
-+void nvmm_cpu_synchronize_post_reset(CPUState *);
-+void nvmm_cpu_synchronize_post_init(CPUState *);
-+void nvmm_cpu_synchronize_pre_loadvm(CPUState *);
-+
-+#ifdef CONFIG_NVMM
-+
-+int nvmm_enabled(void);
-+
-+#else /* CONFIG_NVMM */
-+
-+#define nvmm_enabled() (0)
-+
-+#endif /* CONFIG_NVMM */
-+
-+#endif /* CONFIG_NVMM */
---- qemu-options.hx	2018-12-11 18:44:34.000000000 +0100
-+++ qemu-options.hx	2019-05-01 11:53:33.093579472 +0200
-@@ -66,7 +66,7 @@
+--- qemu-options.hx.orig	2019-08-15 19:01:43.000000000 +0000
++++ qemu-options.hx
+@@ -31,7 +31,7 @@ DEF("machine", HAS_ARG, QEMU_OPTION_mach
+     "-machine [type=]name[,prop[=value][,...]]\n"
+     "                selects emulated machine ('-machine help' for list)\n"
+     "                property accel=accel1[:accel2[:...]] selects accelerator\n"
+-    "                supported accelerators are kvm, xen, hax, hvf, whpx or tcg (default: tcg)\n"
++    "                supported accelerators are kvm, xen, hax, hvf, nvmm, whpx or tcg (default: tcg)\n"
+     "                kernel_irqchip=on|off|split controls accelerated irqchip support (default=off)\n"
+     "                vmport=on|off|auto controls emulation of vmport (default: auto)\n"
+     "                kvm_shadow_mem=size of KVM shadow MMU in bytes\n"
+@@ -66,7 +66,7 @@ Supported machine properties are:
  @table @option
  @item accel=@var{accels1}[:@var{accels2}[:...]]
  This is used to enable an accelerator. Depending on the target architecture,
 -kvm, xen, hax, hvf, whpx or tcg can be available. By default, tcg is used. If there is
-+kvm, xen, hax, hvf, whpx, nvmm or tcg can be available. By default, tcg is used. If there is
++kvm, xen, hax, hvf, nvmm, whpx or tcg can be available. By default, tcg is used. If there is
  more than one accelerator specified, the next one is used if the previous one
  fails to initialize.
  @item kernel_irqchip=on|off
-@@ -119,13 +119,13 @@
- 
- DEF("accel", HAS_ARG, QEMU_OPTION_accel,
-     "-accel [accel=]accelerator[,thread=single|multi]\n"
--    "                select accelerator (kvm, xen, hax, hvf, whpx or tcg; use 'help' for a list)\n"
-+    "                select accelerator (kvm, xen, hax, hvf, whpx, nvmm or tcg; use 'help' for a list)\n"
-     "                thread=single|multi (enable multi-threaded TCG)\n", QEMU_ARCH_ALL)
- STEXI
- @item -accel @var{name}[,prop=@var{value}[,...]]
- @findex -accel
- This is used to enable an accelerator. Depending on the target architecture,
--kvm, xen, hax, hvf, whpx or tcg can be available. By default, tcg is used. If there is
-+kvm, xen, hax, hvf, whpx, nvmm or tcg can be available. By default, tcg is used. If there is
- more than one accelerator specified, the next one is used if the previous one
- fails to initialize.
- @table @option
---- target/i386/helper.c	2018-12-11 18:44:34.000000000 +0100
-+++ target/i386/helper.c	2019-05-01 11:53:33.093579472 +0200
-@@ -986,7 +986,7 @@
-     X86CPU *cpu = x86_env_get_cpu(env);
-     CPUState *cs = CPU(cpu);
+--- target/i386/helper.c.orig	2019-08-15 19:01:43.000000000 +0000
++++ target/i386/helper.c
+@@ -978,7 +978,7 @@ void cpu_report_tpr_access(CPUX86State *
+     X86CPU *cpu = env_archcpu(env);
+     CPUState *cs = env_cpu(env);
  
 -    if (kvm_enabled() || whpx_enabled()) {
 +    if (kvm_enabled() || whpx_enabled() || nvmm_enabled()) {
          env->tpr_access_type = access;
  
          cpu_interrupt(cs, CPU_INTERRUPT_TPR);
---- target/i386/Makefile.objs	2018-12-11 18:44:34.000000000 +0100
-+++ target/i386/Makefile.objs	2019-05-01 11:53:33.094579452 +0200
-@@ -17,6 +17,7 @@
- obj-$(CONFIG_HVF) += hvf/
+--- target/i386/Makefile.objs.orig	2019-08-15 19:01:43.000000000 +0000
++++ target/i386/Makefile.objs
+@@ -17,6 +17,7 @@ obj-$(CONFIG_HAX) += hax-all.o hax-mem.o
  endif
+ obj-$(CONFIG_HVF) += hvf/
  obj-$(CONFIG_WHPX) += whpx-all.o
 +obj-$(CONFIG_NVMM) += nvmm-all.o
  endif
  obj-$(CONFIG_SEV) += sev.o
  obj-$(call lnot,$(CONFIG_SEV)) += sev-stub.o
---- target/i386/nvmm-all.c	1970-01-01 01:00:00.000000000 +0100
-+++ target/i386/nvmm-all.c	2019-08-03 08:50:06.085935090 +0200
-@@ -0,0 +1,1168 @@
-+/*
-+ * Copyright (c) 2018-2019 Maxime Villard, All rights reserved.
-+ *
-+ * NetBSD Virtual Machine Monitor (NVMM) accelerator for QEMU.
-+ *
-+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
-+ * See the COPYING file in the top-level directory.
-+ */
-+
-+#include "qemu/osdep.h"
-+#include "cpu.h"
-+#include "exec/address-spaces.h"
-+#include "exec/ioport.h"
-+#include "qemu-common.h"
-+#include "strings.h"
-+#include "sysemu/accel.h"
-+#include "sysemu/nvmm.h"
-+#include "sysemu/sysemu.h"
-+#include "sysemu/cpus.h"
-+#include "qemu/main-loop.h"
-+#include "hw/boards.h"
-+#include "qemu/error-report.h"
-+#include "qemu/queue.h"
-+#include "qapi/error.h"
-+#include "migration/blocker.h"
-+
-+#include <nvmm.h>
-+
-+struct qemu_vcpu {
-+    struct nvmm_vcpu vcpu;
-+    uint8_t tpr;
-+    bool stop;
-+
-+    /* Window-exiting for INTs/NMIs. */
-+    bool int_window_exit;
-+    bool nmi_window_exit;
-+
-+    /* The guest is in an interrupt shadow (POP SS, etc). */
-+    bool int_shadow;
-+};
-+
-+struct qemu_machine {
-+    struct nvmm_machine mach;
-+};
-+
-+/* -------------------------------------------------------------------------- */
-+
-+static bool nvmm_allowed = false;
-+static struct qemu_machine qemu_mach;
-+
-+static struct qemu_vcpu *
-+get_qemu_vcpu(CPUState *cpu)
-+{
-+    return (struct qemu_vcpu *)cpu->hax_vcpu;
-+}
-+
-+static struct nvmm_machine *
-+get_nvmm_mach(void)
-+{
-+    return &qemu_mach.mach;
-+}
-+
-+/* -------------------------------------------------------------------------- */
-+
-+static void
-+nvmm_set_segment(struct nvmm_x64_state_seg *nseg, const SegmentCache *qseg)
-+{
-+    uint32_t attrib = qseg->flags;
-+
-+    nseg->selector = qseg->selector;
-+    nseg->limit = qseg->limit;
-+    nseg->base = qseg->base;
-+    nseg->attrib.type = __SHIFTOUT(attrib, DESC_TYPE_MASK);
-+    nseg->attrib.s = __SHIFTOUT(attrib, DESC_S_MASK);
-+    nseg->attrib.dpl = __SHIFTOUT(attrib, DESC_DPL_MASK);
-+    nseg->attrib.p = __SHIFTOUT(attrib, DESC_P_MASK);
-+    nseg->attrib.avl = __SHIFTOUT(attrib, DESC_AVL_MASK);
-+    nseg->attrib.l = __SHIFTOUT(attrib, DESC_L_MASK);
-+    nseg->attrib.def = __SHIFTOUT(attrib, DESC_B_MASK);
-+    nseg->attrib.g = __SHIFTOUT(attrib, DESC_G_MASK);
-+}
-+
-+static void
-+nvmm_set_registers(CPUState *cpu)
-+{
-+    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
-+    struct nvmm_machine *mach = get_nvmm_mach();
-+    struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
-+    struct nvmm_vcpu *vcpu = &qcpu->vcpu;
-+    struct nvmm_x64_state *state = vcpu->state;
-+    uint64_t bitmap;
-+    size_t i;
-+    int ret;
-+
-+    assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
-+
-+    /* GPRs. */
-+    state->gprs[NVMM_X64_GPR_RAX] = env->regs[R_EAX];
-+    state->gprs[NVMM_X64_GPR_RCX] = env->regs[R_ECX];
-+    state->gprs[NVMM_X64_GPR_RDX] = env->regs[R_EDX];
-+    state->gprs[NVMM_X64_GPR_RBX] = env->regs[R_EBX];
-+    state->gprs[NVMM_X64_GPR_RSP] = env->regs[R_ESP];
-+    state->gprs[NVMM_X64_GPR_RBP] = env->regs[R_EBP];
-+    state->gprs[NVMM_X64_GPR_RSI] = env->regs[R_ESI];
-+    state->gprs[NVMM_X64_GPR_RDI] = env->regs[R_EDI];
-+    state->gprs[NVMM_X64_GPR_R8]  = env->regs[R_R8];
-+    state->gprs[NVMM_X64_GPR_R9]  = env->regs[R_R9];
-+    state->gprs[NVMM_X64_GPR_R10] = env->regs[R_R10];
-+    state->gprs[NVMM_X64_GPR_R11] = env->regs[R_R11];
-+    state->gprs[NVMM_X64_GPR_R12] = env->regs[R_R12];
-+    state->gprs[NVMM_X64_GPR_R13] = env->regs[R_R13];
-+    state->gprs[NVMM_X64_GPR_R14] = env->regs[R_R14];
-+    state->gprs[NVMM_X64_GPR_R15] = env->regs[R_R15];
-+
-+    /* RIP and RFLAGS. */
-+    state->gprs[NVMM_X64_GPR_RIP] = env->eip;
-+    state->gprs[NVMM_X64_GPR_RFLAGS] = env->eflags;
-+
-+    /* Segments. */
-+    nvmm_set_segment(&state->segs[NVMM_X64_SEG_CS], &env->segs[R_CS]);
-+    nvmm_set_segment(&state->segs[NVMM_X64_SEG_DS], &env->segs[R_DS]);
-+    nvmm_set_segment(&state->segs[NVMM_X64_SEG_ES], &env->segs[R_ES]);
-+    nvmm_set_segment(&state->segs[NVMM_X64_SEG_FS], &env->segs[R_FS]);
-+    nvmm_set_segment(&state->segs[NVMM_X64_SEG_GS], &env->segs[R_GS]);
-+    nvmm_set_segment(&state->segs[NVMM_X64_SEG_SS], &env->segs[R_SS]);
-+
-+    /* Special segments. */
-+    nvmm_set_segment(&state->segs[NVMM_X64_SEG_GDT], &env->gdt);
-+    nvmm_set_segment(&state->segs[NVMM_X64_SEG_LDT], &env->ldt);
-+    nvmm_set_segment(&state->segs[NVMM_X64_SEG_TR], &env->tr);
-+    nvmm_set_segment(&state->segs[NVMM_X64_SEG_IDT], &env->idt);
-+
-+    /* Control registers. */
-+    state->crs[NVMM_X64_CR_CR0] = env->cr[0];
-+    state->crs[NVMM_X64_CR_CR2] = env->cr[2];
-+    state->crs[NVMM_X64_CR_CR3] = env->cr[3];
-+    state->crs[NVMM_X64_CR_CR4] = env->cr[4];
-+    state->crs[NVMM_X64_CR_CR8] = qcpu->tpr;
-+    state->crs[NVMM_X64_CR_XCR0] = env->xcr0;
-+
-+    /* Debug registers. */
-+    state->drs[NVMM_X64_DR_DR0] = env->dr[0];
-+    state->drs[NVMM_X64_DR_DR1] = env->dr[1];
-+    state->drs[NVMM_X64_DR_DR2] = env->dr[2];
-+    state->drs[NVMM_X64_DR_DR3] = env->dr[3];
-+    state->drs[NVMM_X64_DR_DR6] = env->dr[6];
-+    state->drs[NVMM_X64_DR_DR7] = env->dr[7];
-+
-+    /* FPU. */
-+    state->fpu.fx_cw = env->fpuc;
-+    state->fpu.fx_sw = (env->fpus & ~0x3800) | ((env->fpstt & 0x7) << 11);
-+    state->fpu.fx_tw = 0;
-+    for (i = 0; i < 8; i++) {
-+        state->fpu.fx_tw |= (!env->fptags[i]) << i;
-+    }
-+    state->fpu.fx_opcode = env->fpop;
-+    state->fpu.fx_ip.fa_64 = env->fpip;
-+    state->fpu.fx_dp.fa_64 = env->fpdp;
-+    state->fpu.fx_mxcsr = env->mxcsr;
-+    state->fpu.fx_mxcsr_mask = 0x0000FFFF;
-+    assert(sizeof(state->fpu.fx_87_ac) == sizeof(env->fpregs));
-+    memcpy(state->fpu.fx_87_ac, env->fpregs, sizeof(env->fpregs));
-+    for (i = 0; i < 16; i++) {
-+        memcpy(&state->fpu.fx_xmm[i].xmm_bytes[0],
-+            &env->xmm_regs[i].ZMM_Q(0), 8);
-+        memcpy(&state->fpu.fx_xmm[i].xmm_bytes[8],
-+            &env->xmm_regs[i].ZMM_Q(1), 8);
-+    }
-+
-+    /* MSRs. */
-+    state->msrs[NVMM_X64_MSR_EFER] = env->efer;
-+    state->msrs[NVMM_X64_MSR_STAR] = env->star;
-+#ifdef TARGET_X86_64
-+    state->msrs[NVMM_X64_MSR_LSTAR] = env->lstar;
-+    state->msrs[NVMM_X64_MSR_CSTAR] = env->cstar;
-+    state->msrs[NVMM_X64_MSR_SFMASK] = env->fmask;
-+    state->msrs[NVMM_X64_MSR_KERNELGSBASE] = env->kernelgsbase;
-+#endif
-+    state->msrs[NVMM_X64_MSR_SYSENTER_CS]  = env->sysenter_cs;
-+    state->msrs[NVMM_X64_MSR_SYSENTER_ESP] = env->sysenter_esp;
-+    state->msrs[NVMM_X64_MSR_SYSENTER_EIP] = env->sysenter_eip;
-+    state->msrs[NVMM_X64_MSR_PAT] = env->pat;
-+    state->msrs[NVMM_X64_MSR_TSC] = env->tsc;
-+
-+    bitmap =
-+        NVMM_X64_STATE_SEGS |
-+        NVMM_X64_STATE_GPRS |
-+        NVMM_X64_STATE_CRS  |
-+        NVMM_X64_STATE_DRS  |
-+        NVMM_X64_STATE_MSRS |
-+        NVMM_X64_STATE_FPU;
-+
-+    ret = nvmm_vcpu_setstate(mach, vcpu, bitmap);
-+    if (ret == -1) {
-+        error_report("NVMM: Failed to set virtual processor context,"
-+            " error=%d", errno);
-+    }
-+}
-+
-+static void
-+nvmm_get_segment(SegmentCache *qseg, const struct nvmm_x64_state_seg *nseg)
-+{
-+    qseg->selector = nseg->selector;
-+    qseg->limit = nseg->limit;
-+    qseg->base = nseg->base;
-+
-+    qseg->flags =
-+        __SHIFTIN((uint32_t)nseg->attrib.type, DESC_TYPE_MASK) |
-+        __SHIFTIN((uint32_t)nseg->attrib.s, DESC_S_MASK) |
-+        __SHIFTIN((uint32_t)nseg->attrib.dpl, DESC_DPL_MASK) |
-+        __SHIFTIN((uint32_t)nseg->attrib.p, DESC_P_MASK) |
-+        __SHIFTIN((uint32_t)nseg->attrib.avl, DESC_AVL_MASK) |
-+        __SHIFTIN((uint32_t)nseg->attrib.l, DESC_L_MASK) |
-+        __SHIFTIN((uint32_t)nseg->attrib.def, DESC_B_MASK) |
-+        __SHIFTIN((uint32_t)nseg->attrib.g, DESC_G_MASK);
-+}
-+
-+static void
-+nvmm_get_registers(CPUState *cpu)
-+{
-+    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
-+    struct nvmm_machine *mach = get_nvmm_mach();
-+    struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
-+    struct nvmm_vcpu *vcpu = &qcpu->vcpu;
-+    X86CPU *x86_cpu = X86_CPU(cpu);
-+    struct nvmm_x64_state *state = vcpu->state;
-+    uint64_t bitmap, tpr;
-+    size_t i;
-+    int ret;
-+
-+    assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
-+
-+    bitmap =
-+        NVMM_X64_STATE_SEGS |
-+        NVMM_X64_STATE_GPRS |
-+        NVMM_X64_STATE_CRS  |
-+        NVMM_X64_STATE_DRS  |
-+        NVMM_X64_STATE_MSRS |
-+        NVMM_X64_STATE_FPU;
-+
-+    ret = nvmm_vcpu_getstate(mach, vcpu, bitmap);
-+    if (ret == -1) {
-+        error_report("NVMM: Failed to get virtual processor context,"
-+            " error=%d", errno);
-+    }
-+
-+    /* GPRs. */
-+    env->regs[R_EAX] = state->gprs[NVMM_X64_GPR_RAX];
-+    env->regs[R_ECX] = state->gprs[NVMM_X64_GPR_RCX];
-+    env->regs[R_EDX] = state->gprs[NVMM_X64_GPR_RDX];
-+    env->regs[R_EBX] = state->gprs[NVMM_X64_GPR_RBX];
-+    env->regs[R_ESP] = state->gprs[NVMM_X64_GPR_RSP];
-+    env->regs[R_EBP] = state->gprs[NVMM_X64_GPR_RBP];
-+    env->regs[R_ESI] = state->gprs[NVMM_X64_GPR_RSI];
-+    env->regs[R_EDI] = state->gprs[NVMM_X64_GPR_RDI];
-+    env->regs[R_R8]  = state->gprs[NVMM_X64_GPR_R8];
-+    env->regs[R_R9]  = state->gprs[NVMM_X64_GPR_R9];
-+    env->regs[R_R10] = state->gprs[NVMM_X64_GPR_R10];
-+    env->regs[R_R11] = state->gprs[NVMM_X64_GPR_R11];
-+    env->regs[R_R12] = state->gprs[NVMM_X64_GPR_R12];
-+    env->regs[R_R13] = state->gprs[NVMM_X64_GPR_R13];
-+    env->regs[R_R14] = state->gprs[NVMM_X64_GPR_R14];
-+    env->regs[R_R15] = state->gprs[NVMM_X64_GPR_R15];
-+
-+    /* RIP and RFLAGS. */
-+    env->eip = state->gprs[NVMM_X64_GPR_RIP];
-+    env->eflags = state->gprs[NVMM_X64_GPR_RFLAGS];
-+
-+    /* Segments. */
-+    nvmm_get_segment(&env->segs[R_ES], &state->segs[NVMM_X64_SEG_ES]);
-+    nvmm_get_segment(&env->segs[R_CS], &state->segs[NVMM_X64_SEG_CS]);
-+    nvmm_get_segment(&env->segs[R_SS], &state->segs[NVMM_X64_SEG_SS]);
-+    nvmm_get_segment(&env->segs[R_DS], &state->segs[NVMM_X64_SEG_DS]);
-+    nvmm_get_segment(&env->segs[R_FS], &state->segs[NVMM_X64_SEG_FS]);
-+    nvmm_get_segment(&env->segs[R_GS], &state->segs[NVMM_X64_SEG_GS]);
-+
-+    /* Special segments. */
-+    nvmm_get_segment(&env->gdt, &state->segs[NVMM_X64_SEG_GDT]);
-+    nvmm_get_segment(&env->ldt, &state->segs[NVMM_X64_SEG_LDT]);
-+    nvmm_get_segment(&env->tr, &state->segs[NVMM_X64_SEG_TR]);
-+    nvmm_get_segment(&env->idt, &state->segs[NVMM_X64_SEG_IDT]);
-+
-+    /* Control registers. */
-+    env->cr[0] = state->crs[NVMM_X64_CR_CR0];
-+    env->cr[2] = state->crs[NVMM_X64_CR_CR2];
-+    env->cr[3] = state->crs[NVMM_X64_CR_CR3];
-+    env->cr[4] = state->crs[NVMM_X64_CR_CR4];
-+    tpr = state->crs[NVMM_X64_CR_CR8];
-+    if (tpr != qcpu->tpr) {
-+        qcpu->tpr = tpr;
-+        cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
-+    }
-+    env->xcr0 = state->crs[NVMM_X64_CR_XCR0];
-+
-+    /* Debug registers. */
-+    env->dr[0] = state->drs[NVMM_X64_DR_DR0];
-+    env->dr[1] = state->drs[NVMM_X64_DR_DR1];
-+    env->dr[2] = state->drs[NVMM_X64_DR_DR2];
-+    env->dr[3] = state->drs[NVMM_X64_DR_DR3];
-+    env->dr[6] = state->drs[NVMM_X64_DR_DR6];
-+    env->dr[7] = state->drs[NVMM_X64_DR_DR7];
-+
-+    /* FPU. */
-+    env->fpuc = state->fpu.fx_cw;
-+    env->fpstt = (state->fpu.fx_sw >> 11) & 0x7;
-+    env->fpus = state->fpu.fx_sw & ~0x3800;
-+    for (i = 0; i < 8; i++) {
-+        env->fptags[i] = !((state->fpu.fx_tw >> i) & 1);
-+    }
-+    env->fpop = state->fpu.fx_opcode;
-+    env->fpip = state->fpu.fx_ip.fa_64;
-+    env->fpdp = state->fpu.fx_dp.fa_64;
-+    env->mxcsr = state->fpu.fx_mxcsr;
-+    assert(sizeof(state->fpu.fx_87_ac) == sizeof(env->fpregs));
-+    memcpy(env->fpregs, state->fpu.fx_87_ac, sizeof(env->fpregs));
-+    for (i = 0; i < 16; i++) {
-+        memcpy(&env->xmm_regs[i].ZMM_Q(0),
-+            &state->fpu.fx_xmm[i].xmm_bytes[0], 8);
-+        memcpy(&env->xmm_regs[i].ZMM_Q(1),
-+            &state->fpu.fx_xmm[i].xmm_bytes[8], 8);
-+    }
-+
-+    /* MSRs. */
-+    env->efer = state->msrs[NVMM_X64_MSR_EFER];
-+    env->star = state->msrs[NVMM_X64_MSR_STAR];
-+#ifdef TARGET_X86_64
-+    env->lstar = state->msrs[NVMM_X64_MSR_LSTAR];
-+    env->cstar = state->msrs[NVMM_X64_MSR_CSTAR];
-+    env->fmask = state->msrs[NVMM_X64_MSR_SFMASK];
-+    env->kernelgsbase = state->msrs[NVMM_X64_MSR_KERNELGSBASE];
-+#endif
-+    env->sysenter_cs  = state->msrs[NVMM_X64_MSR_SYSENTER_CS];
-+    env->sysenter_esp = state->msrs[NVMM_X64_MSR_SYSENTER_ESP];
-+    env->sysenter_eip = state->msrs[NVMM_X64_MSR_SYSENTER_EIP];
-+    env->pat = state->msrs[NVMM_X64_MSR_PAT];
-+    env->tsc = state->msrs[NVMM_X64_MSR_TSC];
-+
-+    x86_update_hflags(env);
-+}
-+
-+static bool
-+nvmm_can_take_int(CPUState *cpu)
-+{
-+    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
-+    struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
-+    struct nvmm_vcpu *vcpu = &qcpu->vcpu;
-+    struct nvmm_machine *mach = get_nvmm_mach();
-+
-+    if (qcpu->int_window_exit) {
-+        return false;
-+    }
-+
-+    if (qcpu->int_shadow || (!(env->eflags & IF_MASK))) {
-+        struct nvmm_x64_state *state = vcpu->state;
-+
-+        /* Exit on interrupt window. */
-+        nvmm_vcpu_getstate(mach, vcpu, NVMM_X64_STATE_INTR);
-+        state->intr.int_window_exiting = 1;
-+        nvmm_vcpu_setstate(mach, vcpu, NVMM_X64_STATE_INTR);
-+
-+        return false;
-+    }
-+
-+    return true;
-+}
-+
-+static bool
-+nvmm_can_take_nmi(CPUState *cpu)
-+{
-+    struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
-+
-+    /*
-+     * Contrary to INTs, NMIs always schedule an exit when they are
-+     * completed. Therefore, if window-exiting is enabled, it means
-+     * NMIs are blocked.
-+     */
-+    if (qcpu->nmi_window_exit) {
-+        return false;
-+    }
-+
-+    return true;
-+}
-+
-+/*
-+ * Called before the VCPU is run. We inject events generated by the I/O
-+ * thread, and synchronize the guest TPR.
-+ */
-+static void
-+nvmm_vcpu_pre_run(CPUState *cpu)
-+{
-+    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
-+    struct nvmm_machine *mach = get_nvmm_mach();
-+    struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
-+    struct nvmm_vcpu *vcpu = &qcpu->vcpu;
-+    X86CPU *x86_cpu = X86_CPU(cpu);
-+    struct nvmm_x64_state *state = vcpu->state;
-+    struct nvmm_event *event = vcpu->event;
-+    bool has_event = false;
-+    bool sync_tpr = false;
-+    uint8_t tpr;
-+    int ret;
-+
-+    qemu_mutex_lock_iothread();
-+
-+    tpr = cpu_get_apic_tpr(x86_cpu->apic_state);
-+    if (tpr != qcpu->tpr) {
-+        qcpu->tpr = tpr;
-+        sync_tpr = true;
-+    }
-+
-+    /*
-+     * Force the VCPU out of its inner loop to process any INIT requests
-+     * or commit pending TPR access.
-+     */
-+    if (cpu->interrupt_request & (CPU_INTERRUPT_INIT|CPU_INTERRUPT_TPR)) {
-+        cpu->exit_request = 1;
-+    }
-+
-+    if (!has_event && (cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
-+        if (nvmm_can_take_nmi(cpu)) {
-+            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
-+            event->type = NVMM_EVENT_INTERRUPT_HW;
-+            event->vector = 2;
-+            has_event = true;
-+        }
-+    }
-+
-+    if (!has_event && (cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
-+        if (nvmm_can_take_int(cpu)) {
-+            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
-+            event->type = NVMM_EVENT_INTERRUPT_HW;
-+            event->vector = cpu_get_pic_interrupt(env);
-+            has_event = true;
-+        }
-+    }
-+
-+    /* Don't want SMIs. */
-+    if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
-+        cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
-+    }
-+
-+    if (sync_tpr) {
-+        ret = nvmm_vcpu_getstate(mach, vcpu, NVMM_X64_STATE_CRS);
-+        if (ret == -1) {
-+            error_report("NVMM: Failed to get CPU state,"
-+                " error=%d", errno);
-+        }
-+
-+        state->crs[NVMM_X64_CR_CR8] = qcpu->tpr;
-+
-+        ret = nvmm_vcpu_setstate(mach, vcpu, NVMM_X64_STATE_CRS);
-+        if (ret == -1) {
-+            error_report("NVMM: Failed to set CPU state,"
-+                " error=%d", errno);
-+        }
-+    }
-+
-+    if (has_event) {
-+        ret = nvmm_vcpu_inject(mach, vcpu);
-+        if (ret == -1) {
-+            error_report("NVMM: Failed to inject event,"
-+                " error=%d", errno);
-+        }
-+    }
-+
-+    qemu_mutex_unlock_iothread();
-+}
-+
-+/*
-+ * Called after the VCPU ran. We synchronize the host view of the TPR and
-+ * RFLAGS.
-+ */
-+static void
-+nvmm_vcpu_post_run(CPUState *cpu, struct nvmm_exit *exit)
-+{
-+    struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
-+    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
-+    X86CPU *x86_cpu = X86_CPU(cpu);
-+    uint64_t tpr;
-+
-+    env->eflags = exit->exitstate[NVMM_X64_EXITSTATE_RFLAGS];
-+
-+    qcpu->int_shadow =
-+        exit->exitstate[NVMM_X64_EXITSTATE_INT_SHADOW];
-+    qcpu->int_window_exit =
-+        exit->exitstate[NVMM_X64_EXITSTATE_INT_WINDOW_EXIT];
-+    qcpu->nmi_window_exit =
-+        exit->exitstate[NVMM_X64_EXITSTATE_NMI_WINDOW_EXIT];
-+
-+    tpr = exit->exitstate[NVMM_X64_EXITSTATE_CR8];
-+    if (qcpu->tpr != tpr) {
-+        qcpu->tpr = tpr;
-+        qemu_mutex_lock_iothread();
-+        cpu_set_apic_tpr(x86_cpu->apic_state, qcpu->tpr);
-+        qemu_mutex_unlock_iothread();
-+    }
-+}
-+
-+/* -------------------------------------------------------------------------- */
-+
-+static void
-+nvmm_io_callback(struct nvmm_io *io)
-+{
-+    MemTxAttrs attrs = { 0 };
-+    int ret;
-+
-+    ret = address_space_rw(&address_space_io, io->port, attrs, io->data,
-+        io->size, !io->in);
-+    if (ret != MEMTX_OK) {
-+        error_report("NVMM: I/O Transaction Failed "
-+            "[%s, port=%lu, size=%zu]", (io->in ? "in" : "out"),
-+            io->port, io->size);
-+    }
-+
-+    /* XXX Needed, otherwise infinite loop. */
-+    current_cpu->vcpu_dirty = false;
-+}
-+
-+static void
-+nvmm_mem_callback(struct nvmm_mem *mem)
-+{
-+    cpu_physical_memory_rw(mem->gpa, mem->data, mem->size, mem->write);
-+
-+    /* XXX Needed, otherwise infinite loop. */
-+    current_cpu->vcpu_dirty = false;
-+}
-+
-+static struct nvmm_callbacks nvmm_callbacks = {
-+    .io = nvmm_io_callback,
-+    .mem = nvmm_mem_callback
-+};
-+
-+/* -------------------------------------------------------------------------- */
-+
-+static int
-+nvmm_handle_mem(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
-+{
-+    int ret;
-+
-+    ret = nvmm_assist_mem(mach, vcpu);
-+    if (ret == -1) {
-+        error_report("NVMM: Mem Assist Failed [gpa=%p]",
-+            (void *)vcpu->exit->u.mem.gpa);
-+    }
-+
-+    return ret;
-+}
-+
-+static int
-+nvmm_handle_io(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
-+{
-+    int ret;
-+
-+    ret = nvmm_assist_io(mach, vcpu);
-+    if (ret == -1) {
-+        error_report("NVMM: I/O Assist Failed [port=%d]",
-+            (int)vcpu->exit->u.io.port);
-+    }
-+
-+    return ret;
-+}
-+
-+static int
-+nvmm_handle_msr(struct nvmm_machine *mach, CPUState *cpu,
-+    struct nvmm_exit *exit)
-+{
-+    struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
-+    struct nvmm_vcpu *vcpu = &qcpu->vcpu;
-+    X86CPU *x86_cpu = X86_CPU(cpu);
-+    struct nvmm_x64_state *state = vcpu->state;
-+    uint64_t val;
-+    int ret;
-+
-+    val = exit->u.msr.val;
-+
-+    switch (exit->u.msr.msr) {
-+    case MSR_IA32_APICBASE:
-+        if (exit->u.msr.type == NVMM_EXIT_MSR_RDMSR) {
-+            val = cpu_get_apic_base(x86_cpu->apic_state);
-+        } else {
-+            cpu_set_apic_base(x86_cpu->apic_state, val);
-+        }
-+        break;
-+    default:
-+        // TODO: more MSRs to add?
-+        if (exit->u.msr.type == NVMM_EXIT_MSR_RDMSR) {
-+            val = 0;
-+        }
-+        error_report("NVMM: Unexpected %sMSR 0x%lx [val=0x%lx], ignored",
-+            (exit->u.msr.type == NVMM_EXIT_MSR_RDMSR) ? "RD" : "WR",
-+            exit->u.msr.msr, val);
-+        break;
-+    }
-+
-+    ret = nvmm_vcpu_getstate(mach, vcpu, NVMM_X64_STATE_GPRS);
-+    if (ret == -1) {
-+        return -1;
-+    }
-+
-+    if (exit->u.msr.type == NVMM_EXIT_MSR_RDMSR) {
-+        state->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
-+        state->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
-+    }
-+    state->gprs[NVMM_X64_GPR_RIP] = exit->u.msr.npc;
-+
-+    ret = nvmm_vcpu_setstate(mach, vcpu, NVMM_X64_STATE_GPRS);
-+    if (ret == -1) {
-+        return -1;
-+    }
-+
-+    return 0;
-+}
-+
-+static int
-+nvmm_handle_halted(struct nvmm_machine *mach, CPUState *cpu,
-+    struct nvmm_exit *exit)
-+{
-+    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
-+    int ret = 0;
-+
-+    qemu_mutex_lock_iothread();
-+
-+    if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
-+          (env->eflags & IF_MASK)) &&
-+        !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
-+        cpu->exception_index = EXCP_HLT;
-+        cpu->halted = true;
-+        ret = 1;
-+    }
-+
-+    qemu_mutex_unlock_iothread();
-+
-+    return ret;
-+}
-+
-+static int
-+nvmm_inject_ud(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
-+{
-+    struct nvmm_event *event = vcpu->event;
-+
-+    event->type = NVMM_EVENT_EXCEPTION;
-+    event->vector = 6;
-+    event->u.error = 0;
-+
-+    return nvmm_vcpu_inject(mach, vcpu);
-+}
-+
-+static int
-+nvmm_vcpu_loop(CPUState *cpu)
-+{
-+    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
-+    struct nvmm_machine *mach = get_nvmm_mach();
-+    struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
-+    struct nvmm_vcpu *vcpu = &qcpu->vcpu;
-+    X86CPU *x86_cpu = X86_CPU(cpu);
-+    struct nvmm_exit *exit = vcpu->exit;
-+    int ret;
-+
-+    /*
-+     * Some asynchronous events must be handled outside of the inner
-+     * VCPU loop. They are handled here.
-+     */
-+    if (cpu->interrupt_request & CPU_INTERRUPT_INIT) {
-+        nvmm_cpu_synchronize_state(cpu);
-+        do_cpu_init(x86_cpu);
-+        /* XXX: reset the INT/NMI windows */
-+    }
-+    if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
-+        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
-+        apic_poll_irq(x86_cpu->apic_state);
-+    }
-+    if (((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
-+         (env->eflags & IF_MASK)) ||
-+        (cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
-+        cpu->halted = false;
-+    }
-+    if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) {
-+        nvmm_cpu_synchronize_state(cpu);
-+        do_cpu_sipi(x86_cpu);
-+    }
-+    if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
-+        cpu->interrupt_request &= ~CPU_INTERRUPT_TPR;
-+        nvmm_cpu_synchronize_state(cpu);
-+        apic_handle_tpr_access_report(x86_cpu->apic_state, env->eip,
-+            env->tpr_access_type);
-+    }
-+
-+    if (cpu->halted) {
-+        cpu->exception_index = EXCP_HLT;
-+        atomic_set(&cpu->exit_request, false);
-+        return 0;
-+    }
-+
-+    qemu_mutex_unlock_iothread();
-+    cpu_exec_start(cpu);
-+
-+    /*
-+     * Inner VCPU loop.
-+     */
-+    do {
-+        if (cpu->vcpu_dirty) {
-+            nvmm_set_registers(cpu);
-+            cpu->vcpu_dirty = false;
-+        }
-+
-+        if (qcpu->stop) {
-+            cpu->exception_index = EXCP_INTERRUPT;
-+            qcpu->stop = false;
-+            ret = 1;
-+            break;
-+        }
-+
-+        nvmm_vcpu_pre_run(cpu);
-+
-+        if (atomic_read(&cpu->exit_request)) {
-+            qemu_cpu_kick_self();
-+        }
-+
-+        ret = nvmm_vcpu_run(mach, vcpu);
-+        if (ret == -1) {
-+            error_report("NVMM: Failed to exec a virtual processor,"
-+                " error=%d", errno);
-+            break;
-+        }
-+
-+        nvmm_vcpu_post_run(cpu, exit);
-+
-+        switch (exit->reason) {
-+        case NVMM_EXIT_NONE:
-+            break;
-+        case NVMM_EXIT_MEMORY:
-+            ret = nvmm_handle_mem(mach, vcpu);
-+            break;
-+        case NVMM_EXIT_IO:
-+            ret = nvmm_handle_io(mach, vcpu);
-+            break;
-+        case NVMM_EXIT_MSR:
-+            ret = nvmm_handle_msr(mach, cpu, exit);
-+            break;
-+        case NVMM_EXIT_INT_READY:
-+        case NVMM_EXIT_NMI_READY:
-+            break;
-+        case NVMM_EXIT_MONITOR:
-+        case NVMM_EXIT_MWAIT:
-+        case NVMM_EXIT_MWAIT_COND:
-+            ret = nvmm_inject_ud(mach, vcpu);
-+            break;
-+        case NVMM_EXIT_HALTED:
-+            ret = nvmm_handle_halted(mach, cpu, exit);
-+            break;
-+        case NVMM_EXIT_SHUTDOWN:
-+            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
-+            cpu->exception_index = EXCP_INTERRUPT;
-+            ret = 1;
-+            break;
-+        default:
-+            error_report("NVMM: Unexpected VM exit code 0x%lx [hw=0x%lx]",
-+                exit->reason, exit->u.inv.hwcode);
-+            nvmm_get_registers(cpu);
-+            qemu_mutex_lock_iothread();
-+            qemu_system_guest_panicked(cpu_get_crash_info(cpu));
-+            qemu_mutex_unlock_iothread();
-+            ret = -1;
-+            break;
-+        }
-+    } while (ret == 0);
-+
-+    cpu_exec_end(cpu);
-+    qemu_mutex_lock_iothread();
-+    current_cpu = cpu;
-+
-+    atomic_set(&cpu->exit_request, false);
-+
-+    return ret < 0;
-+}
-+
-+/* -------------------------------------------------------------------------- */
-+
-+static void
-+do_nvmm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
-+{
-+    nvmm_get_registers(cpu);
-+    cpu->vcpu_dirty = true;
-+}
-+
-+static void
-+do_nvmm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
-+{
-+    nvmm_set_registers(cpu);
-+    cpu->vcpu_dirty = false;
-+}
-+
-+static void
-+do_nvmm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
-+{
-+    nvmm_set_registers(cpu);
-+    cpu->vcpu_dirty = false;
-+}
-+
-+static void
-+do_nvmm_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
-+{
-+    cpu->vcpu_dirty = true;
-+}
-+
-+void nvmm_cpu_synchronize_state(CPUState *cpu)
-+{
-+    if (!cpu->vcpu_dirty) {
-+        run_on_cpu(cpu, do_nvmm_cpu_synchronize_state, RUN_ON_CPU_NULL);
-+    }
-+}
-+
-+void nvmm_cpu_synchronize_post_reset(CPUState *cpu)
-+{
-+    run_on_cpu(cpu, do_nvmm_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
-+}
-+
-+void nvmm_cpu_synchronize_post_init(CPUState *cpu)
-+{
-+    run_on_cpu(cpu, do_nvmm_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
-+}
-+
-+void nvmm_cpu_synchronize_pre_loadvm(CPUState *cpu)
-+{
-+    run_on_cpu(cpu, do_nvmm_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
-+}
-+
-+/* -------------------------------------------------------------------------- */
-+
-+static Error *nvmm_migration_blocker;
-+
-+static void
-+nvmm_ipi_signal(int sigcpu)
-+{
-+    struct qemu_vcpu *qcpu;
-+
-+    if (current_cpu) {
-+        qcpu = get_qemu_vcpu(current_cpu);
-+        qcpu->stop = true;
-+    }
-+}
-+
-+static void
-+nvmm_init_cpu_signals(void)
-+{
-+    struct sigaction sigact;
-+    sigset_t set;
-+
-+    /* Install the IPI handler. */
-+    memset(&sigact, 0, sizeof(sigact));
-+    sigact.sa_handler = nvmm_ipi_signal;
-+    sigaction(SIG_IPI, &sigact, NULL);
-+
-+    /* Allow IPIs on the current thread. */
-+    sigprocmask(SIG_BLOCK, NULL, &set);
-+    sigdelset(&set, SIG_IPI);
-+    pthread_sigmask(SIG_SETMASK, &set, NULL);
-+}
-+
-+int
-+nvmm_init_vcpu(CPUState *cpu)
-+{
-+    struct nvmm_machine *mach = get_nvmm_mach();
-+    Error *local_error = NULL;
-+    struct qemu_vcpu *qcpu;
-+    int ret;
-+
-+    nvmm_init_cpu_signals();
-+
-+    if (nvmm_migration_blocker == NULL) {
-+        error_setg(&nvmm_migration_blocker,
-+            "NVMM: Migration not supported");
-+
-+        (void)migrate_add_blocker(nvmm_migration_blocker, &local_error);
-+        if (local_error) {
-+            error_report_err(local_error);
-+            migrate_del_blocker(nvmm_migration_blocker);
-+            error_free(nvmm_migration_blocker);
-+            return -EINVAL;
-+        }
-+    }
-+
-+    qcpu = g_malloc0(sizeof(*qcpu));
-+    if (qcpu == NULL) {
-+        error_report("NVMM: Failed to allocate VCPU context.");
-+        return -ENOMEM;
-+    }
-+
-+    ret = nvmm_vcpu_create(mach, cpu->cpu_index, &qcpu->vcpu);
-+    if (ret == -1) {
-+        error_report("NVMM: Failed to create a virtual processor,"
-+            " error=%d", errno);
-+        g_free(qcpu);
-+        return -EINVAL;
-+    }
-+
-+    cpu->vcpu_dirty = true;
-+    cpu->hax_vcpu = (struct hax_vcpu_state *)qcpu;
-+
-+    return 0;
-+}
-+
-+int
-+nvmm_vcpu_exec(CPUState *cpu)
-+{
-+    int ret, fatal;
-+
-+    while (1) {
-+        if (cpu->exception_index >= EXCP_INTERRUPT) {
-+            ret = cpu->exception_index;
-+            cpu->exception_index = -1;
-+            break;
-+        }
-+
-+        fatal = nvmm_vcpu_loop(cpu);
-+
-+        if (fatal) {
-+            error_report("NVMM: Failed to execute a VCPU.");
-+            abort();
-+        }
-+    }
-+
-+    return ret;
-+}
-+
-+void
-+nvmm_destroy_vcpu(CPUState *cpu)
-+{
-+    struct nvmm_machine *mach = get_nvmm_mach();
-+    struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
-+
-+    nvmm_vcpu_destroy(mach, &qcpu->vcpu);
-+    g_free(cpu->hax_vcpu);
-+}
-+
-+/* -------------------------------------------------------------------------- */
-+
-+static void
-+nvmm_update_mapping(hwaddr start_pa, ram_addr_t size, uintptr_t hva,
-+    bool add, bool rom, const char *name)
-+{
-+    struct nvmm_machine *mach = get_nvmm_mach();
-+    int ret, prot;
-+
-+    if (add) {
-+        prot = PROT_READ | PROT_EXEC;
-+        if (!rom) {
-+            prot |= PROT_WRITE;
-+        }
-+        ret = nvmm_gpa_map(mach, hva, start_pa, size, prot);
-+    } else {
-+        ret = nvmm_gpa_unmap(mach, hva, start_pa, size);
-+    }
-+
-+    if (ret == -1) {
-+        error_report("NVMM: Failed to %s GPA range '%s' PA:%p, "
-+            "Size:%p bytes, HostVA:%p, error=%d",
-+            (add ? "map" : "unmap"), name, (void *)(uintptr_t)start_pa,
-+            (void *)size, (void *)hva, errno);
-+    }
-+}
-+
-+static void
-+nvmm_process_section(MemoryRegionSection *section, int add)
-+{
-+    MemoryRegion *mr = section->mr;
-+    hwaddr start_pa = section->offset_within_address_space;
-+    ram_addr_t size = int128_get64(section->size);
-+    unsigned int delta;
-+    uintptr_t hva;
-+
-+    if (!memory_region_is_ram(mr)) {
-+        return;
-+    }
-+
-+    /* Adjust start_pa and size so that they are page-aligned. */
-+    delta = qemu_real_host_page_size - (start_pa & ~qemu_real_host_page_mask);
-+    delta &= ~qemu_real_host_page_mask;
-+    if (delta > size) {
-+        return;
-+    }
-+    start_pa += delta;
-+    size -= delta;
-+    size &= qemu_real_host_page_mask;
-+    if (!size || (start_pa & ~qemu_real_host_page_mask)) {
-+        return;
-+    }
-+
-+    hva = (uintptr_t)memory_region_get_ram_ptr(mr) +
-+        section->offset_within_region + delta;
-+
-+    nvmm_update_mapping(start_pa, size, hva, add,
-+        memory_region_is_rom(mr), mr->name);
-+}
-+
-+static void
-+nvmm_region_add(MemoryListener *listener, MemoryRegionSection *section)
-+{
-+    memory_region_ref(section->mr);
-+    nvmm_process_section(section, 1);
-+}
-+
-+static void
-+nvmm_region_del(MemoryListener *listener, MemoryRegionSection *section)
-+{
-+    nvmm_process_section(section, 0);
-+    memory_region_unref(section->mr);
-+}
-+
-+static void
-+nvmm_transaction_begin(MemoryListener *listener)
-+{
-+    /* nothing */
-+}
-+
-+static void
-+nvmm_transaction_commit(MemoryListener *listener)
-+{
-+    /* nothing */
-+}
-+
-+static void
-+nvmm_log_sync(MemoryListener *listener, MemoryRegionSection *section)
-+{
-+    MemoryRegion *mr = section->mr;
-+
-+    if (!memory_region_is_ram(mr)) {
-+        return;
-+    }
-+
-+    memory_region_set_dirty(mr, 0, int128_get64(section->size));
-+}
-+
-+static MemoryListener nvmm_memory_listener = {
-+    .begin = nvmm_transaction_begin,
-+    .commit = nvmm_transaction_commit,
-+    .region_add = nvmm_region_add,
-+    .region_del = nvmm_region_del,
-+    .log_sync = nvmm_log_sync,
-+    .priority = 10,
-+};
-+
-+static void
-+nvmm_ram_block_added(RAMBlockNotifier *n, void *host, size_t size)
-+{
-+    struct nvmm_machine *mach = get_nvmm_mach();
-+    uintptr_t hva = (uintptr_t)host;
-+    int ret;
-+
-+    ret = nvmm_hva_map(mach, hva, size);
-+
-+    if (ret == -1) {
-+        error_report("NVMM: Failed to map HVA, HostVA:%p "
-+            "Size:%p bytes, error=%d",
-+            (void *)hva, (void *)size, errno);
-+    }
-+}
-+
-+static struct RAMBlockNotifier nvmm_ram_notifier = {
-+    .ram_block_added = nvmm_ram_block_added
-+};
-+
-+/* -------------------------------------------------------------------------- */
-+
-+static void
-+nvmm_handle_interrupt(CPUState *cpu, int mask)
-+{
-+    cpu->interrupt_request |= mask;
-+
-+    if (!qemu_cpu_is_self(cpu)) {
-+        qemu_cpu_kick(cpu);
-+    }
-+}
-+
-+/* -------------------------------------------------------------------------- */
-+
-+static int
-+nvmm_accel_configure(struct nvmm_machine *mach)
-+{
-+    struct nvmm_mach_conf_x86_cpuid cpuid;
-+    int ret;
-+
-+    memset(&cpuid, 0, sizeof(cpuid));
-+    cpuid.leaf = 0x00000001;
-+    cpuid.del.edx = CPUID_MCE | CPUID_MCA | CPUID_MTRR;
-+
-+    ret = nvmm_machine_configure(mach, NVMM_MACH_CONF_X86_CPUID, &cpuid);
-+    if (ret == -1)
-+        return -1;
-+
-+    ret = nvmm_machine_configure(mach, NVMM_MACH_CONF_CALLBACKS,
-+        &nvmm_callbacks);
-+    if (ret == -1)
-+        return -1;
-+
-+    return 0;
-+}
-+
-+static int
-+nvmm_accel_init(MachineState *ms)
-+{
-+    struct nvmm_capability cap;
-+    int ret;
-+
-+    ret = nvmm_capability(&cap);
-+    if (ret == -1) {
-+        error_report("NVMM: No accelerator found, error=%d", errno);
-+        return -ENOSPC;
-+    }
-+    if (cap.version != 1) {
-+        error_report("NVMM: Unsupported version %lu", cap.version);
-+        return -ENOSPC;
-+    }
-+    if (cap.state_size != sizeof(struct nvmm_x64_state)) {
-+        error_report("NVMM: Wrong state size %zu", cap.state_size);
-+        return -ENOSPC;
-+    }
-+
-+    ret = nvmm_machine_create(&qemu_mach.mach);
-+    if (ret == -1) {
-+        error_report("NVMM: Machine creation failed, error=%d", errno);
-+        return -ENOSPC;
-+    }
-+
-+    ret = nvmm_accel_configure(&qemu_mach.mach);
-+    if (ret == -1) {
-+        error_report("NVMM: Machine configuration failed, error=%d",
-+            errno);
-+        return -ENOSPC;
-+    }
-+
-+    memory_listener_register(&nvmm_memory_listener, &address_space_memory);
-+    ram_block_notifier_add(&nvmm_ram_notifier);
-+
-+    cpu_interrupt_handler = nvmm_handle_interrupt;
-+
-+    printf("NetBSD Virtual Machine Monitor accelerator is operational\n");
-+    return 0;
-+}
-+
-+int
-+nvmm_enabled(void)
-+{
-+    return nvmm_allowed;
-+}
-+
-+static void
-+nvmm_accel_class_init(ObjectClass *oc, void *data)
-+{
-+    AccelClass *ac = ACCEL_CLASS(oc);
-+    ac->name = "NVMM";
-+    ac->init_machine = nvmm_accel_init;
-+    ac->allowed = &nvmm_allowed;
-+}
-+
-+static const TypeInfo nvmm_accel_type = {
-+    .name = ACCEL_CLASS_NAME("nvmm"),
-+    .parent = TYPE_ACCEL,
-+    .class_init = nvmm_accel_class_init,
-+};
-+
-+static void
-+nvmm_type_init(void)
-+{
-+    type_register_static(&nvmm_accel_type);
-+}
-+
-+type_init(nvmm_type_init);
---- vl.c	2018-12-11 18:44:35.000000000 +0100
-+++ vl.c	2019-05-01 11:53:33.095579431 +0200
-@@ -3653,7 +3653,8 @@
-                                                      optarg, true);
-                 optarg = qemu_opt_get(accel_opts, "accel");
-                 if (!optarg || is_help_option(optarg)) {
--                    error_printf("Possible accelerators: kvm, xen, hax, tcg\n");
-+                    error_printf("Possible accelerators: "
-+                        "kvm, xen, hax, nvmm, tcg\n");
-                     exit(0);
-                 }
-                 opts = qemu_opts_create(qemu_find_opts("machine"), NULL,
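For context, the configure and qemu-options.hx hunks above are what expose NVMM to the build and to the command line. A minimal sketch of the build-time side only (the target list shown here is illustrative and not part of this commit; the pkgsrc package drives the real build):

  # The patched configure gains an --enable-nvmm switch, probes for
  # <nvmm.h> and links against -lnvmm when the header is found.
  ./configure --enable-nvmm --target-list=x86_64-softmmu
  make
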
diff --git a/qemu-nvmm/patches/patch-target_arm_cpu.h b/qemu-nvmm/patches/patch-target_arm_cpu.h
deleted file mode 100644
index 92f754bcfa..0000000000
--- a/qemu-nvmm/patches/patch-target_arm_cpu.h
+++ /dev/null
@@ -1,20 +0,0 @@
-$NetBSD: patch-target_arm_cpu.h,v 1.2 2019/01/31 13:39:10 martin Exp $
-
-Upstream fix for arm/aarch64 FPU exception handling
-test failures in the NetBSD ATF test suit.
-
---- target/arm/cpu.h.orig	2018-12-11 18:44:34.000000000 +0100
-+++ target/arm/cpu.h	2019-01-31 14:29:37.037792781 +0100
-@@ -1299,6 +1299,12 @@ void vfp_set_fpscr(CPUARMState *env, uin
- #define FPSR_MASK 0xf800009f
- #define FPCR_MASK 0x07ff9f00
- 
-+#define FPCR_IOE    (1 << 8)    /* Invalid Operation exception trap enable */
-+#define FPCR_DZE    (1 << 9)    /* Divide by Zero exception trap enable */
-+#define FPCR_OFE    (1 << 10)   /* Overflow exception trap enable */
-+#define FPCR_UFE    (1 << 11)   /* Underflow exception trap enable */
-+#define FPCR_IXE    (1 << 12)   /* Inexact exception trap enable */
-+#define FPCR_IDE    (1 << 15)   /* Input Denormal exception trap enable */
- #define FPCR_FZ16   (1 << 19)   /* ARMv8.2+, FP16 flush-to-zero */
- #define FPCR_FZ     (1 << 24)   /* Flush-to-zero enable bit */
- #define FPCR_DN     (1 << 25)   /* Default NaN enable bit */
diff --git a/qemu-nvmm/patches/patch-target_arm_helper.c b/qemu-nvmm/patches/patch-target_arm_helper.c
deleted file mode 100644
index f167878d65..0000000000
--- a/qemu-nvmm/patches/patch-target_arm_helper.c
+++ /dev/null
@@ -1,20 +0,0 @@
-$NetBSD: patch-target_arm_helper.c,v 1.1 2019/01/31 13:39:10 martin Exp $
-
-Upstream fix for arm/aarch64 FPU exception handling
-test failures in the NetBSD ATF test suit.
-
---- target/arm/helper.c.orig	2018-12-11 18:44:34.000000000 +0100
-+++ target/arm/helper.c	2019-01-31 14:29:37.050410598 +0100
-@@ -11747,6 +11747,12 @@
-         val &= ~FPCR_FZ16;
-     }
- 
-+    /*
-+     * We don't implement trapped exception handling, so the
-+     * trap enable bits are all RAZ/WI (not RES0!)
-+     */
-+    val &= ~(FPCR_IDE | FPCR_IXE | FPCR_UFE | FPCR_OFE | FPCR_DZE | FPCR_IOE);
-+
-     changed = env->vfp.xregs[ARM_VFP_FPSCR];
-     env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
-     env->vfp.vec_len = (val >> 16) & 7;
diff --git a/qemu-nvmm/patches/patch-tests_Makefile.include b/qemu-nvmm/patches/patch-tests_Makefile.include
deleted file mode 100644
index 9921005155..0000000000
--- a/qemu-nvmm/patches/patch-tests_Makefile.include
+++ /dev/null
@@ -1,17 +0,0 @@
-$NetBSD: patch-tests_Makefile.include,v 1.1 2016/09/04 09:21:04 ryoon Exp $
-
-* Don't link -lutil on SunOS
-
---- tests/Makefile.include.orig	2016-09-02 15:34:24.000000000 +0000
-+++ tests/Makefile.include
-@@ -645,8 +645,10 @@ tests/migration/initrd-stress.img: tests
- 	rmdir $(INITRD_WORK_DIR)
- 
- ifeq ($(CONFIG_POSIX),y)
-+ifneq ($(CONFIG_SOLARIS),y)
- LIBS += -lutil
- endif
-+endif
- 
- # QTest rules
- 

