pkgsrc-WIP-changes archive

qemu-nvmm: improvements



Module Name:	pkgsrc-wip
Committed By:	Maxime Villard <max%m00nbsd.net@localhost>
Pushed By:	maxv
Date:		Mon Feb 4 13:01:44 2019 +0100
Changeset:	085bc7a84e9c1fb57118136db97d08ee90132ecb

Modified Files:
	qemu-nvmm/Makefile
	qemu-nvmm/PLIST
	qemu-nvmm/distinfo
	qemu-nvmm/patches/patch-nvmm-support
Added Files:
	qemu-nvmm/patches/patch-hw_arm_boot.c
	qemu-nvmm/patches/patch-hw_core_loader.c
	qemu-nvmm/patches/patch-hw_core_uboot__image.h
	qemu-nvmm/patches/patch-target_arm_cpu.h
	qemu-nvmm/patches/patch-target_arm_helper.c

Log Message:
qemu-nvmm: improvements

 - Rebase to qemu-3.1.0.
 - Switch to QEMU's indentation style and copyright/license header.
 - Fix the inverted SYSENTER MSRs (ESP/EIP were swapped; see the sketch
   below).
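
For context, the "inverted SYSENTER MSRs" item refers to a swapped ESP/EIP
mapping between QEMU's CPU state and NVMM's MSR slots in
target/i386/nvmm-all.c. A minimal before/after sketch, taken from the
nvmm_set_registers() hunk of patch-nvmm-support below:

    /* Before: the ESP and EIP values were cross-assigned. */
    state.msrs[NVMM_X64_MSR_SYSENTER_ESP] = env->sysenter_eip;
    state.msrs[NVMM_X64_MSR_SYSENTER_EIP] = env->sysenter_esp;

    /* After: each NVMM MSR slot gets its matching QEMU field. */
    state.msrs[NVMM_X64_MSR_SYSENTER_ESP] = env->sysenter_esp;
    state.msrs[NVMM_X64_MSR_SYSENTER_EIP] = env->sysenter_eip;

The same inversion is corrected in the opposite direction when the state is
read back in nvmm_get_registers().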

To see a diff of this commit:
https://wip.pkgsrc.org/cgi-bin/gitweb.cgi?p=pkgsrc-wip.git;a=commitdiff;h=085bc7a84e9c1fb57118136db97d08ee90132ecb

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

diffstat:
 qemu-nvmm/Makefile                             |   20 +-
 qemu-nvmm/PLIST                                |    4 +
 qemu-nvmm/distinfo                             |   17 +-
 qemu-nvmm/patches/patch-hw_arm_boot.c          |   26 +
 qemu-nvmm/patches/patch-hw_core_loader.c       |   27 +
 qemu-nvmm/patches/patch-hw_core_uboot__image.h |   20 +
 qemu-nvmm/patches/patch-nvmm-support           | 1861 ++++++++++++------------
 qemu-nvmm/patches/patch-target_arm_cpu.h       |   20 +
 qemu-nvmm/patches/patch-target_arm_helper.c    |   20 +
 9 files changed, 1041 insertions(+), 974 deletions(-)

diffs:
diff --git a/qemu-nvmm/Makefile b/qemu-nvmm/Makefile
index 1b84067544..ce38febd87 100644
--- a/qemu-nvmm/Makefile
+++ b/qemu-nvmm/Makefile
@@ -1,7 +1,7 @@
-# $NetBSD: Makefile,v 1.191 2018/08/22 09:45:05 wiz Exp $
+# $NetBSD: Makefile,v 1.200 2019/01/31 13:39:10 martin Exp $
 
-DISTNAME=	qemu-3.0.0
-PKGREVISION=	2
+DISTNAME=	qemu-3.1.0
+PKGREVISION=	4
 CATEGORIES=	emulators
 MASTER_SITES=	https://download.qemu.org/
 EXTRACT_SUFX=	.tar.xz
@@ -64,6 +64,20 @@ UE_ARCHS+=		x86_64
 .if ${OPSYS} == "NetBSD"
 USER_EMUL=		i386 x86_64 sparc sparc64
 PLIST.nbd=		YES
+.elif !empty(OPSYS:M*BSD) || ${OPSYS} == "DragonFly"
+USER_EMUL=		i386 x86_64 sparc sparc64
+PLIST.nbd=		YES
+.elif ${OPSYS} == "Darwin"
+USER_EMUL=
+CONFIGURE_ARGS+=	--disable-bsd-user
+PLIST.nbd=		YES
+.elif ${OPSYS} == "Linux"
+USER_EMUL=		${UE_ARCHS}
+PLIST.nbd=		YES
+PLIST.ivshmem=		YES
+.elif !empty(MACHINE_PLATFORM:MSunOS-5.11-*)
+PLIST.nbd=		YES
+CONFIGURE_ARGS+=	--disable-coroutine-pool
 .endif
 
 PLIST_VARS+=		${UE_ARCHS} nbd ivshmem
diff --git a/qemu-nvmm/PLIST b/qemu-nvmm/PLIST
index d966b6499c..8b2c3b73ab 100644
--- a/qemu-nvmm/PLIST
+++ b/qemu-nvmm/PLIST
@@ -1,4 +1,5 @@
 @comment $NetBSD$
+bin/qemu-edid
 bin/qemu-ga
 bin/qemu-img
 bin/qemu-io
@@ -7,6 +8,7 @@ bin/qemu-system-x86_64
 man/man1/qemu-img.1
 man/man1/qemu.1
 man/man7/qemu-block-drivers.7
+man/man7/qemu-cpu-models.7
 man/man7/qemu-ga-ref.7
 man/man7/qemu-qmp-ref.7
 man/man8/qemu-ga.8
@@ -99,8 +101,10 @@ share/qemu/spapr-rtas.bin
 share/qemu/trace-events-all
 share/qemu/u-boot-sam460-20100605.bin
 share/qemu/u-boot.e500
+share/qemu/vgabios-bochs-display.bin
 share/qemu/vgabios-cirrus.bin
 share/qemu/vgabios-qxl.bin
+share/qemu/vgabios-ramfb.bin
 share/qemu/vgabios-stdvga.bin
 share/qemu/vgabios-virtio.bin
 share/qemu/vgabios-vmware.bin
diff --git a/qemu-nvmm/distinfo b/qemu-nvmm/distinfo
index 2a6c71d78b..459e65b6e8 100644
--- a/qemu-nvmm/distinfo
+++ b/qemu-nvmm/distinfo
@@ -1,17 +1,22 @@
-$NetBSD: distinfo,v 1.137 2018/08/16 10:15:09 adam Exp $
+$NetBSD: distinfo,v 1.141 2019/01/31 13:39:10 martin Exp $
 
-SHA1 (qemu-3.0.0.tar.xz) = fffb4aa0139c7290295a129e040cec0df4468ea6
-RMD160 (qemu-3.0.0.tar.xz) = ea61cb8b6b144c7017c3a53161f883c3aeb15611
-SHA512 (qemu-3.0.0.tar.xz) = a764302f50b9aca4134bbbc1f361b98e71240cdc7b25600dfe733bf4cf17bd86000bd28357697b08f3b656899dceb9e459350b8d55557817444ed5d7fa380a5a
-Size (qemu-3.0.0.tar.xz) = 35624516 bytes
+SHA1 (qemu-3.1.0.tar.xz) = 3ed63c0c05abc8c8ec075dac2688c229f139a5da
+RMD160 (qemu-3.1.0.tar.xz) = 7650d76b8578ee2c31cef048c7929b30c607b83d
+SHA512 (qemu-3.1.0.tar.xz) = 7e8dae823937cfac2f0c60406bd3bdcb89df40313dab2a4bed327d5198f7fcc68ac8b31e44692caa09299cc71256ee0b8c17e4f49f78ada8043d424f5daf82fe
+Size (qemu-3.1.0.tar.xz) = 36070104 bytes
 SHA1 (patch-Makefile) = b3899fb8d0dd2f29bf3edd843836612e6e6c019c
 SHA1 (patch-audio_audio.c) = 98a1de2fd48638886b5d16f6a61dc72910e98b41
 SHA1 (patch-block.c) = 5eb15a87d6646719bf1e9277fbe73a99e4905481
 SHA1 (patch-contrib_ivshmem-client_ivshmem-client.c) = 40c8751607cbf66a37e4c4e08f2664b864e2e984
 SHA1 (patch-contrib_ivshmem-server_ivshmem-server.c) = d8f53432b5752f4263dc4ef96108a976a05147a3
+SHA1 (patch-hw_arm_boot.c) = bd28e4b8e8732a2b01ba1d0e8a727e8e7bc5227a
+SHA1 (patch-hw_core_loader.c) = 06ff8bfa5be720e428668987598d55b6799202e7
+SHA1 (patch-hw_core_uboot__image.h) = 26a656310d991747b7080b9f28042afd536e4c28
 SHA1 (patch-hw_display_omap__dss.c) = 6b13242f28e32346bc70548c216c578d98fd3420
 SHA1 (patch-hw_net_etraxfs__eth.c) = e5dd1661d60dbcd27b332403e0843500ba9544bc
 SHA1 (patch-hw_net_xilinx__axienet.c) = ebcd2676d64ce6f31e4a8c976d4fdf530ad5e8b7
 SHA1 (patch-hw_usb_dev-mtp.c) = 66543b5559d92f8e2fa9a6eb85e5dfe7c1ad3339
-SHA1 (patch-nvmm-support) = c8b47aa128df97ff9c16b6d334bd1ac4b0e7601c
+SHA1 (patch-nvmm-support) = 788b054cbdb232e1f6df087f222ea6b56a9d5e31
+SHA1 (patch-target_arm_cpu.h) = 0f70a35900c7cc3124dc11969643e0eef6ad6af5
+SHA1 (patch-target_arm_helper.c) = 08f9425422080442a2c90bb252423bab38651ae4
 SHA1 (patch-tests_Makefile.include) = 42345d697cb2e324dccf1d68bd8d61e8001c6162
diff --git a/qemu-nvmm/patches/patch-hw_arm_boot.c b/qemu-nvmm/patches/patch-hw_arm_boot.c
new file mode 100644
index 0000000000..60b69f66a8
--- /dev/null
+++ b/qemu-nvmm/patches/patch-hw_arm_boot.c
@@ -0,0 +1,26 @@
+$NetBSD: patch-hw_arm_boot.c,v 1.1 2018/11/05 07:27:59 skrll Exp $
+
+--- hw/arm/boot.c.orig	2018-11-04 17:27:47.000000000 +0000
++++ hw/arm/boot.c
+@@ -29,8 +29,9 @@
+  * Documentation/arm/Booting and Documentation/arm64/booting.txt
+  * They have different preferred image load offsets from system RAM base.
+  */
+-#define KERNEL_ARGS_ADDR 0x100
+-#define KERNEL_LOAD_ADDR 0x00010000
++#define KERNEL_ARGS_ADDR   0x100
++#define KERNEL_NOLOAD_ADDR 0x00000000
++#define KERNEL_LOAD_ADDR   0x00010000
+ #define KERNEL64_LOAD_ADDR 0x00080000
+ 
+ #define ARM64_TEXT_OFFSET_OFFSET    8
+@@ -1049,7 +1050,8 @@ void arm_load_kernel(ARMCPU *cpu, struct
+     }
+     entry = elf_entry;
+     if (kernel_size < 0) {
+-        kernel_size = load_uimage_as(info->kernel_filename, &entry, NULL,
++        uint64_t loadaddr = info->loader_start + KERNEL_NOLOAD_ADDR;
++        kernel_size = load_uimage_as(info->kernel_filename, &entry, &loadaddr,
+                                      &is_linux, NULL, NULL, as);
+     }
+     if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64) && kernel_size < 0) {
diff --git a/qemu-nvmm/patches/patch-hw_core_loader.c b/qemu-nvmm/patches/patch-hw_core_loader.c
new file mode 100644
index 0000000000..700792064e
--- /dev/null
+++ b/qemu-nvmm/patches/patch-hw_core_loader.c
@@ -0,0 +1,27 @@
+$NetBSD: patch-hw_core_loader.c,v 1.1 2018/11/05 07:27:59 skrll Exp $
+
+--- hw/core/loader.c.orig	2018-08-14 19:10:34.000000000 +0000
++++ hw/core/loader.c
+@@ -637,13 +637,19 @@ static int load_uboot_image(const char *
+         goto out;
+ 
+     if (hdr->ih_type != image_type) {
+-        fprintf(stderr, "Wrong image type %d, expected %d\n", hdr->ih_type,
+-                image_type);
+-        goto out;
++        if (image_type != IH_TYPE_KERNEL && hdr->ih_type != IH_TYPE_KERNEL_NOLOAD) {
++            fprintf(stderr, "Wrong image type %d, expected %d\n", hdr->ih_type,
++                    image_type);
++            goto out;
++        }
+     }
+ 
+     /* TODO: Implement other image types.  */
+     switch (hdr->ih_type) {
++    case IH_TYPE_KERNEL_NOLOAD:
++        hdr->ih_load = *loadaddr + sizeof(*hdr);
++        hdr->ih_ep += hdr->ih_load;
++
+     case IH_TYPE_KERNEL:
+         address = hdr->ih_load;
+         if (translate_fn) {
diff --git a/qemu-nvmm/patches/patch-hw_core_uboot__image.h b/qemu-nvmm/patches/patch-hw_core_uboot__image.h
new file mode 100644
index 0000000000..a9a005762b
--- /dev/null
+++ b/qemu-nvmm/patches/patch-hw_core_uboot__image.h
@@ -0,0 +1,20 @@
+$NetBSD: patch-hw_core_uboot__image.h,v 1.1 2018/11/05 07:27:59 skrll Exp $
+
+--- hw/core/uboot_image.h.orig	2018-08-14 19:10:34.000000000 +0000
++++ hw/core/uboot_image.h
+@@ -75,6 +75,7 @@
+ #define IH_CPU_NIOS2		15	/* Nios-II	*/
+ #define IH_CPU_BLACKFIN		16	/* Blackfin	*/
+ #define IH_CPU_AVR32		17	/* AVR32	*/
++#define IH_CPU_ARM64		22	/* ARM64        */
+ 
+ /*
+  * Image Types
+@@ -124,6 +125,7 @@
+ #define IH_TYPE_SCRIPT		6	/* Script file			*/
+ #define IH_TYPE_FILESYSTEM	7	/* Filesystem Image (any type)	*/
+ #define IH_TYPE_FLATDT		8	/* Binary Flat Device Tree Blob	*/
++#define IH_TYPE_KERNEL_NOLOAD  14	/* OS Kernel Image (noload)	*/
+ 
+ /*
+  * Compression Types
diff --git a/qemu-nvmm/patches/patch-nvmm-support b/qemu-nvmm/patches/patch-nvmm-support
index 3710f08141..013f0d6e23 100644
--- a/qemu-nvmm/patches/patch-nvmm-support
+++ b/qemu-nvmm/patches/patch-nvmm-support
@@ -2,8 +2,8 @@ $NetBSD: patch-nvmm_support,v 1.1 2018/10/29 00:00:00 maxv Exp $
 
 Add NVMM support.
 
---- accel/stubs/Makefile.objs	2018-08-14 21:10:34.000000000 +0200
-+++ accel/stubs/Makefile.objs	2018-10-30 10:53:45.520361253 +0100
+--- accel/stubs/Makefile.objs	2018-12-11 18:44:34.000000000 +0100
++++ accel/stubs/Makefile.objs	2019-02-04 09:58:31.612072806 +0100
 @@ -1,5 +1,6 @@
  obj-$(call lnot,$(CONFIG_HAX))  += hax-stub.o
  obj-$(call lnot,$(CONFIG_HVF))  += hvf-stub.o
@@ -12,39 +12,15 @@ Add NVMM support.
  obj-$(call lnot,$(CONFIG_KVM))  += kvm-stub.o
  obj-$(call lnot,$(CONFIG_TCG))  += tcg-stub.o
 --- accel/stubs/nvmm-stub.c	1970-01-01 01:00:00.000000000 +0100
-+++ accel/stubs/nvmm-stub.c	2019-01-02 19:21:03.914538149 +0100
-@@ -0,0 +1,67 @@
++++ accel/stubs/nvmm-stub.c	2019-02-04 12:03:40.012081666 +0100
+@@ -0,0 +1,43 @@
 +/*
-+ * Copyright (c) 2018 The NetBSD Foundation, Inc.
-+ * All rights reserved.
++ * Copyright (c) 2018-2019 Maxime Villard, All rights reserved.
 + *
-+ * This code is derived from software contributed to The NetBSD Foundation
-+ * by Maxime Villard.
++ * NetBSD Virtual Machine Monitor (NVMM) accelerator stub.
 + *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions
-+ * are met:
-+ * 1. Redistributions of source code must retain the above copyright
-+ *    notice, this list of conditions and the following disclaimer.
-+ * 2. Redistributions in binary form must reproduce the above copyright
-+ *    notice, this list of conditions and the following disclaimer in the
-+ *    documentation and/or other materials provided with the distribution.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
-+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
-+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
-+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/*
-+ * QEMU NetBSD Virtual Machine Monitor accelerator (NVMM) stub
++ * This work is licensed under the terms of the GNU GPL, version 2 or later.
++ * See the COPYING file in the top-level directory.
 + */
 +
 +#include "qemu/osdep.h"
@@ -54,12 +30,12 @@ Add NVMM support.
 +
 +int nvmm_init_vcpu(CPUState *cpu)
 +{
-+	return -1;
++    return -1;
 +}
 +
 +int nvmm_vcpu_exec(CPUState *cpu)
 +{
-+	return -1;
++    return -1;
 +}
 +
 +void nvmm_destroy_vcpu(CPUState *cpu)
@@ -81,9 +57,9 @@ Add NVMM support.
 +void nvmm_cpu_synchronize_pre_loadvm(CPUState *cpu)
 +{
 +}
---- configure	2018-08-14 21:10:34.000000000 +0200
-+++ configure	2018-11-25 07:55:31.665393888 +0100
-@@ -238,6 +238,17 @@
+--- configure	2018-12-11 18:44:34.000000000 +0100
++++ configure	2019-02-04 11:34:28.769555764 +0100
+@@ -237,6 +237,17 @@
      return 1
  }
  
@@ -101,7 +77,7 @@ Add NVMM support.
  supported_target() {
      case "$1" in
          *-softmmu)
-@@ -265,6 +276,7 @@
+@@ -264,6 +275,7 @@
      supported_hax_target "$1" && return 0
      supported_hvf_target "$1" && return 0
      supported_whpx_target "$1" && return 0
@@ -109,15 +85,15 @@ Add NVMM support.
      print_error "TCG disabled, but hardware accelerator not available for '$target'"
      return 1
  }
-@@ -374,6 +386,7 @@
+@@ -375,6 +387,7 @@
  hax="no"
  hvf="no"
  whpx="no"
 +nvmm="no"
  rdma=""
+ pvrdma=""
  gprof="no"
- debug_tcg="no"
-@@ -1133,6 +1146,10 @@
+@@ -1143,6 +1156,10 @@
    ;;
    --enable-whpx) whpx="yes"
    ;;
@@ -128,19 +104,19 @@ Add NVMM support.
    --disable-tcg-interpreter) tcg_interpreter="no"
    ;;
    --enable-tcg-interpreter) tcg_interpreter="yes"
-@@ -1669,6 +1686,7 @@
+@@ -1724,6 +1741,7 @@
    hax             HAX acceleration support
    hvf             Hypervisor.framework acceleration support
    whpx            Windows Hypervisor Platform acceleration support
 +  nvmm            NetBSD Virtual Machine Monitor acceleration support
-   rdma            Enable RDMA-based migration and PVRDMA support
+   rdma            Enable RDMA-based migration
+   pvrdma          Enable PVRDMA support
    vde             support for vde network
-   netmap          support for netmap network
-@@ -2612,6 +2630,20 @@
+@@ -2659,6 +2677,20 @@
  fi
  
  ##########################################
-+# NetBSD Virtual Machine Monitor accelerator (NVMM) check
++# NetBSD Virtual Machine Monitor (NVMM) accelerator check
 +if test "$nvmm" != "no" ; then
 +    if check_include "nvmm.h"; then
 +        nvmm="yes"
@@ -157,7 +133,7 @@ Add NVMM support.
  # Sparse probe
  if test "$sparse" != "no" ; then
    if has cgcc; then
-@@ -5945,6 +5977,7 @@
+@@ -6033,6 +6065,7 @@
  echo "HAX support       $hax"
  echo "HVF support       $hvf"
  echo "WHPX support      $whpx"
@@ -165,7 +141,7 @@ Add NVMM support.
  echo "TCG support       $tcg"
  if test "$tcg" = "yes" ; then
      echo "TCG debug enabled $debug_tcg"
-@@ -7162,6 +7195,9 @@
+@@ -7291,6 +7324,9 @@
  if supported_whpx_target $target; then
      echo "CONFIG_WHPX=y" >> $config_target_mak
  fi
@@ -175,8 +151,8 @@ Add NVMM support.
  if test "$target_bigendian" = "yes" ; then
    echo "TARGET_WORDS_BIGENDIAN=y" >> $config_target_mak
  fi
---- cpus.c	2018-08-14 21:10:34.000000000 +0200
-+++ cpus.c	2019-01-02 19:01:47.486471540 +0100
+--- cpus.c	2018-12-11 18:44:34.000000000 +0100
++++ cpus.c	2019-02-04 10:03:02.152520882 +0100
 @@ -40,6 +40,7 @@
  #include "sysemu/hax.h"
  #include "sysemu/hvf.h"
@@ -185,7 +161,7 @@ Add NVMM support.
  #include "exec/exec-all.h"
  
  #include "qemu/thread.h"
-@@ -1616,6 +1617,48 @@
+@@ -1691,6 +1692,48 @@
      return NULL;
  }
  
@@ -234,7 +210,7 @@ Add NVMM support.
  #ifdef _WIN32
  static void CALLBACK dummy_apc_func(ULONG_PTR unused)
  {
-@@ -1970,6 +2013,19 @@
+@@ -2051,6 +2094,19 @@
  #endif
  }
  
@@ -254,7 +230,7 @@ Add NVMM support.
  static void qemu_dummy_start_vcpu(CPUState *cpu)
  {
      char thread_name[VCPU_THREAD_NAME_SIZE];
-@@ -2007,6 +2063,8 @@
+@@ -2088,6 +2144,8 @@
          qemu_tcg_init_vcpu(cpu);
      } else if (whpx_enabled()) {
          qemu_whpx_start_vcpu(cpu);
@@ -263,8 +239,8 @@ Add NVMM support.
      } else {
          qemu_dummy_start_vcpu(cpu);
      }
---- include/sysemu/hw_accel.h	2018-08-14 21:10:34.000000000 +0200
-+++ include/sysemu/hw_accel.h	2018-10-30 11:01:56.162918154 +0100
+--- include/sysemu/hw_accel.h	2018-12-11 18:44:34.000000000 +0100
++++ include/sysemu/hw_accel.h	2019-02-04 10:04:09.887130860 +0100
 @@ -15,6 +15,7 @@
  #include "sysemu/hax.h"
  #include "sysemu/kvm.h"
@@ -314,39 +290,15 @@ Add NVMM support.
  
  #endif /* QEMU_HW_ACCEL_H */
 --- include/sysemu/nvmm.h	1970-01-01 01:00:00.000000000 +0100
-+++ include/sysemu/nvmm.h	2019-01-02 19:21:06.862479422 +0100
-@@ -0,0 +1,59 @@
++++ include/sysemu/nvmm.h	2019-02-04 12:06:14.967932051 +0100
+@@ -0,0 +1,35 @@
 +/*
-+ * Copyright (c) 2018 The NetBSD Foundation, Inc.
-+ * All rights reserved.
-+ *
-+ * This code is derived from software contributed to The NetBSD Foundation
-+ * by Maxime Villard.
++ * Copyright (c) 2018-2019 Maxime Villard, All rights reserved.
 + *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions
-+ * are met:
-+ * 1. Redistributions of source code must retain the above copyright
-+ *    notice, this list of conditions and the following disclaimer.
-+ * 2. Redistributions in binary form must reproduce the above copyright
-+ *    notice, this list of conditions and the following disclaimer in the
-+ *    documentation and/or other materials provided with the distribution.
++ * NetBSD Virtual Machine Monitor (NVMM) accelerator support.
 + *
-+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
-+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
-+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
-+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/*
-+ * QEMU NetBSD Virtual Machine Monitor accelerator (NVMM) support
++ * This work is licensed under the terms of the GNU GPL, version 2 or later.
++ * See the COPYING file in the top-level directory.
 + */
 +
 +#ifndef QEMU_NVMM_H
@@ -375,9 +327,9 @@ Add NVMM support.
 +#endif /* CONFIG_NVMM */
 +
 +#endif /* CONFIG_NVMM */
---- qemu-options.hx	2018-08-14 21:10:34.000000000 +0200
-+++ qemu-options.hx	2018-10-30 11:03:36.343787082 +0100
-@@ -67,7 +67,7 @@
+--- qemu-options.hx	2018-12-11 18:44:34.000000000 +0100
++++ qemu-options.hx	2019-02-04 10:05:16.697759799 +0100
+@@ -66,7 +66,7 @@
  @table @option
  @item accel=@var{accels1}[:@var{accels2}[:...]]
  This is used to enable an accelerator. Depending on the target architecture,
@@ -386,7 +338,7 @@ Add NVMM support.
  more than one accelerator specified, the next one is used if the previous one
  fails to initialize.
  @item kernel_irqchip=on|off
-@@ -129,13 +129,13 @@
+@@ -119,13 +119,13 @@
  
  DEF("accel", HAS_ARG, QEMU_OPTION_accel,
      "-accel [accel=]accelerator[,thread=single|multi]\n"
@@ -402,8 +354,8 @@ Add NVMM support.
  more than one accelerator specified, the next one is used if the previous one
  fails to initialize.
  @table @option
---- target/i386/helper.c	2018-08-14 21:10:35.000000000 +0200
-+++ target/i386/helper.c	2018-10-30 11:04:55.680328352 +0100
+--- target/i386/helper.c	2018-12-11 18:44:34.000000000 +0100
++++ target/i386/helper.c	2019-02-04 10:05:47.993117568 +0100
 @@ -986,7 +986,7 @@
      X86CPU *cpu = x86_env_get_cpu(env);
      CPUState *cs = CPU(cpu);
@@ -413,47 +365,26 @@ Add NVMM support.
          env->tpr_access_type = access;
  
          cpu_interrupt(cs, CPU_INTERRUPT_TPR);
---- target/i386/Makefile.objs	2018-08-14 21:10:35.000000000 +0200
-+++ target/i386/Makefile.objs	2018-10-30 11:04:35.159682003 +0100
-@@ -17,3 +17,4 @@
+--- target/i386/Makefile.objs	2018-12-11 18:44:34.000000000 +0100
++++ target/i386/Makefile.objs	2019-02-04 10:06:13.786588242 +0100
+@@ -17,6 +17,7 @@
  obj-$(CONFIG_HVF) += hvf/
  endif
  obj-$(CONFIG_WHPX) += whpx-all.o
 +obj-$(CONFIG_NVMM) += nvmm-all.o
+ endif
+ obj-$(CONFIG_SEV) += sev.o
+ obj-$(call lnot,$(CONFIG_SEV)) += sev-stub.o
 --- target/i386/nvmm-all.c	1970-01-01 01:00:00.000000000 +0100
-+++ target/i386/nvmm-all.c	2019-01-24 13:08:23.465295728 +0100
-@@ -0,0 +1,1197 @@
++++ target/i386/nvmm-all.c	2019-02-04 12:02:48.551127655 +0100
+@@ -0,0 +1,1173 @@
 +/*
-+ * Copyright (c) 2018 The NetBSD Foundation, Inc.
-+ * All rights reserved.
++ * Copyright (c) 2018-2019 Maxime Villard, All rights reserved.
 + *
-+ * This code is derived from software contributed to The NetBSD Foundation
-+ * by Maxime Villard.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions
-+ * are met:
-+ * 1. Redistributions of source code must retain the above copyright
-+ *    notice, this list of conditions and the following disclaimer.
-+ * 2. Redistributions in binary form must reproduce the above copyright
-+ *    notice, this list of conditions and the following disclaimer in the
-+ *    documentation and/or other materials provided with the distribution.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
-+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
-+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
-+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/*
 + * NetBSD Virtual Machine Monitor (NVMM) accelerator for QEMU.
++ *
++ * This work is licensed under the terms of the GNU GPL, version 2 or later.
++ * See the COPYING file in the top-level directory.
 + */
 +
 +#include "qemu/osdep.h"
@@ -478,32 +409,32 @@ Add NVMM support.
 +static bool nvmm_allowed = false;
 +
 +struct nvmm_vcpu {
-+	nvmm_cpuid_t cpuid;
-+	uint8_t tpr;
-+	bool stop;
++    nvmm_cpuid_t cpuid;
++    uint8_t tpr;
++    bool stop;
 +
-+	/* Window-exiting for INTs/NMIs. */
-+	bool int_window_exit;
-+	bool nmi_window_exit;
++    /* Window-exiting for INTs/NMIs. */
++    bool int_window_exit;
++    bool nmi_window_exit;
 +
-+	/* The guest is an interrupt shadow (POP SS, etc). */
-+	bool int_shadow;
++    /* The guest is an interrupt shadow (POP SS, etc). */
++    bool int_shadow;
 +};
 +
 +static struct {
-+	struct nvmm_machine mach;
++    struct nvmm_machine mach;
 +} nvmm_global;
 +
 +static struct nvmm_vcpu *
 +get_nvmm_vcpu(CPUState *cpu)
 +{
-+	return (struct nvmm_vcpu *)cpu->hax_vcpu;
++    return (struct nvmm_vcpu *)cpu->hax_vcpu;
 +}
 +
 +static struct nvmm_machine *
 +get_nvmm_mach(void)
 +{
-+	return &nvmm_global.mach;
++    return &nvmm_global.mach;
 +}
 +
 +/* -------------------------------------------------------------------------- */
@@ -511,314 +442,314 @@ Add NVMM support.
 +static void
 +nvmm_set_segment(struct nvmm_x64_state_seg *nseg, const SegmentCache *qseg)
 +{
-+	uint32_t attrib = qseg->flags;
-+
-+	nseg->selector = qseg->selector;
-+	nseg->limit = qseg->limit;
-+	nseg->base = qseg->base;
-+	nseg->attrib.type =
-+	    (__SHIFTOUT(attrib, DESC_S_MASK) << 4) |
-+	    (__SHIFTOUT(attrib, DESC_TYPE_MASK) << 0);
-+	nseg->attrib.dpl = __SHIFTOUT(attrib, DESC_DPL_MASK);
-+	nseg->attrib.p = __SHIFTOUT(attrib, DESC_P_MASK);
-+	nseg->attrib.avl = __SHIFTOUT(attrib, DESC_AVL_MASK);
-+	nseg->attrib.lng = __SHIFTOUT(attrib, DESC_L_MASK);
-+	nseg->attrib.def32 = __SHIFTOUT(attrib, DESC_B_MASK);
-+	nseg->attrib.gran = __SHIFTOUT(attrib, DESC_G_MASK);
++    uint32_t attrib = qseg->flags;
++
++    nseg->selector = qseg->selector;
++    nseg->limit = qseg->limit;
++    nseg->base = qseg->base;
++    nseg->attrib.type =
++        (__SHIFTOUT(attrib, DESC_S_MASK) << 4) |
++        (__SHIFTOUT(attrib, DESC_TYPE_MASK) << 0);
++    nseg->attrib.dpl = __SHIFTOUT(attrib, DESC_DPL_MASK);
++    nseg->attrib.p = __SHIFTOUT(attrib, DESC_P_MASK);
++    nseg->attrib.avl = __SHIFTOUT(attrib, DESC_AVL_MASK);
++    nseg->attrib.lng = __SHIFTOUT(attrib, DESC_L_MASK);
++    nseg->attrib.def32 = __SHIFTOUT(attrib, DESC_B_MASK);
++    nseg->attrib.gran = __SHIFTOUT(attrib, DESC_G_MASK);
 +}
 +
 +static void
 +nvmm_set_registers(CPUState *cpu)
 +{
-+	struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
-+	struct nvmm_machine *mach = get_nvmm_mach();
-+	struct nvmm_vcpu *vcpu = get_nvmm_vcpu(cpu);
-+	struct nvmm_x64_state state;
-+	uint64_t bitmap;
-+	size_t i;
-+	int ret;
-+
-+	assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
-+
-+	/* GPRs. */
-+	state.gprs[NVMM_X64_GPR_RAX] = (uint64_t)env->regs[R_EAX];
-+	state.gprs[NVMM_X64_GPR_RCX] = (uint64_t)env->regs[R_ECX];
-+	state.gprs[NVMM_X64_GPR_RDX] = (uint64_t)env->regs[R_EDX];
-+	state.gprs[NVMM_X64_GPR_RBX] = (uint64_t)env->regs[R_EBX];
-+	state.gprs[NVMM_X64_GPR_RSP] = (uint64_t)env->regs[R_ESP];
-+	state.gprs[NVMM_X64_GPR_RBP] = (uint64_t)env->regs[R_EBP];
-+	state.gprs[NVMM_X64_GPR_RSI] = (uint64_t)env->regs[R_ESI];
-+	state.gprs[NVMM_X64_GPR_RDI] = (uint64_t)env->regs[R_EDI];
-+	state.gprs[NVMM_X64_GPR_R8]  = (uint64_t)env->regs[R_R8];
-+	state.gprs[NVMM_X64_GPR_R9]  = (uint64_t)env->regs[R_R9];
-+	state.gprs[NVMM_X64_GPR_R10] = (uint64_t)env->regs[R_R10];
-+	state.gprs[NVMM_X64_GPR_R11] = (uint64_t)env->regs[R_R11];
-+	state.gprs[NVMM_X64_GPR_R12] = (uint64_t)env->regs[R_R12];
-+	state.gprs[NVMM_X64_GPR_R13] = (uint64_t)env->regs[R_R13];
-+	state.gprs[NVMM_X64_GPR_R14] = (uint64_t)env->regs[R_R14];
-+	state.gprs[NVMM_X64_GPR_R15] = (uint64_t)env->regs[R_R15];
-+
-+	/* RIP and RFLAGS. */
-+	state.gprs[NVMM_X64_GPR_RIP] = (uint64_t)env->eip;
-+	state.gprs[NVMM_X64_GPR_RFLAGS] = (uint64_t)env->eflags;
-+
-+	/* Segments. */
-+	nvmm_set_segment(&state.segs[NVMM_X64_SEG_CS], &env->segs[R_CS]);
-+	nvmm_set_segment(&state.segs[NVMM_X64_SEG_DS], &env->segs[R_DS]);
-+	nvmm_set_segment(&state.segs[NVMM_X64_SEG_ES], &env->segs[R_ES]);
-+	nvmm_set_segment(&state.segs[NVMM_X64_SEG_FS], &env->segs[R_FS]);
-+	nvmm_set_segment(&state.segs[NVMM_X64_SEG_GS], &env->segs[R_GS]);
-+	nvmm_set_segment(&state.segs[NVMM_X64_SEG_SS], &env->segs[R_SS]);
-+
-+	/* Special segments. */
-+	nvmm_set_segment(&state.segs[NVMM_X64_SEG_GDT], &env->gdt);
-+	nvmm_set_segment(&state.segs[NVMM_X64_SEG_LDT], &env->ldt);
-+	nvmm_set_segment(&state.segs[NVMM_X64_SEG_TR], &env->tr);
-+	nvmm_set_segment(&state.segs[NVMM_X64_SEG_IDT], &env->idt);
-+
-+	/* Control registers. */
-+	state.crs[NVMM_X64_CR_CR0] = (uint64_t)env->cr[0];
-+	state.crs[NVMM_X64_CR_CR2] = (uint64_t)env->cr[2];
-+	state.crs[NVMM_X64_CR_CR3] = (uint64_t)env->cr[3];
-+	state.crs[NVMM_X64_CR_CR4] = (uint64_t)env->cr[4];
-+	state.crs[NVMM_X64_CR_CR8] = (uint64_t)vcpu->tpr;
-+
-+	/* Debug registers. */
-+	state.drs[NVMM_X64_DR_DR1] = (uint64_t)env->dr[1];
-+	state.drs[NVMM_X64_DR_DR2] = (uint64_t)env->dr[2];
-+	state.drs[NVMM_X64_DR_DR3] = (uint64_t)env->dr[3];
-+	state.drs[NVMM_X64_DR_DR6] = (uint64_t)env->dr[6];
-+	state.drs[NVMM_X64_DR_DR7] = (uint64_t)env->dr[7];
-+
-+	/* FPU. */
-+	state.fpu.fx_cw = env->fpuc;
-+	state.fpu.fx_sw = (env->fpus & ~0x3800) | ((env->fpstt & 0x7) << 11);
-+	state.fpu.fx_tw = 0;
-+	for (i = 0; i < 8; i++) {
-+		state.fpu.fx_tw |= (!env->fptags[i]) << i;
-+	}
-+	state.fpu.fx_opcode = env->fpop;
-+	state.fpu.fx_ip.fa_64 = env->fpip;
-+	state.fpu.fx_dp.fa_64 = env->fpdp;
-+	state.fpu.fx_mxcsr = env->mxcsr;
-+	state.fpu.fx_mxcsr_mask = 0x0000FFFF;
-+	assert(sizeof(state.fpu.fx_87_ac) == sizeof(env->fpregs));
-+	memcpy(state.fpu.fx_87_ac, env->fpregs, sizeof(env->fpregs));
-+	for (i = 0; i < 16; i++) {
-+		memcpy(&state.fpu.fx_xmm[i].xmm_bytes[0],
-+		    &env->xmm_regs[i].ZMM_Q(0), 8);
-+		memcpy(&state.fpu.fx_xmm[i].xmm_bytes[8],
-+		    &env->xmm_regs[i].ZMM_Q(1), 8);
-+	}
-+
-+	/* MSRs. */
-+	state.msrs[NVMM_X64_MSR_EFER] = env->efer;
-+	state.msrs[NVMM_X64_MSR_STAR] = env->star;
++    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
++    struct nvmm_machine *mach = get_nvmm_mach();
++    struct nvmm_vcpu *vcpu = get_nvmm_vcpu(cpu);
++    struct nvmm_x64_state state;
++    uint64_t bitmap;
++    size_t i;
++    int ret;
++
++    assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
++
++    /* GPRs. */
++    state.gprs[NVMM_X64_GPR_RAX] = (uint64_t)env->regs[R_EAX];
++    state.gprs[NVMM_X64_GPR_RCX] = (uint64_t)env->regs[R_ECX];
++    state.gprs[NVMM_X64_GPR_RDX] = (uint64_t)env->regs[R_EDX];
++    state.gprs[NVMM_X64_GPR_RBX] = (uint64_t)env->regs[R_EBX];
++    state.gprs[NVMM_X64_GPR_RSP] = (uint64_t)env->regs[R_ESP];
++    state.gprs[NVMM_X64_GPR_RBP] = (uint64_t)env->regs[R_EBP];
++    state.gprs[NVMM_X64_GPR_RSI] = (uint64_t)env->regs[R_ESI];
++    state.gprs[NVMM_X64_GPR_RDI] = (uint64_t)env->regs[R_EDI];
++    state.gprs[NVMM_X64_GPR_R8]  = (uint64_t)env->regs[R_R8];
++    state.gprs[NVMM_X64_GPR_R9]  = (uint64_t)env->regs[R_R9];
++    state.gprs[NVMM_X64_GPR_R10] = (uint64_t)env->regs[R_R10];
++    state.gprs[NVMM_X64_GPR_R11] = (uint64_t)env->regs[R_R11];
++    state.gprs[NVMM_X64_GPR_R12] = (uint64_t)env->regs[R_R12];
++    state.gprs[NVMM_X64_GPR_R13] = (uint64_t)env->regs[R_R13];
++    state.gprs[NVMM_X64_GPR_R14] = (uint64_t)env->regs[R_R14];
++    state.gprs[NVMM_X64_GPR_R15] = (uint64_t)env->regs[R_R15];
++
++    /* RIP and RFLAGS. */
++    state.gprs[NVMM_X64_GPR_RIP] = (uint64_t)env->eip;
++    state.gprs[NVMM_X64_GPR_RFLAGS] = (uint64_t)env->eflags;
++
++    /* Segments. */
++    nvmm_set_segment(&state.segs[NVMM_X64_SEG_CS], &env->segs[R_CS]);
++    nvmm_set_segment(&state.segs[NVMM_X64_SEG_DS], &env->segs[R_DS]);
++    nvmm_set_segment(&state.segs[NVMM_X64_SEG_ES], &env->segs[R_ES]);
++    nvmm_set_segment(&state.segs[NVMM_X64_SEG_FS], &env->segs[R_FS]);
++    nvmm_set_segment(&state.segs[NVMM_X64_SEG_GS], &env->segs[R_GS]);
++    nvmm_set_segment(&state.segs[NVMM_X64_SEG_SS], &env->segs[R_SS]);
++
++    /* Special segments. */
++    nvmm_set_segment(&state.segs[NVMM_X64_SEG_GDT], &env->gdt);
++    nvmm_set_segment(&state.segs[NVMM_X64_SEG_LDT], &env->ldt);
++    nvmm_set_segment(&state.segs[NVMM_X64_SEG_TR], &env->tr);
++    nvmm_set_segment(&state.segs[NVMM_X64_SEG_IDT], &env->idt);
++
++    /* Control registers. */
++    state.crs[NVMM_X64_CR_CR0] = (uint64_t)env->cr[0];
++    state.crs[NVMM_X64_CR_CR2] = (uint64_t)env->cr[2];
++    state.crs[NVMM_X64_CR_CR3] = (uint64_t)env->cr[3];
++    state.crs[NVMM_X64_CR_CR4] = (uint64_t)env->cr[4];
++    state.crs[NVMM_X64_CR_CR8] = (uint64_t)vcpu->tpr;
++
++    /* Debug registers. */
++    state.drs[NVMM_X64_DR_DR1] = (uint64_t)env->dr[1];
++    state.drs[NVMM_X64_DR_DR2] = (uint64_t)env->dr[2];
++    state.drs[NVMM_X64_DR_DR3] = (uint64_t)env->dr[3];
++    state.drs[NVMM_X64_DR_DR6] = (uint64_t)env->dr[6];
++    state.drs[NVMM_X64_DR_DR7] = (uint64_t)env->dr[7];
++
++    /* FPU. */
++    state.fpu.fx_cw = env->fpuc;
++    state.fpu.fx_sw = (env->fpus & ~0x3800) | ((env->fpstt & 0x7) << 11);
++    state.fpu.fx_tw = 0;
++    for (i = 0; i < 8; i++) {
++        state.fpu.fx_tw |= (!env->fptags[i]) << i;
++    }
++    state.fpu.fx_opcode = env->fpop;
++    state.fpu.fx_ip.fa_64 = env->fpip;
++    state.fpu.fx_dp.fa_64 = env->fpdp;
++    state.fpu.fx_mxcsr = env->mxcsr;
++    state.fpu.fx_mxcsr_mask = 0x0000FFFF;
++    assert(sizeof(state.fpu.fx_87_ac) == sizeof(env->fpregs));
++    memcpy(state.fpu.fx_87_ac, env->fpregs, sizeof(env->fpregs));
++    for (i = 0; i < 16; i++) {
++        memcpy(&state.fpu.fx_xmm[i].xmm_bytes[0],
++            &env->xmm_regs[i].ZMM_Q(0), 8);
++        memcpy(&state.fpu.fx_xmm[i].xmm_bytes[8],
++            &env->xmm_regs[i].ZMM_Q(1), 8);
++    }
++
++    /* MSRs. */
++    state.msrs[NVMM_X64_MSR_EFER] = env->efer;
++    state.msrs[NVMM_X64_MSR_STAR] = env->star;
 +#ifdef TARGET_X86_64
-+	state.msrs[NVMM_X64_MSR_LSTAR] = env->lstar;
-+	state.msrs[NVMM_X64_MSR_CSTAR] = env->cstar;
-+	state.msrs[NVMM_X64_MSR_SFMASK] = env->fmask;
-+	state.msrs[NVMM_X64_MSR_KERNELGSBASE] = env->kernelgsbase;
++    state.msrs[NVMM_X64_MSR_LSTAR] = env->lstar;
++    state.msrs[NVMM_X64_MSR_CSTAR] = env->cstar;
++    state.msrs[NVMM_X64_MSR_SFMASK] = env->fmask;
++    state.msrs[NVMM_X64_MSR_KERNELGSBASE] = env->kernelgsbase;
 +#endif
-+	state.msrs[NVMM_X64_MSR_SYSENTER_CS]  = env->sysenter_cs;
-+	state.msrs[NVMM_X64_MSR_SYSENTER_ESP] = env->sysenter_eip;
-+	state.msrs[NVMM_X64_MSR_SYSENTER_EIP] = env->sysenter_esp;
-+	state.msrs[NVMM_X64_MSR_PAT] = env->pat;
-+
-+	bitmap =
-+	    NVMM_X64_STATE_SEGS |
-+	    NVMM_X64_STATE_GPRS |
-+	    NVMM_X64_STATE_CRS  |
-+	    NVMM_X64_STATE_DRS  |
-+	    NVMM_X64_STATE_MSRS |
-+	    NVMM_X64_STATE_FPU;
-+
-+	ret = nvmm_vcpu_setstate(mach, vcpu->cpuid, &state, bitmap);
-+	if (ret == -1) {
-+		error_report("NVMM: Failed to set virtual processor context,"
-+		    " error=%d", errno);
-+	}
++    state.msrs[NVMM_X64_MSR_SYSENTER_CS]  = env->sysenter_cs;
++    state.msrs[NVMM_X64_MSR_SYSENTER_ESP] = env->sysenter_esp;
++    state.msrs[NVMM_X64_MSR_SYSENTER_EIP] = env->sysenter_eip;
++    state.msrs[NVMM_X64_MSR_PAT] = env->pat;
++
++    bitmap =
++        NVMM_X64_STATE_SEGS |
++        NVMM_X64_STATE_GPRS |
++        NVMM_X64_STATE_CRS  |
++        NVMM_X64_STATE_DRS  |
++        NVMM_X64_STATE_MSRS |
++        NVMM_X64_STATE_FPU;
++
++    ret = nvmm_vcpu_setstate(mach, vcpu->cpuid, &state, bitmap);
++    if (ret == -1) {
++        error_report("NVMM: Failed to set virtual processor context,"
++            " error=%d", errno);
++    }
 +}
 +
 +static void
 +nvmm_get_segment(SegmentCache *qseg, const struct nvmm_x64_state_seg *nseg)
 +{
-+	qseg->selector = nseg->selector;
-+	qseg->limit = nseg->limit;
-+	qseg->base = nseg->base;
-+
-+	qseg->flags =
-+	    __SHIFTIN((nseg->attrib.type & 0b10000) >> 4, DESC_S_MASK) |
-+	    __SHIFTIN((nseg->attrib.type & 0b01111) >> 0, DESC_TYPE_MASK) |
-+	    __SHIFTIN(nseg->attrib.dpl, DESC_DPL_MASK) |
-+	    __SHIFTIN(nseg->attrib.p, DESC_P_MASK) |
-+	    __SHIFTIN(nseg->attrib.avl, DESC_AVL_MASK) |
-+	    __SHIFTIN(nseg->attrib.lng, DESC_L_MASK) |
-+	    __SHIFTIN(nseg->attrib.def32, DESC_B_MASK) |
-+	    __SHIFTIN(nseg->attrib.gran, DESC_G_MASK);
++    qseg->selector = nseg->selector;
++    qseg->limit = nseg->limit;
++    qseg->base = nseg->base;
++
++    qseg->flags =
++        __SHIFTIN((nseg->attrib.type & 0b10000) >> 4, DESC_S_MASK) |
++        __SHIFTIN((nseg->attrib.type & 0b01111) >> 0, DESC_TYPE_MASK) |
++        __SHIFTIN(nseg->attrib.dpl, DESC_DPL_MASK) |
++        __SHIFTIN(nseg->attrib.p, DESC_P_MASK) |
++        __SHIFTIN(nseg->attrib.avl, DESC_AVL_MASK) |
++        __SHIFTIN(nseg->attrib.lng, DESC_L_MASK) |
++        __SHIFTIN(nseg->attrib.def32, DESC_B_MASK) |
++        __SHIFTIN(nseg->attrib.gran, DESC_G_MASK);
 +}
 +
 +static void
 +nvmm_get_registers(CPUState *cpu)
 +{
-+	struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
-+	struct nvmm_machine *mach = get_nvmm_mach();
-+	struct nvmm_vcpu *vcpu = get_nvmm_vcpu(cpu);
-+	X86CPU *x86_cpu = X86_CPU(cpu);
-+	struct nvmm_x64_state state;
-+	uint64_t bitmap, tpr;
-+	size_t i;
-+	int ret;
-+
-+	assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
-+
-+	bitmap =
-+	    NVMM_X64_STATE_SEGS |
-+	    NVMM_X64_STATE_GPRS |
-+	    NVMM_X64_STATE_CRS  |
-+	    NVMM_X64_STATE_DRS  |
-+	    NVMM_X64_STATE_MSRS |
-+	    NVMM_X64_STATE_FPU;
-+
-+	ret = nvmm_vcpu_getstate(mach, vcpu->cpuid, &state, bitmap);
-+	if (ret == -1) {
-+		error_report("NVMM: Failed to get virtual processor context,"
-+		    " error=%d", errno);
-+	}
-+
-+	/* GPRs. */
-+	env->regs[R_EAX] = state.gprs[NVMM_X64_GPR_RAX];
-+	env->regs[R_ECX] = state.gprs[NVMM_X64_GPR_RCX];
-+	env->regs[R_EDX] = state.gprs[NVMM_X64_GPR_RDX];
-+	env->regs[R_EBX] = state.gprs[NVMM_X64_GPR_RBX];
-+	env->regs[R_ESP] = state.gprs[NVMM_X64_GPR_RSP];
-+	env->regs[R_EBP] = state.gprs[NVMM_X64_GPR_RBP];
-+	env->regs[R_ESI] = state.gprs[NVMM_X64_GPR_RSI];
-+	env->regs[R_EDI] = state.gprs[NVMM_X64_GPR_RDI];
-+	env->regs[R_R8]  = state.gprs[NVMM_X64_GPR_R8];
-+	env->regs[R_R9]  = state.gprs[NVMM_X64_GPR_R9];
-+	env->regs[R_R10] = state.gprs[NVMM_X64_GPR_R10];
-+	env->regs[R_R11] = state.gprs[NVMM_X64_GPR_R11];
-+	env->regs[R_R12] = state.gprs[NVMM_X64_GPR_R12];
-+	env->regs[R_R13] = state.gprs[NVMM_X64_GPR_R13];
-+	env->regs[R_R14] = state.gprs[NVMM_X64_GPR_R14];
-+	env->regs[R_R15] = state.gprs[NVMM_X64_GPR_R15];
-+
-+	/* RIP and RFLAGS. */
-+	env->eip = state.gprs[NVMM_X64_GPR_RIP];
-+	env->eflags = state.gprs[NVMM_X64_GPR_RFLAGS];
-+
-+	/* Segments. */
-+	nvmm_get_segment(&env->segs[R_CS], &state.segs[NVMM_X64_SEG_CS]);
-+	nvmm_get_segment(&env->segs[R_DS], &state.segs[NVMM_X64_SEG_DS]);
-+	nvmm_get_segment(&env->segs[R_ES], &state.segs[NVMM_X64_SEG_ES]);
-+	nvmm_get_segment(&env->segs[R_FS], &state.segs[NVMM_X64_SEG_FS]);
-+	nvmm_get_segment(&env->segs[R_GS], &state.segs[NVMM_X64_SEG_GS]);
-+	nvmm_get_segment(&env->segs[R_SS], &state.segs[NVMM_X64_SEG_SS]);
-+
-+	/* Special segments. */
-+	nvmm_get_segment(&env->gdt, &state.segs[NVMM_X64_SEG_GDT]);
-+	nvmm_get_segment(&env->ldt, &state.segs[NVMM_X64_SEG_LDT]);
-+	nvmm_get_segment(&env->tr, &state.segs[NVMM_X64_SEG_TR]);
-+	nvmm_get_segment(&env->idt, &state.segs[NVMM_X64_SEG_IDT]);
-+
-+	/* Control registers. */
-+	env->cr[0] = state.crs[NVMM_X64_CR_CR0];
-+	env->cr[2] = state.crs[NVMM_X64_CR_CR2];
-+	env->cr[3] = state.crs[NVMM_X64_CR_CR3];
-+	env->cr[4] = state.crs[NVMM_X64_CR_CR4];
-+	tpr = state.crs[NVMM_X64_CR_CR8];
-+	if (tpr != vcpu->tpr) {
-+		vcpu->tpr = tpr;
-+		cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
-+	}
-+
-+	/* Debug registers. */
-+	env->dr[1] = state.drs[NVMM_X64_DR_DR1];
-+	env->dr[2] = state.drs[NVMM_X64_DR_DR2];
-+	env->dr[3] = state.drs[NVMM_X64_DR_DR3];
-+	env->dr[6] = state.drs[NVMM_X64_DR_DR6];
-+	env->dr[7] = state.drs[NVMM_X64_DR_DR7];
-+
-+	/* FPU. */
-+	env->fpuc = state.fpu.fx_cw;
-+	env->fpstt = (state.fpu.fx_sw >> 11) & 0x7;
-+	env->fpus = state.fpu.fx_sw & ~0x3800;
-+	for (i = 0; i < 8; i++) {
-+		env->fptags[i] = !((state.fpu.fx_tw >> i) & 1);
-+	}
-+	env->fpop = state.fpu.fx_opcode;
-+	env->fpip = state.fpu.fx_ip.fa_64;
-+	env->fpdp = state.fpu.fx_dp.fa_64;
-+	env->mxcsr = state.fpu.fx_mxcsr;
-+	assert(sizeof(state.fpu.fx_87_ac) == sizeof(env->fpregs));
-+	memcpy(env->fpregs, state.fpu.fx_87_ac, sizeof(env->fpregs));
-+	for (i = 0; i < 16; i++) {
-+		memcpy(&env->xmm_regs[i].ZMM_Q(0),
-+		    &state.fpu.fx_xmm[i].xmm_bytes[0], 8);
-+		memcpy(&env->xmm_regs[i].ZMM_Q(1),
-+		    &state.fpu.fx_xmm[i].xmm_bytes[8], 8);
-+	}
-+
-+	/* MSRs. */
-+	env->efer = state.msrs[NVMM_X64_MSR_EFER];
-+	env->star = state.msrs[NVMM_X64_MSR_STAR];
++    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
++    struct nvmm_machine *mach = get_nvmm_mach();
++    struct nvmm_vcpu *vcpu = get_nvmm_vcpu(cpu);
++    X86CPU *x86_cpu = X86_CPU(cpu);
++    struct nvmm_x64_state state;
++    uint64_t bitmap, tpr;
++    size_t i;
++    int ret;
++
++    assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
++
++    bitmap =
++        NVMM_X64_STATE_SEGS |
++        NVMM_X64_STATE_GPRS |
++        NVMM_X64_STATE_CRS  |
++        NVMM_X64_STATE_DRS  |
++        NVMM_X64_STATE_MSRS |
++        NVMM_X64_STATE_FPU;
++
++    ret = nvmm_vcpu_getstate(mach, vcpu->cpuid, &state, bitmap);
++    if (ret == -1) {
++        error_report("NVMM: Failed to get virtual processor context,"
++            " error=%d", errno);
++    }
++
++    /* GPRs. */
++    env->regs[R_EAX] = state.gprs[NVMM_X64_GPR_RAX];
++    env->regs[R_ECX] = state.gprs[NVMM_X64_GPR_RCX];
++    env->regs[R_EDX] = state.gprs[NVMM_X64_GPR_RDX];
++    env->regs[R_EBX] = state.gprs[NVMM_X64_GPR_RBX];
++    env->regs[R_ESP] = state.gprs[NVMM_X64_GPR_RSP];
++    env->regs[R_EBP] = state.gprs[NVMM_X64_GPR_RBP];
++    env->regs[R_ESI] = state.gprs[NVMM_X64_GPR_RSI];
++    env->regs[R_EDI] = state.gprs[NVMM_X64_GPR_RDI];
++    env->regs[R_R8]  = state.gprs[NVMM_X64_GPR_R8];
++    env->regs[R_R9]  = state.gprs[NVMM_X64_GPR_R9];
++    env->regs[R_R10] = state.gprs[NVMM_X64_GPR_R10];
++    env->regs[R_R11] = state.gprs[NVMM_X64_GPR_R11];
++    env->regs[R_R12] = state.gprs[NVMM_X64_GPR_R12];
++    env->regs[R_R13] = state.gprs[NVMM_X64_GPR_R13];
++    env->regs[R_R14] = state.gprs[NVMM_X64_GPR_R14];
++    env->regs[R_R15] = state.gprs[NVMM_X64_GPR_R15];
++
++    /* RIP and RFLAGS. */
++    env->eip = state.gprs[NVMM_X64_GPR_RIP];
++    env->eflags = state.gprs[NVMM_X64_GPR_RFLAGS];
++
++    /* Segments. */
++    nvmm_get_segment(&env->segs[R_CS], &state.segs[NVMM_X64_SEG_CS]);
++    nvmm_get_segment(&env->segs[R_DS], &state.segs[NVMM_X64_SEG_DS]);
++    nvmm_get_segment(&env->segs[R_ES], &state.segs[NVMM_X64_SEG_ES]);
++    nvmm_get_segment(&env->segs[R_FS], &state.segs[NVMM_X64_SEG_FS]);
++    nvmm_get_segment(&env->segs[R_GS], &state.segs[NVMM_X64_SEG_GS]);
++    nvmm_get_segment(&env->segs[R_SS], &state.segs[NVMM_X64_SEG_SS]);
++
++    /* Special segments. */
++    nvmm_get_segment(&env->gdt, &state.segs[NVMM_X64_SEG_GDT]);
++    nvmm_get_segment(&env->ldt, &state.segs[NVMM_X64_SEG_LDT]);
++    nvmm_get_segment(&env->tr, &state.segs[NVMM_X64_SEG_TR]);
++    nvmm_get_segment(&env->idt, &state.segs[NVMM_X64_SEG_IDT]);
++
++    /* Control registers. */
++    env->cr[0] = state.crs[NVMM_X64_CR_CR0];
++    env->cr[2] = state.crs[NVMM_X64_CR_CR2];
++    env->cr[3] = state.crs[NVMM_X64_CR_CR3];
++    env->cr[4] = state.crs[NVMM_X64_CR_CR4];
++    tpr = state.crs[NVMM_X64_CR_CR8];
++    if (tpr != vcpu->tpr) {
++        vcpu->tpr = tpr;
++        cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
++    }
++
++    /* Debug registers. */
++    env->dr[1] = state.drs[NVMM_X64_DR_DR1];
++    env->dr[2] = state.drs[NVMM_X64_DR_DR2];
++    env->dr[3] = state.drs[NVMM_X64_DR_DR3];
++    env->dr[6] = state.drs[NVMM_X64_DR_DR6];
++    env->dr[7] = state.drs[NVMM_X64_DR_DR7];
++
++    /* FPU. */
++    env->fpuc = state.fpu.fx_cw;
++    env->fpstt = (state.fpu.fx_sw >> 11) & 0x7;
++    env->fpus = state.fpu.fx_sw & ~0x3800;
++    for (i = 0; i < 8; i++) {
++        env->fptags[i] = !((state.fpu.fx_tw >> i) & 1);
++    }
++    env->fpop = state.fpu.fx_opcode;
++    env->fpip = state.fpu.fx_ip.fa_64;
++    env->fpdp = state.fpu.fx_dp.fa_64;
++    env->mxcsr = state.fpu.fx_mxcsr;
++    assert(sizeof(state.fpu.fx_87_ac) == sizeof(env->fpregs));
++    memcpy(env->fpregs, state.fpu.fx_87_ac, sizeof(env->fpregs));
++    for (i = 0; i < 16; i++) {
++        memcpy(&env->xmm_regs[i].ZMM_Q(0),
++            &state.fpu.fx_xmm[i].xmm_bytes[0], 8);
++        memcpy(&env->xmm_regs[i].ZMM_Q(1),
++            &state.fpu.fx_xmm[i].xmm_bytes[8], 8);
++    }
++
++    /* MSRs. */
++    env->efer = state.msrs[NVMM_X64_MSR_EFER];
++    env->star = state.msrs[NVMM_X64_MSR_STAR];
 +#ifdef TARGET_X86_64
-+	env->lstar = state.msrs[NVMM_X64_MSR_LSTAR];
-+	env->cstar = state.msrs[NVMM_X64_MSR_CSTAR];
-+	env->fmask = state.msrs[NVMM_X64_MSR_SFMASK];
-+	env->kernelgsbase = state.msrs[NVMM_X64_MSR_KERNELGSBASE];
++    env->lstar = state.msrs[NVMM_X64_MSR_LSTAR];
++    env->cstar = state.msrs[NVMM_X64_MSR_CSTAR];
++    env->fmask = state.msrs[NVMM_X64_MSR_SFMASK];
++    env->kernelgsbase = state.msrs[NVMM_X64_MSR_KERNELGSBASE];
 +#endif
-+	env->sysenter_cs  = state.msrs[NVMM_X64_MSR_SYSENTER_CS];
-+	env->sysenter_eip = state.msrs[NVMM_X64_MSR_SYSENTER_ESP];
-+	env->sysenter_esp = state.msrs[NVMM_X64_MSR_SYSENTER_EIP];
-+	env->pat = state.msrs[NVMM_X64_MSR_PAT];
++    env->sysenter_cs  = state.msrs[NVMM_X64_MSR_SYSENTER_CS];
++    env->sysenter_esp = state.msrs[NVMM_X64_MSR_SYSENTER_ESP];
++    env->sysenter_eip = state.msrs[NVMM_X64_MSR_SYSENTER_EIP];
++    env->pat = state.msrs[NVMM_X64_MSR_PAT];
 +
-+	x86_update_hflags(env);
++    x86_update_hflags(env);
 +}
 +
 +static bool
 +nvmm_can_take_int(CPUState *cpu)
 +{
-+	struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
-+	struct nvmm_vcpu *vcpu = get_nvmm_vcpu(cpu);
-+	struct nvmm_machine *mach = get_nvmm_mach();
++    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
++    struct nvmm_vcpu *vcpu = get_nvmm_vcpu(cpu);
++    struct nvmm_machine *mach = get_nvmm_mach();
 +
-+	if (vcpu->int_window_exit) {
-+		return false;
-+	}
++    if (vcpu->int_window_exit) {
++        return false;
++    }
 +
-+	if (vcpu->int_shadow || (!(env->eflags & IF_MASK))) {
-+		struct nvmm_x64_state state;
++    if (vcpu->int_shadow || (!(env->eflags & IF_MASK))) {
++        struct nvmm_x64_state state;
 +
-+		/* Exit on interrupt window. */
-+		nvmm_vcpu_getstate(mach, vcpu->cpuid, &state,
-+		    NVMM_X64_STATE_MISC);
-+		state.misc[NVMM_X64_MISC_INT_WINDOW_EXIT] = 1;
-+		nvmm_vcpu_setstate(mach, vcpu->cpuid, &state,
-+		    NVMM_X64_STATE_MISC);
++        /* Exit on interrupt window. */
++        nvmm_vcpu_getstate(mach, vcpu->cpuid, &state,
++            NVMM_X64_STATE_MISC);
++        state.misc[NVMM_X64_MISC_INT_WINDOW_EXIT] = 1;
++        nvmm_vcpu_setstate(mach, vcpu->cpuid, &state,
++            NVMM_X64_STATE_MISC);
 +
-+		return false;
-+	}
++        return false;
++    }
 +
-+	return true;
++    return true;
 +}
 +
 +static bool
 +nvmm_can_take_nmi(CPUState *cpu)
 +{
-+	struct nvmm_vcpu *vcpu = get_nvmm_vcpu(cpu);
-+
-+	/*
-+	 * Contrary to INTs, NMIs always schedule an exit when they are
-+	 * completed. Therefore, if window-exiting is enabled, it means
-+	 * NMIs are blocked.
-+	 */
-+	if (vcpu->nmi_window_exit) {
-+		return false;
-+	}
-+
-+	return true;
++    struct nvmm_vcpu *vcpu = get_nvmm_vcpu(cpu);
++
++    /*
++     * Contrary to INTs, NMIs always schedule an exit when they are
++     * completed. Therefore, if window-exiting is enabled, it means
++     * NMIs are blocked.
++     */
++    if (vcpu->nmi_window_exit) {
++        return false;
++    }
++
++    return true;
 +}
 +
 +/*
@@ -828,85 +759,85 @@ Add NVMM support.
 +static void
 +nvmm_vcpu_pre_run(CPUState *cpu)
 +{
-+	struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
-+	struct nvmm_machine *mach = get_nvmm_mach();
-+	struct nvmm_vcpu *vcpu = get_nvmm_vcpu(cpu);
-+	X86CPU *x86_cpu = X86_CPU(cpu);
-+	struct nvmm_x64_state state;
-+	struct nvmm_event event;
-+	bool has_event = false;
-+	bool sync_tpr = false;
-+	uint8_t tpr;
-+	int ret;
-+
-+	memset(&event, 0, sizeof(event));
-+
-+	qemu_mutex_lock_iothread();
-+
-+	tpr = cpu_get_apic_tpr(x86_cpu->apic_state);
-+	if (tpr != vcpu->tpr) {
-+		vcpu->tpr = tpr;
-+		sync_tpr = true;
-+	}
-+
-+	/*
-+	 * Force the VCPU out of its inner loop to process any INIT requests
-+	 * or commit pending TPR access.
-+	 */
-+	if (cpu->interrupt_request & (CPU_INTERRUPT_INIT|CPU_INTERRUPT_TPR)) {
-+		cpu->exit_request = 1;
-+	}
-+
-+	if (!has_event && (cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
-+		if (nvmm_can_take_nmi(cpu)) {
-+			cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
-+			event.type = NVMM_EVENT_INTERRUPT_HW;
-+			event.vector = 2;
-+			has_event = true;
-+		}
-+	}
-+
-+	if (!has_event && (cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
-+		if (nvmm_can_take_int(cpu)) {
-+			cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
-+			event.type = NVMM_EVENT_INTERRUPT_HW;
-+			event.vector = cpu_get_pic_interrupt(env);
-+			has_event = true;
-+		}
-+	}
-+
-+	/* Don't want SMIs. */
-+	if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
-+		cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
-+	}
-+
-+	if (sync_tpr) {
-+		ret = nvmm_vcpu_getstate(mach, vcpu->cpuid, &state,
-+		    NVMM_X64_STATE_CRS);
-+		if (ret == -1) {
-+			error_report("NVMM: Failed to get CPU state,"
-+			    " error=%d", errno);
-+		}
-+
-+		state.crs[NVMM_X64_CR_CR8] = vcpu->tpr;
-+
-+		ret = nvmm_vcpu_setstate(mach, vcpu->cpuid, &state,
-+		    NVMM_X64_STATE_CRS);
-+		if (ret == -1) {
-+			error_report("NVMM: Failed to set CPU state,"
-+			    " error=%d", errno);
-+		}
-+	}
-+
-+	if (has_event) {
-+		ret = nvmm_vcpu_inject(mach, vcpu->cpuid, &event);
-+		if (ret == -1) {
-+			error_report("NVMM: Failed to inject event,"
-+			    " error=%d", errno);
-+		}
-+	}
-+
-+	qemu_mutex_unlock_iothread();
++    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
++    struct nvmm_machine *mach = get_nvmm_mach();
++    struct nvmm_vcpu *vcpu = get_nvmm_vcpu(cpu);
++    X86CPU *x86_cpu = X86_CPU(cpu);
++    struct nvmm_x64_state state;
++    struct nvmm_event event;
++    bool has_event = false;
++    bool sync_tpr = false;
++    uint8_t tpr;
++    int ret;
++
++    memset(&event, 0, sizeof(event));
++
++    qemu_mutex_lock_iothread();
++
++    tpr = cpu_get_apic_tpr(x86_cpu->apic_state);
++    if (tpr != vcpu->tpr) {
++        vcpu->tpr = tpr;
++        sync_tpr = true;
++    }
++
++    /*
++     * Force the VCPU out of its inner loop to process any INIT requests
++     * or commit pending TPR access.
++     */
++    if (cpu->interrupt_request & (CPU_INTERRUPT_INIT|CPU_INTERRUPT_TPR)) {
++        cpu->exit_request = 1;
++    }
++
++    if (!has_event && (cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
++        if (nvmm_can_take_nmi(cpu)) {
++            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
++            event.type = NVMM_EVENT_INTERRUPT_HW;
++            event.vector = 2;
++            has_event = true;
++        }
++    }
++
++    if (!has_event && (cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
++        if (nvmm_can_take_int(cpu)) {
++            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
++            event.type = NVMM_EVENT_INTERRUPT_HW;
++            event.vector = cpu_get_pic_interrupt(env);
++            has_event = true;
++        }
++    }
++
++    /* Don't want SMIs. */
++    if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
++        cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
++    }
++
++    if (sync_tpr) {
++        ret = nvmm_vcpu_getstate(mach, vcpu->cpuid, &state,
++            NVMM_X64_STATE_CRS);
++        if (ret == -1) {
++            error_report("NVMM: Failed to get CPU state,"
++                " error=%d", errno);
++        }
++
++        state.crs[NVMM_X64_CR_CR8] = vcpu->tpr;
++
++        ret = nvmm_vcpu_setstate(mach, vcpu->cpuid, &state,
++            NVMM_X64_STATE_CRS);
++        if (ret == -1) {
++            error_report("NVMM: Failed to set CPU state,"
++                " error=%d", errno);
++        }
++    }
++
++    if (has_event) {
++        ret = nvmm_vcpu_inject(mach, vcpu->cpuid, &event);
++        if (ret == -1) {
++            error_report("NVMM: Failed to inject event,"
++                " error=%d", errno);
++        }
++    }
++
++    qemu_mutex_unlock_iothread();
 +}
 +
 +/*
@@ -916,27 +847,27 @@ Add NVMM support.
 +static void
 +nvmm_vcpu_post_run(CPUState *cpu, struct nvmm_exit *exit)
 +{
-+	struct nvmm_vcpu *vcpu = get_nvmm_vcpu(cpu);
-+	struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
-+	X86CPU *x86_cpu = X86_CPU(cpu);
-+	uint64_t tpr;
-+
-+	env->eflags = exit->exitstate[NVMM_X64_EXITSTATE_RFLAGS];
-+
-+	vcpu->int_shadow =
-+	    exit->exitstate[NVMM_X64_EXITSTATE_INT_SHADOW];
-+	vcpu->int_window_exit =
-+	    exit->exitstate[NVMM_X64_EXITSTATE_INT_WINDOW_EXIT];
-+	vcpu->nmi_window_exit =
-+	    exit->exitstate[NVMM_X64_EXITSTATE_NMI_WINDOW_EXIT];
-+
-+	tpr = exit->exitstate[NVMM_X64_EXITSTATE_CR8];
-+	if (vcpu->tpr != tpr) {
-+		vcpu->tpr = tpr;
-+		qemu_mutex_lock_iothread();
-+		cpu_set_apic_tpr(x86_cpu->apic_state, vcpu->tpr);
-+		qemu_mutex_unlock_iothread();
-+	}
++    struct nvmm_vcpu *vcpu = get_nvmm_vcpu(cpu);
++    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
++    X86CPU *x86_cpu = X86_CPU(cpu);
++    uint64_t tpr;
++
++    env->eflags = exit->exitstate[NVMM_X64_EXITSTATE_RFLAGS];
++
++    vcpu->int_shadow =
++        exit->exitstate[NVMM_X64_EXITSTATE_INT_SHADOW];
++    vcpu->int_window_exit =
++        exit->exitstate[NVMM_X64_EXITSTATE_INT_WINDOW_EXIT];
++    vcpu->nmi_window_exit =
++        exit->exitstate[NVMM_X64_EXITSTATE_NMI_WINDOW_EXIT];
++
++    tpr = exit->exitstate[NVMM_X64_EXITSTATE_CR8];
++    if (vcpu->tpr != tpr) {
++        vcpu->tpr = tpr;
++        qemu_mutex_lock_iothread();
++        cpu_set_apic_tpr(x86_cpu->apic_state, vcpu->tpr);
++        qemu_mutex_unlock_iothread();
++    }
 +}
 +
 +/* -------------------------------------------------------------------------- */
@@ -944,33 +875,33 @@ Add NVMM support.
 +static void
 +nvmm_io_callback(struct nvmm_io *io)
 +{
-+	MemTxAttrs attrs = { 0 };
-+	int ret;
-+
-+	ret = address_space_rw(&address_space_io, io->port, attrs, io->data,
-+	    io->size, !io->in);
-+	if (ret != MEMTX_OK) {
-+		error_report("NVMM: I/O Transaction Failed "
-+		    "[%s, port=%lu, size=%zu]", (io->in ? "in" : "out"),
-+		    io->port, io->size);
-+	}
-+
-+	/* XXX Needed, otherwise infinite loop. */
-+	current_cpu->vcpu_dirty = false;
++    MemTxAttrs attrs = { 0 };
++    int ret;
++
++    ret = address_space_rw(&address_space_io, io->port, attrs, io->data,
++        io->size, !io->in);
++    if (ret != MEMTX_OK) {
++        error_report("NVMM: I/O Transaction Failed "
++            "[%s, port=%lu, size=%zu]", (io->in ? "in" : "out"),
++            io->port, io->size);
++    }
++
++    /* XXX Needed, otherwise infinite loop. */
++    current_cpu->vcpu_dirty = false;
 +}
 +
 +static void
 +nvmm_mem_callback(struct nvmm_mem *mem)
 +{
-+	cpu_physical_memory_rw(mem->gpa, mem->data, mem->size, mem->write);
++    cpu_physical_memory_rw(mem->gpa, mem->data, mem->size, mem->write);
 +
-+	/* XXX Needed, otherwise infinite loop. */
-+	current_cpu->vcpu_dirty = false;
++    /* XXX Needed, otherwise infinite loop. */
++    current_cpu->vcpu_dirty = false;
 +}
 +
 +static const struct nvmm_callbacks nvmm_callbacks = {
-+	.io = nvmm_io_callback,
-+	.mem = nvmm_mem_callback
++    .io = nvmm_io_callback,
++    .mem = nvmm_mem_callback
 +};
 +
 +/* -------------------------------------------------------------------------- */
@@ -979,245 +910,245 @@ Add NVMM support.
 +nvmm_handle_mem(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu,
 +    struct nvmm_exit *exit)
 +{
-+	int ret;
++    int ret;
 +
-+	ret = nvmm_assist_mem(mach, vcpu->cpuid, exit);
-+	if (ret == -1) {
-+		error_report("NVMM: Mem Assist Failed [gpa=%p]",
-+		    (void *)exit->u.mem.gpa);
-+	}
++    ret = nvmm_assist_mem(mach, vcpu->cpuid, exit);
++    if (ret == -1) {
++        error_report("NVMM: Mem Assist Failed [gpa=%p]",
++            (void *)exit->u.mem.gpa);
++    }
 +
-+	return ret;
++    return ret;
 +}
 +
 +static int
 +nvmm_handle_io(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu,
 +    struct nvmm_exit *exit)
 +{
-+	int ret;
++    int ret;
 +
-+	ret = nvmm_assist_io(mach, vcpu->cpuid, exit);
-+	if (ret == -1) {
-+		error_report("NVMM: I/O Assist Failed [port=%d]",
-+		    (int)exit->u.io.port);
-+	}
++    ret = nvmm_assist_io(mach, vcpu->cpuid, exit);
++    if (ret == -1) {
++        error_report("NVMM: I/O Assist Failed [port=%d]",
++            (int)exit->u.io.port);
++    }
 +
-+	return ret;
++    return ret;
 +}
 +
 +static int
 +nvmm_handle_msr(struct nvmm_machine *mach, CPUState *cpu,
 +    struct nvmm_exit *exit)
 +{
-+	struct nvmm_vcpu *vcpu = get_nvmm_vcpu(cpu);
-+	X86CPU *x86_cpu = X86_CPU(cpu);
-+	struct nvmm_x64_state state;
-+	uint64_t val;
-+	int ret;
-+
-+	val = exit->u.msr.val;
-+
-+	switch (exit->u.msr.msr) {
-+	case MSR_IA32_APICBASE:
-+		if (exit->u.msr.type == NVMM_EXIT_MSR_RDMSR) {
-+			val = cpu_get_apic_base(x86_cpu->apic_state);
-+		} else {
-+			cpu_set_apic_base(x86_cpu->apic_state, val);
-+		}
-+		break;
-+	default:
-+		// TODO: more MSRs to add?
-+		error_report("NVMM: Unexpected MSR 0x%lx, ignored",
-+		    exit->u.msr.msr);
-+		if (exit->u.msr.type == NVMM_EXIT_MSR_RDMSR) {
-+			val = 0;
-+		}
-+		break;
-+	}
-+
-+	ret = nvmm_vcpu_getstate(mach, vcpu->cpuid, &state,
-+	    NVMM_X64_STATE_GPRS);
-+	if (ret == -1) {
-+		return -1;
-+	}
-+
-+	if (exit->u.msr.type == NVMM_EXIT_MSR_RDMSR) {
-+		state.gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
-+		state.gprs[NVMM_X64_GPR_RDX] = (val >> 32);
-+	}
-+
-+	state.gprs[NVMM_X64_GPR_RIP] = exit->u.msr.npc;
-+
-+	ret = nvmm_vcpu_setstate(mach, vcpu->cpuid, &state,
-+	    NVMM_X64_STATE_GPRS);
-+	if (ret == -1) {
-+		return -1;
-+	}
-+
-+	return 0;
++    struct nvmm_vcpu *vcpu = get_nvmm_vcpu(cpu);
++    X86CPU *x86_cpu = X86_CPU(cpu);
++    struct nvmm_x64_state state;
++    uint64_t val;
++    int ret;
++
++    val = exit->u.msr.val;
++
++    switch (exit->u.msr.msr) {
++    case MSR_IA32_APICBASE:
++        if (exit->u.msr.type == NVMM_EXIT_MSR_RDMSR) {
++            val = cpu_get_apic_base(x86_cpu->apic_state);
++        } else {
++            cpu_set_apic_base(x86_cpu->apic_state, val);
++        }
++        break;
++    default:
++        // TODO: more MSRs to add?
++        error_report("NVMM: Unexpected MSR 0x%lx, ignored",
++            exit->u.msr.msr);
++        if (exit->u.msr.type == NVMM_EXIT_MSR_RDMSR) {
++            val = 0;
++        }
++        break;
++    }
++
++    ret = nvmm_vcpu_getstate(mach, vcpu->cpuid, &state,
++        NVMM_X64_STATE_GPRS);
++    if (ret == -1) {
++        return -1;
++    }
++
++    if (exit->u.msr.type == NVMM_EXIT_MSR_RDMSR) {
++        state.gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
++        state.gprs[NVMM_X64_GPR_RDX] = (val >> 32);
++    }
++
++    state.gprs[NVMM_X64_GPR_RIP] = exit->u.msr.npc;
++
++    ret = nvmm_vcpu_setstate(mach, vcpu->cpuid, &state,
++        NVMM_X64_STATE_GPRS);
++    if (ret == -1) {
++        return -1;
++    }
++
++    return 0;
 +}
 +
 +static int
 +nvmm_handle_halted(struct nvmm_machine *mach, CPUState *cpu,
 +    struct nvmm_exit *exit)
 +{
-+	struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
-+	int ret = 0;
++    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
++    int ret = 0;
 +
-+	qemu_mutex_lock_iothread();
++    qemu_mutex_lock_iothread();
 +
-+	if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
-+	      (env->eflags & IF_MASK)) &&
-+	    !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
-+		cpu->exception_index = EXCP_HLT;
-+		cpu->halted = true;
-+		ret = 1;
-+	}
++    if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
++          (env->eflags & IF_MASK)) &&
++        !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
++        cpu->exception_index = EXCP_HLT;
++        cpu->halted = true;
++        ret = 1;
++    }
 +
-+	qemu_mutex_unlock_iothread();
++    qemu_mutex_unlock_iothread();
 +
-+	return ret;
++    return ret;
 +}
 +
 +static int
 +nvmm_inject_ud(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
 +{
-+	struct nvmm_event event;
++    struct nvmm_event event;
 +
-+	event.type = NVMM_EVENT_EXCEPTION;
-+	event.vector = 6;
-+	event.u.error = 0;
++    event.type = NVMM_EVENT_EXCEPTION;
++    event.vector = 6;
++    event.u.error = 0;
 +
-+	return nvmm_vcpu_inject(mach, vcpu->cpuid, &event);
++    return nvmm_vcpu_inject(mach, vcpu->cpuid, &event);
 +}
 +
 +static int
 +nvmm_vcpu_loop(CPUState *cpu)
 +{
-+	struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
-+	struct nvmm_machine *mach = get_nvmm_mach();
-+	struct nvmm_vcpu *vcpu = get_nvmm_vcpu(cpu);
-+	X86CPU *x86_cpu = X86_CPU(cpu);
-+	struct nvmm_exit exit;
-+	int ret;
-+
-+	/*
-+	 * Some asynchronous events must be handled outside of the inner
-+	 * VCPU loop. They are handled here.
-+	 */
-+	if (cpu->interrupt_request & CPU_INTERRUPT_INIT) {
-+		nvmm_cpu_synchronize_state(cpu);
-+		do_cpu_init(x86_cpu);
-+		/* XXX: reset the INT/NMI windows */
-+	}
-+	if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
-+		cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
-+		apic_poll_irq(x86_cpu->apic_state);
-+	}
-+	if (((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
-+	     (env->eflags & IF_MASK)) ||
-+	    (cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
-+		cpu->halted = false;
-+	}
-+	if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) {
-+		nvmm_cpu_synchronize_state(cpu);
-+		do_cpu_sipi(x86_cpu);
-+	}
-+	if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
-+		cpu->interrupt_request &= ~CPU_INTERRUPT_TPR;
-+		nvmm_cpu_synchronize_state(cpu);
-+		apic_handle_tpr_access_report(x86_cpu->apic_state, env->eip,
-+		    env->tpr_access_type);
-+	}
-+
-+	if (cpu->halted) {
-+		cpu->exception_index = EXCP_HLT;
-+		atomic_set(&cpu->exit_request, false);
-+		return 0;
-+	}
-+
-+	qemu_mutex_unlock_iothread();
-+	cpu_exec_start(cpu);
-+
-+	/*
-+	 * Inner VCPU loop.
-+	 */
-+	do {
-+		if (cpu->vcpu_dirty) {
-+			nvmm_set_registers(cpu);
-+			cpu->vcpu_dirty = false;
-+		}
-+
-+		if (vcpu->stop) {
-+			cpu->exception_index = EXCP_INTERRUPT;
-+			vcpu->stop = false;
-+			ret = 1;
-+			break;
-+		}
-+
-+		nvmm_vcpu_pre_run(cpu);
-+
-+		if (atomic_read(&cpu->exit_request)) {
-+			qemu_cpu_kick_self();
-+		}
-+
-+		ret = nvmm_vcpu_run(mach, vcpu->cpuid, &exit);
-+		if (ret == -1) {
-+			error_report("NVMM: Failed to exec a virtual processor,"
-+			    " error=%d", errno);
-+			break;
-+		}
-+
-+		nvmm_vcpu_post_run(cpu, &exit);
-+
-+		switch (exit.reason) {
-+		case NVMM_EXIT_NONE:
-+			break;
-+		case NVMM_EXIT_MEMORY:
-+			ret = nvmm_handle_mem(mach, vcpu, &exit);
-+			break;
-+		case NVMM_EXIT_IO:
-+			ret = nvmm_handle_io(mach, vcpu, &exit);
-+			break;
-+		case NVMM_EXIT_MSR:
-+			ret = nvmm_handle_msr(mach, cpu, &exit);
-+			break;
-+		case NVMM_EXIT_INT_READY:
-+		case NVMM_EXIT_NMI_READY:
-+			break;
-+		case NVMM_EXIT_MONITOR:
-+		case NVMM_EXIT_MWAIT:
-+		case NVMM_EXIT_MWAIT_COND:
-+			ret = nvmm_inject_ud(mach, vcpu);
-+			break;
-+		case NVMM_EXIT_HALTED:
-+			ret = nvmm_handle_halted(mach, cpu, &exit);
-+			break;
-+		case NVMM_EXIT_SHUTDOWN:
-+			qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
-+			cpu->exception_index = EXCP_INTERRUPT;
-+			ret = 1;
-+			break;
-+
-+		default:
-+			error_report("NVMM: Unexpected VM exit code %lu",
-+			    exit.reason);
-+			nvmm_get_registers(cpu);
-+			qemu_mutex_lock_iothread();
-+			qemu_system_guest_panicked(cpu_get_crash_info(cpu));
-+			qemu_mutex_unlock_iothread();
-+			ret = -1;
-+			break;
-+		}
-+	} while (ret == 0);
-+
-+	cpu_exec_end(cpu);
-+	qemu_mutex_lock_iothread();
-+	current_cpu = cpu;
-+
-+	atomic_set(&cpu->exit_request, false);
-+
-+	return ret < 0;
++    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
++    struct nvmm_machine *mach = get_nvmm_mach();
++    struct nvmm_vcpu *vcpu = get_nvmm_vcpu(cpu);
++    X86CPU *x86_cpu = X86_CPU(cpu);
++    struct nvmm_exit exit;
++    int ret;
++
++    /*
++     * Some asynchronous events must be handled outside of the inner
++     * VCPU loop. They are handled here.
++     */
++    if (cpu->interrupt_request & CPU_INTERRUPT_INIT) {
++        nvmm_cpu_synchronize_state(cpu);
++        do_cpu_init(x86_cpu);
++        /* XXX: reset the INT/NMI windows */
++    }
++    if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
++        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
++        apic_poll_irq(x86_cpu->apic_state);
++    }
++    if (((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
++         (env->eflags & IF_MASK)) ||
++        (cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
++        cpu->halted = false;
++    }
++    if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) {
++        nvmm_cpu_synchronize_state(cpu);
++        do_cpu_sipi(x86_cpu);
++    }
++    if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
++        cpu->interrupt_request &= ~CPU_INTERRUPT_TPR;
++        nvmm_cpu_synchronize_state(cpu);
++        apic_handle_tpr_access_report(x86_cpu->apic_state, env->eip,
++            env->tpr_access_type);
++    }
++
++    if (cpu->halted) {
++        cpu->exception_index = EXCP_HLT;
++        atomic_set(&cpu->exit_request, false);
++        return 0;
++    }
++
++    qemu_mutex_unlock_iothread();
++    cpu_exec_start(cpu);
++
++    /*
++     * Inner VCPU loop.
++     */
++    do {
++        if (cpu->vcpu_dirty) {
++            nvmm_set_registers(cpu);
++            cpu->vcpu_dirty = false;
++        }
++
++        if (vcpu->stop) {
++            cpu->exception_index = EXCP_INTERRUPT;
++            vcpu->stop = false;
++            ret = 1;
++            break;
++        }
++
++        nvmm_vcpu_pre_run(cpu);
++
++        if (atomic_read(&cpu->exit_request)) {
++            qemu_cpu_kick_self();
++        }
++
++        ret = nvmm_vcpu_run(mach, vcpu->cpuid, &exit);
++        if (ret == -1) {
++            error_report("NVMM: Failed to exec a virtual processor,"
++                " error=%d", errno);
++            break;
++        }
++
++        nvmm_vcpu_post_run(cpu, &exit);
++
++        switch (exit.reason) {
++        case NVMM_EXIT_NONE:
++            break;
++        case NVMM_EXIT_MEMORY:
++            ret = nvmm_handle_mem(mach, vcpu, &exit);
++            break;
++        case NVMM_EXIT_IO:
++            ret = nvmm_handle_io(mach, vcpu, &exit);
++            break;
++        case NVMM_EXIT_MSR:
++            ret = nvmm_handle_msr(mach, cpu, &exit);
++            break;
++        case NVMM_EXIT_INT_READY:
++        case NVMM_EXIT_NMI_READY:
++            break;
++        case NVMM_EXIT_MONITOR:
++        case NVMM_EXIT_MWAIT:
++        case NVMM_EXIT_MWAIT_COND:
++            ret = nvmm_inject_ud(mach, vcpu);
++            break;
++        case NVMM_EXIT_HALTED:
++            ret = nvmm_handle_halted(mach, cpu, &exit);
++            break;
++        case NVMM_EXIT_SHUTDOWN:
++            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
++            cpu->exception_index = EXCP_INTERRUPT;
++            ret = 1;
++            break;
++
++        default:
++            error_report("NVMM: Unexpected VM exit code %lu",
++                exit.reason);
++            nvmm_get_registers(cpu);
++            qemu_mutex_lock_iothread();
++            qemu_system_guest_panicked(cpu_get_crash_info(cpu));
++            qemu_mutex_unlock_iothread();
++            ret = -1;
++            break;
++        }
++    } while (ret == 0);
++
++    cpu_exec_end(cpu);
++    qemu_mutex_lock_iothread();
++    current_cpu = cpu;
++
++    atomic_set(&cpu->exit_request, false);
++
++    return ret < 0;
 +}
 +
 +/* -------------------------------------------------------------------------- */
@@ -1225,50 +1156,50 @@ Add NVMM support.
 +static void
 +do_nvmm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
 +{
-+	nvmm_get_registers(cpu);
-+	cpu->vcpu_dirty = true;
++    nvmm_get_registers(cpu);
++    cpu->vcpu_dirty = true;
 +}
 +
 +static void
 +do_nvmm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
 +{
-+	nvmm_set_registers(cpu);
-+	cpu->vcpu_dirty = false;
++    nvmm_set_registers(cpu);
++    cpu->vcpu_dirty = false;
 +}
 +
 +static void
 +do_nvmm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
 +{
-+	nvmm_set_registers(cpu);
-+	cpu->vcpu_dirty = false;
++    nvmm_set_registers(cpu);
++    cpu->vcpu_dirty = false;
 +}
 +
 +static void
 +do_nvmm_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
 +{
-+	cpu->vcpu_dirty = true;
++    cpu->vcpu_dirty = true;
 +}
 +
 +void nvmm_cpu_synchronize_state(CPUState *cpu)
 +{
-+	if (!cpu->vcpu_dirty) {
-+		run_on_cpu(cpu, do_nvmm_cpu_synchronize_state, RUN_ON_CPU_NULL);
-+	}
++    if (!cpu->vcpu_dirty) {
++        run_on_cpu(cpu, do_nvmm_cpu_synchronize_state, RUN_ON_CPU_NULL);
++    }
 +}
 +
 +void nvmm_cpu_synchronize_post_reset(CPUState *cpu)
 +{
-+	run_on_cpu(cpu, do_nvmm_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
++    run_on_cpu(cpu, do_nvmm_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
 +}
 +
 +void nvmm_cpu_synchronize_post_init(CPUState *cpu)
 +{
-+	run_on_cpu(cpu, do_nvmm_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
++    run_on_cpu(cpu, do_nvmm_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
 +}
 +
 +void nvmm_cpu_synchronize_pre_loadvm(CPUState *cpu)
 +{
-+	run_on_cpu(cpu, do_nvmm_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
++    run_on_cpu(cpu, do_nvmm_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
 +}
 +
 +/* -------------------------------------------------------------------------- */
@@ -1278,106 +1209,106 @@ Add NVMM support.
 +static void
 +nvmm_ipi_signal(int sigcpu)
 +{
-+	struct nvmm_vcpu *vcpu;
++    struct nvmm_vcpu *vcpu;
 +
-+	if (current_cpu) {
-+		vcpu = get_nvmm_vcpu(current_cpu);
-+		vcpu->stop = true;
-+	}
++    if (current_cpu) {
++        vcpu = get_nvmm_vcpu(current_cpu);
++        vcpu->stop = true;
++    }
 +}
 +
 +static void
 +nvmm_init_cpu_signals(void)
 +{
-+	struct sigaction sigact;
-+	sigset_t set;
-+
-+	/* Install the IPI handler. */
-+	memset(&sigact, 0, sizeof(sigact));
-+	sigact.sa_handler = nvmm_ipi_signal;
-+	sigaction(SIG_IPI, &sigact, NULL);
-+
-+	/* Allow IPIs on the current thread. */
-+	sigprocmask(SIG_BLOCK, NULL, &set);
-+	sigdelset(&set, SIG_IPI);
-+	pthread_sigmask(SIG_SETMASK, &set, NULL);
++    struct sigaction sigact;
++    sigset_t set;
++
++    /* Install the IPI handler. */
++    memset(&sigact, 0, sizeof(sigact));
++    sigact.sa_handler = nvmm_ipi_signal;
++    sigaction(SIG_IPI, &sigact, NULL);
++
++    /* Allow IPIs on the current thread. */
++    sigprocmask(SIG_BLOCK, NULL, &set);
++    sigdelset(&set, SIG_IPI);
++    pthread_sigmask(SIG_SETMASK, &set, NULL);
 +}
 +
 +int
 +nvmm_init_vcpu(CPUState *cpu)
 +{
-+	struct nvmm_machine *mach = get_nvmm_mach();
-+	Error *local_error = NULL;
-+	struct nvmm_vcpu *vcpu;
-+	int ret;
-+
-+	nvmm_init_cpu_signals();
-+
-+	if (nvmm_migration_blocker == NULL) {
-+		error_setg(&nvmm_migration_blocker,
-+		    "NVMM: Migration not supported");
-+
-+		(void)migrate_add_blocker(nvmm_migration_blocker, &local_error);
-+		if (local_error) {
-+			error_report_err(local_error);
-+			migrate_del_blocker(nvmm_migration_blocker);
-+			error_free(nvmm_migration_blocker);
-+			return -EINVAL;
-+		}
-+	}
-+
-+	vcpu = g_malloc0(sizeof(struct nvmm_vcpu));
-+	if (vcpu == NULL) {
-+		error_report("NVMM: Failed to allocate VCPU context.");
-+		return -ENOMEM;
-+	}
-+	vcpu->cpuid = cpu->cpu_index;
-+
-+	ret = nvmm_vcpu_create(mach, vcpu->cpuid);
-+	if (ret == -1) {
-+		error_report("NVMM: Failed to create a virtual processor,"
-+		    " error=%d", errno);
-+		g_free(vcpu);
-+		return -EINVAL;
-+	}
-+
-+	cpu->vcpu_dirty = true;
-+	cpu->hax_vcpu = (struct hax_vcpu_state *)vcpu;
-+
-+	return 0;
++    struct nvmm_machine *mach = get_nvmm_mach();
++    Error *local_error = NULL;
++    struct nvmm_vcpu *vcpu;
++    int ret;
++
++    nvmm_init_cpu_signals();
++
++    if (nvmm_migration_blocker == NULL) {
++        error_setg(&nvmm_migration_blocker,
++            "NVMM: Migration not supported");
++
++        (void)migrate_add_blocker(nvmm_migration_blocker, &local_error);
++        if (local_error) {
++            error_report_err(local_error);
++            migrate_del_blocker(nvmm_migration_blocker);
++            error_free(nvmm_migration_blocker);
++            return -EINVAL;
++        }
++    }
++
++    vcpu = g_malloc0(sizeof(struct nvmm_vcpu));
++    if (vcpu == NULL) {
++        error_report("NVMM: Failed to allocate VCPU context.");
++        return -ENOMEM;
++    }
++    vcpu->cpuid = cpu->cpu_index;
++
++    ret = nvmm_vcpu_create(mach, vcpu->cpuid);
++    if (ret == -1) {
++        error_report("NVMM: Failed to create a virtual processor,"
++            " error=%d", errno);
++        g_free(vcpu);
++        return -EINVAL;
++    }
++
++    cpu->vcpu_dirty = true;
++    cpu->hax_vcpu = (struct hax_vcpu_state *)vcpu;
++
++    return 0;
 +}
 +
 +int
 +nvmm_vcpu_exec(CPUState *cpu)
 +{
-+	int ret, fatal;
++    int ret, fatal;
 +
-+	while (1) {
-+		if (cpu->exception_index >= EXCP_INTERRUPT) {
-+			ret = cpu->exception_index;
-+			cpu->exception_index = -1;
-+			break;
-+		}
++    while (1) {
++        if (cpu->exception_index >= EXCP_INTERRUPT) {
++            ret = cpu->exception_index;
++            cpu->exception_index = -1;
++            break;
++        }
 +
-+		fatal = nvmm_vcpu_loop(cpu);
++        fatal = nvmm_vcpu_loop(cpu);
 +
-+		if (fatal) {
-+			error_report("NVMM: Failed to execute a VCPU.");
-+			abort();
-+		}
-+	}
++        if (fatal) {
++            error_report("NVMM: Failed to execute a VCPU.");
++            abort();
++        }
++    }
 +
-+	return ret;
++    return ret;
 +}
 +
 +void
 +nvmm_destroy_vcpu(CPUState *cpu)
 +{
-+	struct nvmm_machine *mach = get_nvmm_mach();
-+	struct nvmm_vcpu *vcpu = get_nvmm_vcpu(cpu);
++    struct nvmm_machine *mach = get_nvmm_mach();
++    struct nvmm_vcpu *vcpu = get_nvmm_vcpu(cpu);
 +
-+	nvmm_vcpu_destroy(mach, vcpu->cpuid);
-+	g_free(cpu->hax_vcpu);
++    nvmm_vcpu_destroy(mach, vcpu->cpuid);
++    g_free(cpu->hax_vcpu);
 +}
 +
 +/* -------------------------------------------------------------------------- */
@@ -1386,123 +1317,123 @@ Add NVMM support.
 +nvmm_update_mapping(hwaddr start_pa, ram_addr_t size, uintptr_t hva,
 +    bool add, bool rom, const char *name)
 +{
-+	struct nvmm_machine *mach = get_nvmm_mach();
-+	int ret;
-+
-+	// TODO rom read-only?
-+
-+	if (add) {
-+		ret = nvmm_gpa_map(mach, hva, start_pa, size, 0);
-+	} else {
-+		ret = nvmm_gpa_unmap(mach, hva, start_pa, size);
-+	}
-+
-+	if (ret == -1) {
-+		error_report("NVMM: Failed to %s GPA range '%s' PA:%p, "
-+		    "Size:%p bytes, HostVA:%p, error=%d",
-+		    (add ? "map" : "unmap"), name, (void *)(uintptr_t)start_pa,
-+		    (void *)size, (void *)hva, errno);
-+	}
++    struct nvmm_machine *mach = get_nvmm_mach();
++    int ret;
++
++    // TODO rom read-only?
++
++    if (add) {
++        ret = nvmm_gpa_map(mach, hva, start_pa, size, 0);
++    } else {
++        ret = nvmm_gpa_unmap(mach, hva, start_pa, size);
++    }
++
++    if (ret == -1) {
++        error_report("NVMM: Failed to %s GPA range '%s' PA:%p, "
++            "Size:%p bytes, HostVA:%p, error=%d",
++            (add ? "map" : "unmap"), name, (void *)(uintptr_t)start_pa,
++            (void *)size, (void *)hva, errno);
++    }
 +}
 +
 +static void
 +nvmm_process_section(MemoryRegionSection *section, int add)
 +{
-+	MemoryRegion *mr = section->mr;
-+	hwaddr start_pa = section->offset_within_address_space;
-+	ram_addr_t size = int128_get64(section->size);
-+	unsigned int delta;
-+	uintptr_t hva;
-+
-+	if (!memory_region_is_ram(mr)) {
-+		return;
-+	}
-+
-+	/* Adjust start_pa and size so that they are page-aligned. */
-+	delta = qemu_real_host_page_size - (start_pa & ~qemu_real_host_page_mask);
-+	delta &= ~qemu_real_host_page_mask;
-+	if (delta > size) {
-+		return;
-+	}
-+	start_pa += delta;
-+	size -= delta;
-+	size &= qemu_real_host_page_mask;
-+	if (!size || (start_pa & ~qemu_real_host_page_mask)) {
-+		return;
-+	}
-+
-+	hva = (uintptr_t)memory_region_get_ram_ptr(mr) +
-+	    section->offset_within_region + delta;
-+
-+	nvmm_update_mapping(start_pa, size, hva, add,
-+	    memory_region_is_rom(mr), mr->name);
++    MemoryRegion *mr = section->mr;
++    hwaddr start_pa = section->offset_within_address_space;
++    ram_addr_t size = int128_get64(section->size);
++    unsigned int delta;
++    uintptr_t hva;
++
++    if (!memory_region_is_ram(mr)) {
++        return;
++    }
++
++    /* Adjust start_pa and size so that they are page-aligned. */
++    delta = qemu_real_host_page_size - (start_pa & ~qemu_real_host_page_mask);
++    delta &= ~qemu_real_host_page_mask;
++    if (delta > size) {
++        return;
++    }
++    start_pa += delta;
++    size -= delta;
++    size &= qemu_real_host_page_mask;
++    if (!size || (start_pa & ~qemu_real_host_page_mask)) {
++        return;
++    }
++
++    hva = (uintptr_t)memory_region_get_ram_ptr(mr) +
++        section->offset_within_region + delta;
++
++    nvmm_update_mapping(start_pa, size, hva, add,
++        memory_region_is_rom(mr), mr->name);
 +}
 +
 +static void
 +nvmm_region_add(MemoryListener *listener, MemoryRegionSection *section)
 +{
-+	memory_region_ref(section->mr);
-+	nvmm_process_section(section, 1);
++    memory_region_ref(section->mr);
++    nvmm_process_section(section, 1);
 +}
 +
 +static void
 +nvmm_region_del(MemoryListener *listener, MemoryRegionSection *section)
 +{
-+	nvmm_process_section(section, 0);
-+	memory_region_unref(section->mr);
++    nvmm_process_section(section, 0);
++    memory_region_unref(section->mr);
 +}
 +
 +static void
 +nvmm_transaction_begin(MemoryListener *listener)
 +{
-+	/* nothing */
++    /* nothing */
 +}
 +
 +static void
 +nvmm_transaction_commit(MemoryListener *listener)
 +{
-+	/* nothing */
++    /* nothing */
 +}
 +
 +static void
 +nvmm_log_sync(MemoryListener *listener, MemoryRegionSection *section)
 +{
-+	MemoryRegion *mr = section->mr;
++    MemoryRegion *mr = section->mr;
 +
-+	if (!memory_region_is_ram(mr)) {
-+		return;
-+	}
++    if (!memory_region_is_ram(mr)) {
++        return;
++    }
 +
-+	memory_region_set_dirty(mr, 0, int128_get64(section->size));
++    memory_region_set_dirty(mr, 0, int128_get64(section->size));
 +}
 +
 +static MemoryListener nvmm_memory_listener = {
-+	.begin = nvmm_transaction_begin,
-+	.commit = nvmm_transaction_commit,
-+	.region_add = nvmm_region_add,
-+	.region_del = nvmm_region_del,
-+	.log_sync = nvmm_log_sync,
-+	.priority = 10,
++    .begin = nvmm_transaction_begin,
++    .commit = nvmm_transaction_commit,
++    .region_add = nvmm_region_add,
++    .region_del = nvmm_region_del,
++    .log_sync = nvmm_log_sync,
++    .priority = 10,
 +};
 +
 +static void
 +nvmm_ram_block_added(RAMBlockNotifier *n, void *host, size_t size)
 +{
-+	struct nvmm_machine *mach = get_nvmm_mach();
-+	uintptr_t hva = (uintptr_t)host;
-+	int ret;
++    struct nvmm_machine *mach = get_nvmm_mach();
++    uintptr_t hva = (uintptr_t)host;
++    int ret;
 +
-+	ret = nvmm_hva_map(mach, hva, size);
++    ret = nvmm_hva_map(mach, hva, size);
 +
-+	if (ret == -1) {
-+		error_report("NVMM: Failed to map HVA, HostVA:%p "
-+		    "Size:%p bytes, error=%d",
-+		    (void *)hva, (void *)size, errno);
-+	}
++    if (ret == -1) {
++        error_report("NVMM: Failed to map HVA, HostVA:%p "
++            "Size:%p bytes, error=%d",
++            (void *)hva, (void *)size, errno);
++    }
 +}
 +
 +static struct RAMBlockNotifier nvmm_ram_notifier = {
-+	.ram_block_added = nvmm_ram_block_added
++    .ram_block_added = nvmm_ram_block_added
 +};
 +
 +/* -------------------------------------------------------------------------- */
@@ -1510,11 +1441,11 @@ Add NVMM support.
 +static void
 +nvmm_handle_interrupt(CPUState *cpu, int mask)
 +{
-+	cpu->interrupt_request |= mask;
++    cpu->interrupt_request |= mask;
 +
-+	if (!qemu_cpu_is_self(cpu)) {
-+		qemu_cpu_kick(cpu);
-+	}
++    if (!qemu_cpu_is_self(cpu)) {
++        qemu_cpu_kick(cpu);
++    }
 +}
 +
 +/* -------------------------------------------------------------------------- */
@@ -1522,107 +1453,107 @@ Add NVMM support.
 +static int
 +nvmm_accel_configure(struct nvmm_machine *mach)
 +{
-+	struct nvmm_x86_conf_cpuid cpuid;
-+	int ret;
-+
-+	/* Delete the Monitor and MTRR bits, set the Hypervisor bit. */
-+	memset(&cpuid, 0, sizeof(cpuid));
-+	cpuid.leaf = 0x00000001;
-+	cpuid.del.ecx = CPUID_EXT_MONITOR;
-+	cpuid.del.edx = CPUID_MCE | CPUID_MTRR;
-+	cpuid.set.ecx = CPUID_EXT_HYPERVISOR;
-+
-+	ret = nvmm_machine_configure(mach, NVMM_X86_CONF_CPUID, &cpuid);
-+	if (ret == -1)
-+		return -1;
-+
-+	/* Delete the OSVW bit. */
-+	memset(&cpuid, 0, sizeof(cpuid));
-+	cpuid.leaf = 0x80000001;
-+	cpuid.del.ecx = CPUID_EXT3_OSVW;
-+
-+	ret = nvmm_machine_configure(mach, NVMM_X86_CONF_CPUID, &cpuid);
-+	if (ret == -1)
-+		return -1;
-+
-+	return 0;
++    struct nvmm_x86_conf_cpuid cpuid;
++    int ret;
++
++    /* Delete the Monitor and MTRR bits, set the Hypervisor bit. */
++    memset(&cpuid, 0, sizeof(cpuid));
++    cpuid.leaf = 0x00000001;
++    cpuid.del.ecx = CPUID_EXT_MONITOR;
++    cpuid.del.edx = CPUID_MCE | CPUID_MTRR;
++    cpuid.set.ecx = CPUID_EXT_HYPERVISOR;
++
++    ret = nvmm_machine_configure(mach, NVMM_X86_CONF_CPUID, &cpuid);
++    if (ret == -1)
++        return -1;
++
++    /* Delete the OSVW bit. */
++    memset(&cpuid, 0, sizeof(cpuid));
++    cpuid.leaf = 0x80000001;
++    cpuid.del.ecx = CPUID_EXT3_OSVW;
++
++    ret = nvmm_machine_configure(mach, NVMM_X86_CONF_CPUID, &cpuid);
++    if (ret == -1)
++        return -1;
++
++    return 0;
 +}
 +
 +static int
 +nvmm_accel_init(MachineState *ms)
 +{
-+	struct nvmm_capability cap;
-+	int ret;
-+
-+	ret = nvmm_capability(&cap);
-+	if (ret == -1) {
-+		error_report("NVMM: No accelerator found, error=%d", errno);
-+		return -ENOSPC;
-+	}
-+	if (cap.version != 1) {
-+		error_report("NVMM: Unsupported version %lu", cap.version);
-+		return -ENOSPC;
-+	}
-+	if (cap.state_size != sizeof(struct nvmm_x64_state)) {
-+		error_report("NVMM: Wrong state size %zu", cap.state_size);
-+		return -ENOSPC;
-+	}
-+
-+	ret = nvmm_machine_create(&nvmm_global.mach);
-+	if (ret == -1) {
-+		error_report("NVMM: Machine creation failed, error=%d", errno);
-+		return -ENOSPC;
-+	}
-+
-+	ret = nvmm_accel_configure(&nvmm_global.mach);
-+	if (ret == -1) {
-+		error_report("NVMM: Machine configuration failed, error=%d",
-+		    errno);
-+		return -ENOSPC;
-+	}
-+
-+	nvmm_callbacks_register(&nvmm_callbacks);
-+
-+	memory_listener_register(&nvmm_memory_listener, &address_space_memory);
-+	ram_block_notifier_add(&nvmm_ram_notifier);
-+
-+	cpu_interrupt_handler = nvmm_handle_interrupt;
-+
-+	printf("NetBSD Virtual Machine Monitor accelerator is operational\n");
-+	return 0;
++    struct nvmm_capability cap;
++    int ret;
++
++    ret = nvmm_capability(&cap);
++    if (ret == -1) {
++        error_report("NVMM: No accelerator found, error=%d", errno);
++        return -ENOSPC;
++    }
++    if (cap.version != 1) {
++        error_report("NVMM: Unsupported version %lu", cap.version);
++        return -ENOSPC;
++    }
++    if (cap.state_size != sizeof(struct nvmm_x64_state)) {
++        error_report("NVMM: Wrong state size %zu", cap.state_size);
++        return -ENOSPC;
++    }
++
++    ret = nvmm_machine_create(&nvmm_global.mach);
++    if (ret == -1) {
++        error_report("NVMM: Machine creation failed, error=%d", errno);
++        return -ENOSPC;
++    }
++
++    ret = nvmm_accel_configure(&nvmm_global.mach);
++    if (ret == -1) {
++        error_report("NVMM: Machine configuration failed, error=%d",
++            errno);
++        return -ENOSPC;
++    }
++
++    nvmm_callbacks_register(&nvmm_callbacks);
++
++    memory_listener_register(&nvmm_memory_listener, &address_space_memory);
++    ram_block_notifier_add(&nvmm_ram_notifier);
++
++    cpu_interrupt_handler = nvmm_handle_interrupt;
++
++    printf("NetBSD Virtual Machine Monitor accelerator is operational\n");
++    return 0;
 +}
 +
 +int
 +nvmm_enabled(void)
 +{
-+	return nvmm_allowed;
++    return nvmm_allowed;
 +}
 +
 +static void
 +nvmm_accel_class_init(ObjectClass *oc, void *data)
 +{
-+	AccelClass *ac = ACCEL_CLASS(oc);
-+	ac->name = "NVMM";
-+	ac->init_machine = nvmm_accel_init;
-+	ac->allowed = &nvmm_allowed;
++    AccelClass *ac = ACCEL_CLASS(oc);
++    ac->name = "NVMM";
++    ac->init_machine = nvmm_accel_init;
++    ac->allowed = &nvmm_allowed;
 +}
 +
 +static const TypeInfo nvmm_accel_type = {
-+	.name = ACCEL_CLASS_NAME("nvmm"),
-+	.parent = TYPE_ACCEL,
-+	.class_init = nvmm_accel_class_init,
++    .name = ACCEL_CLASS_NAME("nvmm"),
++    .parent = TYPE_ACCEL,
++    .class_init = nvmm_accel_class_init,
 +};
 +
 +static void
 +nvmm_type_init(void)
 +{
-+	type_register_static(&nvmm_accel_type);
++    type_register_static(&nvmm_accel_type);
 +}
 +
 +type_init(nvmm_type_init);
---- vl.c	2018-08-14 21:10:35.000000000 +0200
-+++ vl.c	2018-11-01 11:34:34.466762381 +0100
-@@ -3620,7 +3620,8 @@
+--- vl.c	2018-12-11 18:44:35.000000000 +0100
++++ vl.c	2019-02-04 10:07:16.467301929 +0100
+@@ -3653,7 +3653,8 @@
                                                       optarg, true);
                  optarg = qemu_opt_get(accel_opts, "accel");
                  if (!optarg || is_help_option(optarg)) {
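
The vl.c hunk above wires NVMM into QEMU's accelerator option parsing, so the new backend is selected like any other accelerator. A minimal sketch of an invocation, assuming the patched package is installed (the memory size and disk image name are arbitrary placeholders):

    qemu-system-x86_64 -accel nvmm -m 1G -hda disk.img

If the accelerator initialised successfully, nvmm_accel_init() prints "NetBSD Virtual Machine Monitor accelerator is operational" at startup.
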
diff --git a/qemu-nvmm/patches/patch-target_arm_cpu.h b/qemu-nvmm/patches/patch-target_arm_cpu.h
new file mode 100644
index 0000000000..92f754bcfa
--- /dev/null
+++ b/qemu-nvmm/patches/patch-target_arm_cpu.h
@@ -0,0 +1,20 @@
+$NetBSD: patch-target_arm_cpu.h,v 1.2 2019/01/31 13:39:10 martin Exp $
+
+Upstream fix for arm/aarch64 FPU exception handling
+test failures in the NetBSD ATF test suite.
+
+--- target/arm/cpu.h.orig	2018-12-11 18:44:34.000000000 +0100
++++ target/arm/cpu.h	2019-01-31 14:29:37.037792781 +0100
+@@ -1299,6 +1299,12 @@ void vfp_set_fpscr(CPUARMState *env, uin
+ #define FPSR_MASK 0xf800009f
+ #define FPCR_MASK 0x07ff9f00
+ 
++#define FPCR_IOE    (1 << 8)    /* Invalid Operation exception trap enable */
++#define FPCR_DZE    (1 << 9)    /* Divide by Zero exception trap enable */
++#define FPCR_OFE    (1 << 10)   /* Overflow exception trap enable */
++#define FPCR_UFE    (1 << 11)   /* Underflow exception trap enable */
++#define FPCR_IXE    (1 << 12)   /* Inexact exception trap enable */
++#define FPCR_IDE    (1 << 15)   /* Input Denormal exception trap enable */
+ #define FPCR_FZ16   (1 << 19)   /* ARMv8.2+, FP16 flush-to-zero */
+ #define FPCR_FZ     (1 << 24)   /* Flush-to-zero enable bit */
+ #define FPCR_DN     (1 << 25)   /* Default NaN enable bit */
diff --git a/qemu-nvmm/patches/patch-target_arm_helper.c b/qemu-nvmm/patches/patch-target_arm_helper.c
new file mode 100644
index 0000000000..f167878d65
--- /dev/null
+++ b/qemu-nvmm/patches/patch-target_arm_helper.c
@@ -0,0 +1,20 @@
+$NetBSD: patch-target_arm_helper.c,v 1.1 2019/01/31 13:39:10 martin Exp $
+
+Upstream fix for arm/aarch64 FPU exception handling
+test failures in the NetBSD ATF test suite.
+
+--- target/arm/helper.c.orig	2018-12-11 18:44:34.000000000 +0100
++++ target/arm/helper.c	2019-01-31 14:29:37.050410598 +0100
+@@ -11747,6 +11747,12 @@
+         val &= ~FPCR_FZ16;
+     }
+ 
++    /*
++     * We don't implement trapped exception handling, so the
++     * trap enable bits are all RAZ/WI (not RES0!)
++     */
++    val &= ~(FPCR_IDE | FPCR_IXE | FPCR_UFE | FPCR_OFE | FPCR_DZE | FPCR_IOE);
++
+     changed = env->vfp.xregs[ARM_VFP_FPSCR];
+     env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
+     env->vfp.vec_len = (val >> 16) & 7;
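
Taken together, the two arm patches make the FPCR trap-enable bits read-as-zero/write-ignored. A minimal C sketch of the effect, using the bit definitions from the cpu.h patch (FPCR_TRAP_ENABLE is a hypothetical helper name, not part of the patch):

    /* The six trap-enable bits added in cpu.h, combined; equals 0x9f00. */
    #define FPCR_TRAP_ENABLE \
        (FPCR_IOE | FPCR_DZE | FPCR_OFE | FPCR_UFE | FPCR_IXE | FPCR_IDE)

    /* helper.c clears them on every FPSCR write, so e.g. a guest that
     * sets FPCR.DZE (bit 9) reads the bit back as zero instead of
     * enabling a divide-by-zero trap that QEMU cannot deliver. */
    val &= ~FPCR_TRAP_ENABLE;
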

