Source-Changes-HG archive


[src/trunk]: src Invert the mapping logic.



details:   https://anonhg.NetBSD.org/src/rev/09b4484c1e0b
branches:  trunk
changeset: 446717:09b4484c1e0b
user:      maxv <maxv%NetBSD.org@localhost>
date:      Sat Dec 15 13:39:43 2018 +0000

description:
Invert the mapping logic.

Until now, the "owner" of the memory was the guest, and by calling
nvmm_gpa_map(), the virtualizer was creating a view towards the guest
memory.

Qemu expects the contrary: it wants the owner to be the virtualizer, and
nvmm_gpa_map should just create a view from the guest towards the
virtualizer's address space. Under this scheme, it is legal to have two
GPAs that point to the same HVA.

Introduce nvmm_hva_map() and nvmm_hva_unmap(), which map/unmap the HVA into
a dedicated UOBJ. Change nvmm_gpa_map() and nvmm_gpa_unmap() to just
perform an enter into the desired UOBJ.

With this change in place, all the mapping-related problems in Qemu+NVMM
are fixed.
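
For illustration, a minimal usage sketch of the new call order follows. It
assumes the prototypes from lib/libnvmm/nvmm.h in this changeset, the
pre-existing nvmm_machine_create() entry point, and that the header is
installed as <nvmm.h>; the addresses, sizes, flags value (0) and the use of
mmap(2) to obtain the host buffer are illustrative, not prescribed by this
change. Build with -lnvmm.

#include <err.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/mman.h>

#include <nvmm.h>

#define RAM_SIZE	(1024 * 1024)	/* 1 MB, illustrative */

int
main(void)
{
	struct nvmm_machine mach;
	uintptr_t hva;
	void *buf;

	if (nvmm_machine_create(&mach) == -1)
		err(EXIT_FAILURE, "nvmm_machine_create");

	/*
	 * Obtain a page-aligned buffer in the host process; under the new
	 * scheme the virtualizer owns this memory.
	 */
	buf = mmap(NULL, RAM_SIZE, PROT_READ|PROT_WRITE,
	    MAP_ANON|MAP_PRIVATE, -1, 0);
	if (buf == MAP_FAILED)
		err(EXIT_FAILURE, "mmap");
	hva = (uintptr_t)buf;

	/* Register the buffer so it may later be entered into the guest. */
	if (nvmm_hva_map(&mach, hva, RAM_SIZE) == -1)
		err(EXIT_FAILURE, "nvmm_hva_map");

	/*
	 * Enter the same host buffer at two guest physical addresses;
	 * this aliasing is now legal. The flags value (0) is illustrative,
	 * see libnvmm(3) for the accepted values.
	 */
	if (nvmm_gpa_map(&mach, hva, 0x00100000, RAM_SIZE, 0) == -1)
		err(EXIT_FAILURE, "nvmm_gpa_map");
	if (nvmm_gpa_map(&mach, hva, 0x00200000, RAM_SIZE, 0) == -1)
		err(EXIT_FAILURE, "nvmm_gpa_map");

	return EXIT_SUCCESS;
}

Teardown mirrors this order: nvmm_gpa_unmap() now only removes a guest view,
and the host buffer is released separately with nvmm_hva_unmap().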

diffstat:

 lib/libnvmm/libnvmm.3        |   45 +++++--
 lib/libnvmm/libnvmm.c        |   75 ++++++++----
 lib/libnvmm/nvmm.h           |    4 +-
 sys/dev/nvmm/nvmm.c          |  251 ++++++++++++++++++++++++++++++++++++------
 sys/dev/nvmm/nvmm_internal.h |   14 ++-
 sys/dev/nvmm/nvmm_ioctl.h    |   18 ++-
 6 files changed, 329 insertions(+), 78 deletions(-)

diffs (truncated from 622 to 300 lines):

diff -r c6787f67d100 -r 09b4484c1e0b lib/libnvmm/libnvmm.3
--- a/lib/libnvmm/libnvmm.3     Sat Dec 15 13:20:46 2018 +0000
+++ b/lib/libnvmm/libnvmm.3     Sat Dec 15 13:39:43 2018 +0000
@@ -1,4 +1,4 @@
-.\"    $NetBSD: libnvmm.3,v 1.4 2018/12/12 11:40:08 wiz Exp $
+.\"    $NetBSD: libnvmm.3,v 1.5 2018/12/15 13:39:43 maxv Exp $
 .\"
 .\" Copyright (c) 2018 The NetBSD Foundation, Inc.
 .\" All rights reserved.
@@ -27,7 +27,7 @@
 .\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 .\" POSSIBILITY OF SUCH DAMAGE.
 .\"
-.Dd December 12, 2018
+.Dd December 14, 2018
 .Dt LIBNVMM 3
 .Os
 .Sh NAME
@@ -63,6 +63,10 @@
 .Fn nvmm_vcpu_run "struct nvmm_machine *mach" "nvmm_cpuid_t cpuid" \
     "struct nvmm_exit *exit"
 .Ft int
+.Fn nvmm_hva_map "struct nvmm_machine *mach" "uintptr_t hva" "size_t size"
+.Ft int
+.Fn nvmm_hva_unmap "struct nvmm_machine *mach" "uintptr_t hva" "size_t size"
+.Ft int
 .Fn nvmm_gpa_map "struct nvmm_machine *mach" "uintptr_t hva" "gpaddr_t gpa" \
     "size_t size" "int flags"
 .Ft int
@@ -164,15 +168,33 @@
 structure is filled to indicate the exit reason, and the associated parameters
 if any.
 .Pp
-.Fn nvmm_gpa_map
-makes the guest physical memory area beginning on address
-.Fa gpa
-and of size
+.Fn nvmm_hva_map
+maps at address
+.Fa hva
+a buffer of size
+.Fa size
+in the calling process' virtual address space.
+This buffer is allowed to be subsequently mapped in a virtual machine.
+.Pp
+.Fn nvmm_hva_unmap
+unmaps the buffer of size
 .Fa size
-available in the machine
-.Fa mach .
-The area is mapped in the calling process' virtual address space, at address
-.Fa hva .
+at address
+.Fa hva
+from the calling process' virtual address space.
+.Pp
+.Fn nvmm_gpa_map
+maps into the guest physical memory beginning on address
+.Fa gpa
+the buffer of size
+.Fa size
+located at address
+.Fa hva
+of the calling process' virtual address space.
+The
+.Fa hva
+parameter must point to a buffer that was previously mapped with
+.Fn nvmm_hva_map .
 .Pp
 .Fn nvmm_gpa_unmap
 removes the guest physical memory area beginning on address
@@ -181,9 +203,6 @@
 .Fa size
 from the machine
 .Fa mach .
-It also unmaps the area beginning on
-.Fa hva
-from the calling process' virtual address space.
 .Pp
 .Fn nvmm_gva_to_gpa
 translates, on the CPU
diff -r c6787f67d100 -r 09b4484c1e0b lib/libnvmm/libnvmm.c
--- a/lib/libnvmm/libnvmm.c     Sat Dec 15 13:20:46 2018 +0000
+++ b/lib/libnvmm/libnvmm.c     Sat Dec 15 13:39:43 2018 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: libnvmm.c,v 1.4 2018/12/12 10:42:34 maxv Exp $ */
+/*     $NetBSD: libnvmm.c,v 1.5 2018/12/15 13:39:43 maxv Exp $ */
 
 /*
  * Copyright (c) 2018 The NetBSD Foundation, Inc.
@@ -65,24 +65,12 @@
        area_t *ent;
 
        LIST_FOREACH(ent, areas, list) {
-               /* Collision on HVA */
-               if (hva >= ent->hva && hva < ent->hva + ent->size) {
-                       return false;
-               }
-               if (hva + size >= ent->hva &&
-                   hva + size < ent->hva + ent->size) {
-                       return false;
-               }
-               if (hva <= ent->hva && hva + size >= ent->hva + ent->size) {
-                       return false;
-               }
-
                /* Collision on GPA */
                if (gpa >= ent->gpa && gpa < ent->gpa + ent->size) {
                        return false;
                }
-               if (gpa + size >= ent->gpa &&
-                   gpa + size < ent->gpa + ent->size) {
+               if (gpa + size > ent->gpa &&
+                   gpa + size <= ent->gpa + ent->size) {
                        return false;
                }
                if (gpa <= ent->gpa && gpa + size >= ent->gpa + ent->size) {
@@ -434,12 +422,54 @@
        args.size = size;
 
        ret = ioctl(nvmm_fd, NVMM_IOC_GPA_UNMAP, &args);
+       if (ret == -1) {
+               /* Can't recover. */
+               abort();
+       }
+
+       return 0;
+}
+
+int
+nvmm_hva_map(struct nvmm_machine *mach, uintptr_t hva, size_t size)
+{
+       struct nvmm_ioc_hva_map args;
+       int ret;
+
+       if (nvmm_init() == -1) {
+               return -1;
+       }
+
+       args.machid = mach->machid;
+       args.hva = hva;
+       args.size = size;
+
+       ret = ioctl(nvmm_fd, NVMM_IOC_HVA_MAP, &args);
        if (ret == -1)
                return -1;
 
-       ret = munmap((void *)hva, size);
+       return 0;
+}
+
+int
+nvmm_hva_unmap(struct nvmm_machine *mach, uintptr_t hva, size_t size)
+{
+       struct nvmm_ioc_hva_map args;
+       int ret;
 
-       return ret;
+       if (nvmm_init() == -1) {
+               return -1;
+       }
+
+       args.machid = mach->machid;
+       args.hva = hva;
+       args.size = size;
+
+       ret = ioctl(nvmm_fd, NVMM_IOC_HVA_MAP, &args);
+       if (ret == -1)
+               return -1;
+
+       return 0;
 }
 
 /*
@@ -458,15 +488,10 @@
        }
 
        LIST_FOREACH(ent, areas, list) {
-               if (gpa < ent->gpa) {
-                       continue;
+               if (gpa >= ent->gpa && gpa < ent->gpa + ent->size) {
+                       *hva = ent->hva + (gpa - ent->gpa);
+                       return 0;
                }
-               if (gpa >= ent->gpa + ent->size) {
-                       continue;
-               }
-
-               *hva = ent->hva + (gpa - ent->gpa);
-               return 0;
        }
 
        errno = ENOENT;
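
The first libnvmm.c hunk above drops the HVA collision checks (two GPAs may
now alias one HVA) and tightens the GPA checks to treat ranges as half-open
intervals. A standalone sketch of the same overlap test, with hypothetical
names, not part of this changeset:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/*
 * Return true if the half-open range [gpa, gpa + size) collides with an
 * existing area [ent_gpa, ent_gpa + ent_size). This mirrors the three GPA
 * checks in the hunk above, after the fix that compares the end address
 * with ">" and "<=" instead of ">=" and "<".
 */
bool
gpa_collides(uint64_t gpa, size_t size, uint64_t ent_gpa, size_t ent_size)
{
	/* New range starts inside the existing area. */
	if (gpa >= ent_gpa && gpa < ent_gpa + ent_size)
		return true;
	/* New range ends inside the existing area. */
	if (gpa + size > ent_gpa && gpa + size <= ent_gpa + ent_size)
		return true;
	/* New range fully covers the existing area. */
	if (gpa <= ent_gpa && gpa + size >= ent_gpa + ent_size)
		return true;
	return false;
}
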
diff -r c6787f67d100 -r 09b4484c1e0b lib/libnvmm/nvmm.h
--- a/lib/libnvmm/nvmm.h        Sat Dec 15 13:20:46 2018 +0000
+++ b/lib/libnvmm/nvmm.h        Sat Dec 15 13:39:43 2018 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: nvmm.h,v 1.2 2018/11/29 19:55:20 maxv Exp $    */
+/*     $NetBSD: nvmm.h,v 1.3 2018/12/15 13:39:43 maxv Exp $    */
 
 /*
  * Copyright (c) 2018 The NetBSD Foundation, Inc.
@@ -83,6 +83,8 @@
 
 int nvmm_gpa_map(struct nvmm_machine *, uintptr_t, gpaddr_t, size_t, int);
 int nvmm_gpa_unmap(struct nvmm_machine *, uintptr_t, gpaddr_t, size_t);
+int nvmm_hva_map(struct nvmm_machine *, uintptr_t, size_t);
+int nvmm_hva_unmap(struct nvmm_machine *, uintptr_t, size_t);
 
 int nvmm_gva_to_gpa(struct nvmm_machine *, nvmm_cpuid_t, gvaddr_t, gpaddr_t *,
     nvmm_prot_t *);
diff -r c6787f67d100 -r 09b4484c1e0b sys/dev/nvmm/nvmm.c
--- a/sys/dev/nvmm/nvmm.c       Sat Dec 15 13:20:46 2018 +0000
+++ b/sys/dev/nvmm/nvmm.c       Sat Dec 15 13:39:43 2018 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: nvmm.c,v 1.3 2018/11/25 14:11:24 maxv Exp $    */
+/*     $NetBSD: nvmm.c,v 1.4 2018/12/15 13:39:43 maxv Exp $    */
 
 /*
  * Copyright (c) 2018 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: nvmm.c,v 1.3 2018/11/25 14:11:24 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: nvmm.c,v 1.4 2018/12/15 13:39:43 maxv Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -213,7 +213,14 @@
                        nvmm_vcpu_put(vcpu);
                }
                uvmspace_free(mach->vm);
-               uao_detach(mach->uobj);
+
+               /* Drop the kernel UOBJ refs. */
+               for (j = 0; j < NVMM_MAX_SEGS; j++) {
+                       if (!mach->segs[j].present)
+                               continue;
+                       uao_detach(mach->segs[j].uobj);
+               }
+
                nvmm_machine_free(mach);
 
                rw_exit(&mach->lock);
@@ -249,14 +256,13 @@
        /* Curproc owns the machine. */
        mach->procid = curproc->p_pid;
 
+       /* Zero out the segments. */
+       memset(&mach->segs, 0, sizeof(mach->segs));
+
        /* Create the machine vmspace. */
        mach->gpa_begin = 0;
        mach->gpa_end = NVMM_MAX_RAM;
        mach->vm = uvmspace_alloc(0, mach->gpa_end - mach->gpa_begin, false);
-       mach->uobj = uao_create(mach->gpa_end - mach->gpa_begin, 0);
-
-       /* Grab a reference for the machine. */
-       uao_reference(mach->uobj);
 
        (*nvmm_impl->machine_create)(mach);
 
@@ -292,7 +298,13 @@
 
        /* Free the machine vmspace. */
        uvmspace_free(mach->vm);
-       uao_detach(mach->uobj);
+
+       /* Drop the kernel UOBJ refs. */
+       for (i = 0; i < NVMM_MAX_SEGS; i++) {
+               if (!mach->segs[i].present)
+                       continue;
+               uao_detach(mach->segs[i].uobj);
+       }
 
        nvmm_machine_free(mach);
        nvmm_machine_put(mach);
@@ -500,22 +512,193 @@
 
 /* -------------------------------------------------------------------------- */
 
+static struct uvm_object *
+nvmm_seg_getuobj(struct nvmm_machine *mach, uintptr_t hva, size_t size,
+   size_t *off)
+{
+       struct nvmm_seg *seg;
+       size_t i;
+
+       for (i = 0; i < NVMM_MAX_SEGS; i++) {
+               seg = &mach->segs[i];
+               if (!seg->present) {
+                       continue;
+               }
+               if (hva >= seg->hva && hva + size <= seg->hva + seg->size) {
+                       *off = hva - seg->hva;
+                       return seg->uobj;
+               }
+       }
+


