Source-Changes-HG archive


[src/bouyer-xenpvh]: src/sys/arch/xen/xen It is possible to use grant_v2 with...



details:   https://anonhg.NetBSD.org/src/rev/22bdd7787f4c
branches:  bouyer-xenpvh
changeset: 931078:22bdd7787f4c
user:      bouyer <bouyer%NetBSD.org@localhost>
date:      Tue Apr 21 16:57:40 2020 +0000

description:
It is possible to use grant_v2 with HVM guests, but the status table has
to be mapped using XENMEM_add_to_physmap.
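For context, the essential HVM-side step this change introduces is sketched
below: each grant-status frame is backed by a freshly allocated, zeroed guest
page, and the hypervisor is then asked to place the corresponding status frame
at that GPFN via XENMEM_add_to_physmap with XENMAPSPACE_grant_table and the
XENMAPIDX_grant_table_status index flag. This is a condensed rendering of the
loop added to xengnt_map_status() in the diff that follows, with surrounding
setup and cleanup omitted; it is illustrative only, not a drop-in replacement,
and assumes the usual NetBSD/Xen kernel headers are already in scope.

        /* Sketch: map grant-status frames for an HVM guest
         * (condensed from the xengnt_map_status() change below). */
        for (int i = 0; i < gnt_status_frames; i++) {
                struct vm_page *pg;
                struct xen_add_to_physmap xmap;

                /* Back status frame i with a zeroed guest page. */
                pg = uvm_pagealloc(NULL, 0, NULL,
                    UVM_PGA_USERESERVE|UVM_PGA_ZERO);
                pages[i] = atop(uvm_vm_page_to_phys(pg));

                /* Ask the hypervisor to expose status frame i at that GPFN. */
                xmap.domid = DOMID_SELF;
                xmap.space = XENMAPSPACE_grant_table;
                xmap.idx   = i | XENMAPIDX_grant_table_status;
                xmap.gpfn  = pages[i];

                if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xmap) < 0)
                        panic("%s: Unable to add grant status frames", __func__);
        }

The mapped frames are subsequently entered into the kernel map so that
grant_status[] indexes them directly, exactly as the PV path already did with
the frames returned by GNTTABOP_get_status_frames.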

diffstat:

 sys/arch/xen/xen/xengnt.c |  118 ++++++++-------------------------------------
 1 files changed, 23 insertions(+), 95 deletions(-)

diffs (236 lines):

diff -r b38a82ee3a78 -r 22bdd7787f4c sys/arch/xen/xen/xengnt.c
--- a/sys/arch/xen/xen/xengnt.c Mon Apr 20 20:19:07 2020 +0000
+++ b/sys/arch/xen/xen/xengnt.c Tue Apr 21 16:57:40 2020 +0000
@@ -1,4 +1,4 @@
-/*      $NetBSD: xengnt.c,v 1.29.2.2 2020/04/20 19:42:10 bouyer Exp $      */
+/*      $NetBSD: xengnt.c,v 1.29.2.3 2020/04/21 16:57:40 bouyer Exp $      */
 
 /*
  * Copyright (c) 2006 Manuel Bouyer.
@@ -26,7 +26,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xengnt.c,v 1.29.2.2 2020/04/20 19:42:10 bouyer Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xengnt.c,v 1.29.2.3 2020/04/21 16:57:40 bouyer Exp $");
 
 #include <sys/types.h>
 #include <sys/param.h>
@@ -44,16 +44,6 @@
 
 #include "opt_xen.h"
 
-/* 
- * grant table v2 is not supported for HVM guests on 4.11 at last.
- * see xen/arch/x86/hvm/hypercall.c in Xen sources (missing
- * GNTTABOP_get_status_frames)
- */
-
-#ifdef XENPV
-#define USE_GRANT_V2
-#endif
-
 /* #define XENDEBUG */
 #ifdef XENDEBUG
 #define DPRINTF(x) printf x
@@ -77,7 +67,6 @@
 #define XENGNT_NO_ENTRY 0xffffffff
 
 /* VM address of the grant table */
-#ifdef USE_GRANT_V2
 #define NR_GRANT_ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(grant_entry_v2_t))
 #define NR_GRANT_STATUS_PER_PAGE (PAGE_SIZE / sizeof(grant_status_t))
 
@@ -86,10 +75,6 @@
 int gnt_status_frames;
 
 grant_status_t *grant_status;
-#else /* USE_GRANT_V2 */
-#define NR_GRANT_ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(grant_entry_v1_t))
-grant_entry_v1_t *grant_table;
-#endif /* USE_GRANT_V2 */
 kmutex_t grant_lock;
 
 static grant_ref_t xengnt_get_entry(void);
@@ -118,13 +103,11 @@
        gnt_nr_grant_frames = gnt_max_grant_frames;
 
 
-#ifdef USE_GRANT_V2
        struct gnttab_set_version gntversion;
        gntversion.version = 2;
        rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gntversion, 1);
        if (rc < 0 || gntversion.version != 2)
                panic("GNTTABOP_set_version 2 failed %d", rc);
-#endif /* USE_GRANT_V2 */
 
        nr_grant_entries =
            gnt_max_grant_frames * NR_GRANT_ENTRIES_PER_PAGE;
@@ -139,14 +122,12 @@
        for (i = 0; i <= nr_grant_entries; i++)
                gnt_entries[i] = XENGNT_NO_ENTRY;
 
-#ifdef USE_GRANT_V2
        gnt_status_frames =
            round_page(nr_grant_entries * sizeof(grant_status_t)) / PAGE_SIZE;
        grant_status = (void *)uvm_km_alloc(kernel_map,
            gnt_status_frames * PAGE_SIZE, 0, UVM_KMF_VAONLY);
        if (grant_status == NULL)
                panic("xengnt_init() status no VM space");
-#endif /* USE_GRANT_V2 */
 
        mutex_init(&grant_lock, MUTEX_DEFAULT, IPL_VM);
 
@@ -194,10 +175,8 @@
        /* Remove virtual => machine mapping for grant table */
        pmap_kremove((vaddr_t)grant_table, gnt_nr_grant_frames * PAGE_SIZE);
 
-#ifdef USE_GRANT_V2
        /* Remove virtual => machine mapping for status table */
        pmap_kremove((vaddr_t)grant_status, gnt_status_frames * PAGE_SIZE);
-#endif
 
        pmap_update(pmap_kernel());
        mutex_exit(&grant_lock);
@@ -210,12 +189,8 @@
 static int
 xengnt_map_status(void)
 {
-#ifdef USE_GRANT_V2
-       gnttab_get_status_frames_t getstatus;
        uint64_t *pages;
        size_t sz;
-       int err;
-
        KASSERT(mutex_owned(&grant_lock));
 
        sz = gnt_status_frames * sizeof(*pages);
@@ -223,6 +198,10 @@
        if (pages == NULL)
                return ENOMEM;
 
+#ifdef XENPV
+       gnttab_get_status_frames_t getstatus;
+       int err;
+
        getstatus.dom = DOMID_SELF;
        getstatus.nr_frames = gnt_status_frames;
        set_xen_guest_handle(getstatus.frame_list, pages);
@@ -240,7 +219,23 @@
                kmem_free(pages, sz);
                return ENOMEM;
        }
+#else /* XENPV */
+       for (int i = 0; i < gnt_status_frames; i++) {
+               struct vm_page *pg;
+               struct xen_add_to_physmap xmap;
 
+               pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE|UVM_PGA_ZERO);
+               pages[i] = atop(uvm_vm_page_to_phys(pg));
+
+               xmap.domid = DOMID_SELF;
+               xmap.space = XENMAPSPACE_grant_table;
+               xmap.idx = i | XENMAPIDX_grant_table_status;
+               xmap.gpfn = pages[i];
+
+               if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xmap) < 0)
+                       panic("%s: Unable to add grant tables\n", __func__);
+       }
+#endif /* XENPV */
        /*
         * map between status_table addresses and the machine addresses of
         * the status table frames
@@ -253,8 +248,6 @@
        pmap_update(pmap_kernel());
 
        kmem_free(pages, sz);
-
-#endif /* USE_GRANT_V2 */
        return 0;
 }
 
@@ -298,7 +291,7 @@
                xmap.gpfn = pages[gnt_nr_grant_frames];
 
                if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xmap) < 0)
-                       panic("%s: Unable to register HYPERVISOR_shared_info\n", __func__);
+                       panic("%s: Unable to add grant frames\n", __func__);
 
        } else {
                setup.dom = DOMID_SELF;
@@ -398,7 +391,6 @@
        mutex_exit(&grant_lock);
 }
 
-#ifdef USE_GRANT_V2
 int
 xengnt_grant_access(domid_t dom, paddr_t ma, int ro, grant_ref_t *entryp)
 {
@@ -451,67 +443,3 @@
 {
        return grant_status[entry] & (GTF_reading|GTF_writing);
 }
-#else /* USE_GRANT_V2 */
-
-int     
-xengnt_grant_access(domid_t dom, paddr_t ma, int ro, grant_ref_t *entryp)
-{       
-       mutex_enter(&grant_lock);
-
-       *entryp = xengnt_get_entry();
-       if (__predict_false(*entryp == XENGNT_NO_ENTRY)) {
-               mutex_exit(&grant_lock);
-               return ENOMEM;
-       }
-
-       grant_table[*entryp].frame = ma >> PAGE_SHIFT;
-       grant_table[*entryp].domid = dom;
-       /*      
-        * ensure that the above values reach global visibility
-        * before permitting frame's access (done when we set flags)    
-        */
-       xen_rmb();
-       grant_table[*entryp].flags =  
-           GTF_permit_access | (ro ? GTF_readonly : 0);
-       mutex_exit(&grant_lock);      
-       return 0;
-}
-
-
-static inline uint16_t
-xen_atomic_cmpxchg16(volatile uint16_t *ptr, uint16_t  val, uint16_t newval) 
-{
-       unsigned long result;
-
-       __asm volatile(__LOCK_PREFIX
-           "cmpxchgw %w1,%2"
-           :"=a" (result)
-           :"q"(newval), "m" (*ptr), "0" (val)
-           :"memory");
-
-       return result;
-}
-
-void
-xengnt_revoke_access(grant_ref_t entry)
-{
-
-       uint16_t flags, nflags;
-
-       nflags = grant_table[entry].flags;
-
-       do {
-               if ((flags = nflags) & (GTF_reading|GTF_writing))
-                       panic("xengnt_revoke_access: still in use");
-               nflags = xen_atomic_cmpxchg16(&grant_table[entry].flags,
-                   flags, 0);
-       } while (nflags != flags);
-       xengnt_free_entry(entry);
-}
-
-int
-xengnt_status(grant_ref_t entry)
-{
-       return (grant_table[entry].flags & (GTF_reading|GTF_writing));
-}
-#endif /* USE_GRANT_V2 */


