Port-i386 archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
Re: Xen #ifdef-age & x86 bus_space(9) implementation
On Wed, May 05, 2010 at 04:16:17PM -0500, David Young wrote:
> On Wed, May 05, 2010 at 04:09:44PM -0500, David Young wrote:
> > Is there any reason not to add a weak alias pmap_kenter_ma ->
> > pmap_kenter_pa, and to extract pmap_kenter_ma() into its own object that
> > is linked into the Xen kernels, only?
>
> ... and use pmap_kenter_ma() throughout bus_space.c?
>
> Is there any reason not to add a weak alias pmap_extract_ma ->
> pmap_extract, too, and extract pmap_extract_ma() into its own
> object that is linked into the Xen kernels, only? Then we can use
> pmap_extract_ma() throughout bus_space.c, too, and we can delete
> this strange thing:
>
> #ifdef XEN
> #include <xen/hypervisor.h>
> #include <xen/xenpmap.h>
>
> #define pmap_extract(a, b, c) pmap_extract_ma(a, b, c)
> #endif
How about something like this?
Dave
--
David Young OJC Technologies
dyoung%ojctech.com@localhost Urbana, IL * (217) 278-3933
/*
* Copyright (c) 2007 Manuel Bouyer.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
* Copyright (c) 2006 Mathieu Ropert <mro%adviseo.fr@localhost>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*
*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Charles D. Cranor and
* Washington University.
* 4. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright 2001 (c) Wasabi Systems, Inc.
* All rights reserved.
*
* Written by Frank van der Linden for Wasabi Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed for the NetBSD Project by
* Wasabi Systems, Inc.
* 4. The name of Wasabi Systems, Inc. may not be used to endorse
* or promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD$");
#include "opt_user_ldt.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#include "opt_xen.h"
#if !defined(__x86_64__)
#include "opt_kstack_dr0.h"
#endif /* !defined(__x86_64__) */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/kernel.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/intr.h>
#include <sys/xcall.h>
#include <uvm/uvm.h>
#include <dev/isa/isareg.h>
#include <machine/specialreg.h>
#include <machine/gdt.h>
#include <machine/isa_machdep.h>
#include <machine/cpuvar.h>
#include <x86/pmap.h>
#include <x86/pmap_pv.h>
#include <x86/i82489reg.h>
#include <x86/i82489var.h>
#ifdef XEN
#include <xen/xen3-public/xen.h>
#include <xen/hypervisor.h>
#endif
/* flag to be used for kernel mappings: PG_u on Xen/amd64, 0 otherwise */
#if defined(XEN) && defined(__x86_64__)
#define PG_k PG_u
#else
#define PG_k 0
#endif
extern paddr_t pmap_pa_start; /* PA of first physical page for this domain */
extern paddr_t pmap_pa_end; /* PA of last physical page for this domain */
/*
 * pmap_enter: enter a mapping given a physical address (Xen flavour)
 *
 * Converts the physical address to a machine address via xpmap_ptom()
 * when it falls inside this domain's physical range [pmap_pa_start,
 * pmap_pa_end), then defers to pmap_enter_ma() with DOMID_SELF.
 * Addresses outside that range are passed through unchanged.
 */
int
pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
    u_int flags)
{
	paddr_t ma;

	/* out-of-domain PAs are used verbatim -- XXX hack */
	ma = __predict_false(pa < pmap_pa_start || pmap_pa_end <= pa) ?
	    pa : xpmap_ptom(pa);

	return pmap_enter_ma(pmap, va, ma, pa, prot, flags, DOMID_SELF);
}
/*
 * pmap_kenter_ma: enter a kernel mapping without R/M (pv_entry) tracking
 *
 * => no need to lock anything, assume va is already allocated
 * => should be faster than normal pmap enter function
 * => we expect a MACHINE address, not a physical address
 * => flags may include PMAP_NOCACHE to request an uncached mapping
 */
void
pmap_kenter_ma(vaddr_t va, paddr_t ma, vm_prot_t prot, u_int flags)
{
	pt_entry_t *pte, opte, npte;

	/* locate the PTE for va; which lookup applies depends on the range */
	if (va < VM_MIN_KERNEL_ADDRESS)
		pte = vtopte(va);
	else
		pte = kvtopte(va);

	/* build the new PTE: MA frame, R/W per prot, valid, kernel flag */
	npte = ma | ((prot & VM_PROT_WRITE) ? PG_RW : PG_RO) |
	    PG_V | PG_k;
	if (flags & PMAP_NOCACHE)
		npte |= PG_N;	/* caller asked for an uncached mapping */

	/* set no-execute when the CPU has NX and exec was not requested */
	if ((cpu_feature[2] & CPUID_NOX) && !(prot & VM_PROT_EXECUTE))
		npte |= PG_NX;

	/* atomically install the new PTE, fetching the old one */
	opte = pmap_pte_testset (pte, npte); /* zap! */

	/* a previously-valid entry may still be cached in the TLB(s) */
	if (pmap_valid_entry(opte)) {
#if defined(MULTIPROCESSOR)
		kpreempt_disable();
		pmap_tlb_shootdown(pmap_kernel(), va, 0, opte);
		kpreempt_enable();
#else
		/* Don't bother deferring in the single CPU case. */
		pmap_update_pg(va);
#endif
	}
}
/*
 * pmap_extract_ma: extract a MA for the given VA
 *
 * => returns false when va has no valid mapping in pmap
 * => on success, *pap (if non-NULL) receives the machine address of
 *    the frame plus va's offset within the page
 */
bool
pmap_extract_ma(struct pmap *pmap, vaddr_t va, paddr_t *pap)
{
	pt_entry_t *ptes, pte;
	pd_entry_t pde;
	pd_entry_t * const *pdes;
	struct pmap *pmap2;

	/*
	 * Map this pmap's page tables so we can walk them; preemption
	 * is held off for as long as the tables are mapped.
	 */
	kpreempt_disable();
	pmap_map_ptes(pmap, &pmap2, &ptes, &pdes);
	if (!pmap_pdes_valid(va, pdes, &pde)) {
		/* no valid PDE chain for va => no mapping can exist */
		pmap_unmap_ptes(pmap, pmap2);
		kpreempt_enable();
		return false;
	}

	/* snapshot the PTE before unmapping the page tables */
	pte = ptes[pl1_i(va)];
	pmap_unmap_ptes(pmap, pmap2);
	kpreempt_enable();

	if (__predict_true((pte & PG_V) != 0)) {
		if (pap != NULL)
			*pap = (pte & PG_FRAME) | (va & (NBPD_L1 - 1));
		return true;
	}

	return false;
}
? sys/arch/xen/x86/xen_pmap.c
Index: sys/arch/xen/conf/files.xen
===================================================================
RCS file: /cvsroot/src/sys/arch/xen/conf/files.xen,v
retrieving revision 1.106
diff -u -p -r1.106 files.xen
--- sys/arch/xen/conf/files.xen 3 Nov 2009 05:23:28 -0000 1.106
+++ sys/arch/xen/conf/files.xen 5 May 2010 23:04:08 -0000
@@ -106,6 +106,7 @@ file arch/xen/x86/hypervisor_machdep.c
# file arch/x86/x86/mtrr_i686.c mtrr
file arch/x86/x86/syscall.c
file arch/xen/x86/x86_xpmap.c
+file arch/xen/x86/xen_pmap.c
file arch/xen/x86/xen_intr.c
file arch/xen/x86/xenfunc.c
Index: sys/arch/x86/include/pmap.h
===================================================================
RCS file: /cvsroot/src/sys/arch/x86/include/pmap.h,v
retrieving revision 1.29
diff -u -p -r1.29 pmap.h
--- sys/arch/x86/include/pmap.h 9 Feb 2010 22:51:13 -0000 1.29
+++ sys/arch/x86/include/pmap.h 5 May 2010 23:04:08 -0000
@@ -228,6 +228,12 @@ void pmap_emap_enter(vaddr_t, paddr_t,
void pmap_emap_remove(vaddr_t, vsize_t);
void pmap_emap_sync(bool);
+void pmap_map_ptes(struct pmap *, struct pmap **, pd_entry_t **,
+ pd_entry_t * const **);
+void pmap_unmap_ptes(struct pmap *, struct pmap *);
+
+int pmap_pdes_invalid(vaddr_t, pd_entry_t * const *, pd_entry_t *);
+
vaddr_t reserve_dumppages(vaddr_t); /* XXX: not a pmap fn */
void pmap_tlb_shootdown(pmap_t, vaddr_t, vaddr_t, pt_entry_t);
@@ -248,6 +254,12 @@ bool pmap_pageidlezero(paddr_t);
* inline functions
*/
+__inline static bool __unused
+pmap_pdes_valid(vaddr_t va, pd_entry_t * const *pdes, pd_entry_t *lastpde)
+{
+ return pmap_pdes_invalid(va, pdes, lastpde) == 0;
+}
+
/*
* pmap_update_pg: flush one page from the TLB (or flush the whole thing
* if hardware doesn't support one-page flushing)
@@ -401,17 +413,17 @@ xpmap_update (pt_entry_t *pte, pt_entry_
/* Xen helpers to change bits of a pte */
#define XPMAP_UPDATE_DIRECT 1 /* Update direct map entry flags too */
+paddr_t vtomach(vaddr_t);
+#define vtomfn(va) (vtomach(va) >> PAGE_SHIFT)
+
+#endif /* XEN */
+
/* pmap functions with machine addresses */
void pmap_kenter_ma(vaddr_t, paddr_t, vm_prot_t, u_int);
int pmap_enter_ma(struct pmap *, vaddr_t, paddr_t, paddr_t,
vm_prot_t, u_int, int);
bool pmap_extract_ma(pmap_t, vaddr_t, paddr_t *);
-paddr_t vtomach(vaddr_t);
-#define vtomfn(va) (vtomach(va) >> PAGE_SHIFT)
-
-#endif /* XEN */
-
/*
* Hooks for the pool allocator.
*/
Index: sys/arch/x86/x86/bus_space.c
===================================================================
RCS file: /cvsroot/src/sys/arch/x86/x86/bus_space.c,v
retrieving revision 1.28
diff -u -p -r1.28 bus_space.c
--- sys/arch/x86/x86/bus_space.c 28 Apr 2010 20:27:36 -0000 1.28
+++ sys/arch/x86/x86/bus_space.c 5 May 2010 23:04:08 -0000
@@ -48,9 +48,6 @@ __KERNEL_RCSID(0, "$NetBSD: bus_space.c,
#ifdef XEN
#include <xen/hypervisor.h>
-#include <xen/xenpmap.h>
-
-#define pmap_extract(a, b, c) pmap_extract_ma(a, b, c)
#endif
/*
@@ -339,11 +398,7 @@ x86_mem_add_mapping(bus_addr_t bpa, bus_
*bshp = (bus_space_handle_t)(sva + (bpa & PGOFSET));
for (va = sva; pa != endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
-#ifdef XEN
pmap_kenter_ma(va, pa, VM_PROT_READ | VM_PROT_WRITE, pmapflags);
-#else
- pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, pmapflags);
-#endif /* XEN */
}
pmap_update(pmap_kernel());
@@ -395,7 +450,7 @@ _x86_memio_unmap(bus_space_tag_t t, bus_
}
#endif
- if (pmap_extract(pmap_kernel(), va, &bpa) == FALSE) {
+ if (pmap_extract_ma(pmap_kernel(), va, &bpa) == FALSE) {
panic("_x86_memio_unmap:"
" wrong virtual address");
}
@@ -447,7 +504,7 @@ bus_space_unmap(bus_space_tag_t t, bus_s
panic("x86_memio_unmap: overflow");
#endif
- (void) pmap_extract(pmap_kernel(), va, &bpa);
+ (void) pmap_extract_ma(pmap_kernel(), va, &bpa);
bpa += (bsh & PGOFSET);
pmap_kremove(va, endva - va);
Index: sys/arch/x86/x86/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/x86/x86/pmap.c,v
retrieving revision 1.108
diff -u -p -r1.108 pmap.c
--- sys/arch/x86/x86/pmap.c 4 May 2010 23:27:14 -0000 1.108
+++ sys/arch/x86/x86/pmap.c 5 May 2010 23:04:11 -0000
@@ -531,6 +531,8 @@ static struct pool_cache pmap_pv_cache;
static pt_entry_t *csrc_pte, *cdst_pte, *zero_pte, *ptp_pte, *early_zero_pte;
static char *csrcp, *cdstp, *zerop, *ptpp, *early_zerop;
+int pmap_enter_default(pmap_t, vaddr_t, paddr_t, vm_prot_t, u_int);
+
/*
* pool and cache that PDPs are allocated from
*/
@@ -580,8 +582,6 @@ static void pmap_free_ptp(struct pmap
pd_entry_t * const *);
static bool pmap_is_curpmap(struct pmap *);
static bool pmap_is_active(struct pmap *, struct cpu_info *, bool);
-static void pmap_map_ptes(struct pmap *, struct pmap **,
- pt_entry_t **, pd_entry_t * const **);
static bool pmap_remove_pte(struct pmap *, struct vm_page *,
pt_entry_t *, vaddr_t,
struct pv_entry **);
@@ -589,13 +589,8 @@ static pt_entry_t pmap_remove_ptes(stru
vaddr_t, vaddr_t, vaddr_t,
struct pv_entry **);
-static void pmap_unmap_ptes(struct pmap *, struct pmap *);
static void pmap_unmap_apdp(void);
static bool pmap_get_physpage(vaddr_t, int, paddr_t *);
-static int pmap_pdes_invalid(vaddr_t, pd_entry_t * const *,
- pd_entry_t *);
-#define pmap_pdes_valid(va, pdes, lastpde) \
- (pmap_pdes_invalid((va), (pdes), (lastpde)) == 0)
static void pmap_alloc_level(pd_entry_t * const *, vaddr_t, int,
long *);
@@ -781,7 +776,7 @@ pmap_reference(struct pmap *pmap)
* => must be undone with pmap_unmap_ptes before returning
*/
-static void
+void
pmap_map_ptes(struct pmap *pmap, struct pmap **pmap2,
pd_entry_t **ptepp, pd_entry_t * const **pdeppp)
{
@@ -914,7 +909,7 @@ pmap_map_ptes(struct pmap *pmap, struct
* pmap_unmap_ptes: unlock the PTE mapping of "pmap"
*/
-static void
+void
pmap_unmap_ptes(struct pmap *pmap, struct pmap *pmap2)
{
@@ -1122,47 +1117,7 @@ pmap_emap_remove(vaddr_t sva, vsize_t le
}
}
-#ifdef XEN
-/*
- * pmap_kenter_ma: enter a kernel mapping without R/M (pv_entry) tracking
- *
- * => no need to lock anything, assume va is already allocated
- * => should be faster than normal pmap enter function
- * => we expect a MACHINE address
- */
-
-void
-pmap_kenter_ma(vaddr_t va, paddr_t ma, vm_prot_t prot, u_int flags)
-{
- pt_entry_t *pte, opte, npte;
-
- if (va < VM_MIN_KERNEL_ADDRESS)
- pte = vtopte(va);
- else
- pte = kvtopte(va);
-
- npte = ma | ((prot & VM_PROT_WRITE) ? PG_RW : PG_RO) |
- PG_V | PG_k;
- if (flags & PMAP_NOCACHE)
- npte |= PG_N;
-
- if ((cpu_feature[2] & CPUID_NOX) && !(prot & VM_PROT_EXECUTE))
- npte |= PG_NX;
-
- opte = pmap_pte_testset (pte, npte); /* zap! */
-
- if (pmap_valid_entry(opte)) {
-#if defined(MULTIPROCESSOR)
- kpreempt_disable();
- pmap_tlb_shootdown(pmap_kernel(), va, 0, opte);
- kpreempt_enable();
-#else
- /* Don't bother deferring in the single CPU case. */
- pmap_update_pg(va);
-#endif
- }
-}
-#endif /* XEN */
+__weak_alias(pmap_kenter_ma, pmap_kenter_pa);
#if defined(__x86_64__)
/*
@@ -2883,7 +2838,7 @@ pmap_deactivate(struct lwp *l)
* some misc. functions
*/
-static int
+int
pmap_pdes_invalid(vaddr_t va, pd_entry_t * const *pdes, pd_entry_t *lastpde)
{
int i;
@@ -2977,39 +2932,9 @@ vtophys(vaddr_t va)
return (0);
}
-#ifdef XEN
-/*
- * pmap_extract_ma: extract a MA for the given VA
- */
+__weak_alias(pmap_extract_ma, pmap_extract);
-bool
-pmap_extract_ma(struct pmap *pmap, vaddr_t va, paddr_t *pap)
-{
- pt_entry_t *ptes, pte;
- pd_entry_t pde;
- pd_entry_t * const *pdes;
- struct pmap *pmap2;
-
- kpreempt_disable();
- pmap_map_ptes(pmap, &pmap2, &ptes, &pdes);
- if (!pmap_pdes_valid(va, pdes, &pde)) {
- pmap_unmap_ptes(pmap, pmap2);
- kpreempt_enable();
- return false;
- }
-
- pte = ptes[pl1_i(va)];
- pmap_unmap_ptes(pmap, pmap2);
- kpreempt_enable();
-
- if (__predict_true((pte & PG_V) != 0)) {
- if (pap != NULL)
- *pap = (pte & PG_FRAME) | (va & (NBPD_L1 - 1));
- return true;
- }
-
- return false;
-}
+#ifdef XEN
/*
* vtomach: virtual address to machine address. For use by
@@ -3028,8 +2953,6 @@ vtomach(vaddr_t va)
#endif /* XEN */
-
-
/*
* pmap_virtual_space: used during bootup [pmap_steal_memory] to
* determine the bounds of the kernel virtual addess space.
@@ -3985,24 +3908,25 @@ pmap_unwire(struct pmap *pmap, vaddr_t v
* defined as macro in pmap.h
*/
+__weak_alias(pmap_enter, pmap_enter_default);
+
+int
+pmap_enter_default(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
+ u_int flags)
+{
+ return pmap_enter_ma(pmap, va, pa, pa, prot, flags, 0);
+}
+
/*
* pmap_enter: enter a mapping into a pmap
*
* => must be done "now" ... no lazy-evaluation
* => we set pmap => pv_head locking
*/
-#ifdef XEN
int
pmap_enter_ma(struct pmap *pmap, vaddr_t va, paddr_t ma, paddr_t pa,
vm_prot_t prot, u_int flags, int domid)
{
-#else /* XEN */
-int
-pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
- u_int flags)
-{
- paddr_t ma = pa;
-#endif /* XEN */
pt_entry_t *ptes, opte, npte;
pt_entry_t *ptep;
pd_entry_t * const *pdes;
@@ -4214,22 +4138,6 @@ out2:
return error;
}
-#ifdef XEN
-int
-pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int
flags)
-{
- paddr_t ma;
-
- if (__predict_false(pa < pmap_pa_start || pmap_pa_end <= pa)) {
- ma = pa; /* XXX hack */
- } else {
- ma = xpmap_ptom(pa);
- }
-
- return pmap_enter_ma(pmap, va, ma, pa, prot, flags, DOMID_SELF);
-}
-#endif /* XEN */
-
static bool
pmap_get_physpage(vaddr_t va, int level, paddr_t *paddrp)
{
Home |
Main Index |
Thread Index |
Old Index