tech-kern archive

tracking P->V for unmanaged device pages



Various DRM graphics drivers, including Intel, Radeon, and Nouveau,
sometimes need to unmap all virtual mappings of certain physical
pages for which there is no struct vm_page.  The issue is explained in
detail here:

https://mail-index.netbsd.org/tech-kern/2014/07/23/msg017392.html

It's not desirable to simply put struct vm_pages on a freelist that
uvm_pagealloc ignores: struct vm_page is large (120 bytes on amd64,
for example), most of it is unnecessary for P->V tracking, and the
physical regions that need P->V tracking are large (hundreds of
megabytes, or gigabytes).  For a 2 GB aperture, for instance, that
would be 524288 struct vm_pages, about 60 MB of mostly dead weight.

The attached patch implements the following extension to pmap(9) on
x86 and uses it in DRM[*].  The implementation uses a linear list of
pv-tracked ranges, since the list is expected to be short (one to
three elements).  The list is managed with pserialize(9), so it adds
no locking overhead to the existing pmap operations that need to look
up entries in it.
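
For reference, the lock-free reader side boils down to the following
(a condensed sketch of pmap_pv_tracked from the patch below; the name
pv_lookup is made up for this sketch, and writers pair this with
membar_producer under pv_unmanaged.lock, unpublishing entries with
pserialize_perform before kmem_free):

	/* Condensed sketch of pmap_pv_tracked in the patch below.  */
	static struct pv_track *
	pv_lookup(paddr_t pa)
	{
		struct pv_track *pvt;
		int s;

		s = pserialize_read_enter();
		for (pvt = pv_unmanaged.list; pvt != NULL;
		     pvt = pvt->pvt_next) {
			/* Pairs with membar_producer in pmap_pv_track.  */
			membar_datadep_consumer();
			if (pvt->pvt_start <= pa &&
			    pa - pvt->pvt_start < pvt->pvt_size)
				break;
		}
		pserialize_read_exit(s);
		return pvt;
	}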

core@ discussed the problem and asked that this approach be marked as
an interim solution, because not everyone was happy about it but
nobody had an obviously better idea.

Objections?


void	pmap_pv_init(void)

  Initialize the pmap_pv(9) subsystem.  Called by uvm_init().

void	pmap_pv_track(paddr_t startpa, psize_t size)

  Track P->V mappings for the unmanaged pages in [startpa, startpa +
  size).  Called by a driver at initialization time to register its
  device pages.  May be called only once for any given range, and
  registered ranges must not overlap.  Both startpa and size must be
  page-aligned.

void	pmap_pv_untrack(paddr_t startpa, psize_t size)

  Stop tracking P->V mappings for the pages in [startpa, startpa +
  size).  The range must be one previously registered with
  pmap_pv_track.

void	pmap_pv_protect(paddr_t pa, vm_prot_t prot)

  Reduce the protection of all recorded mappings of the pv-tracked
  unmanaged page at pa to prot.  pa must be page-aligned.  Passing
  VM_PROT_NONE removes all mappings.
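
To illustrate the intended use, here is a minimal sketch of a driver
using this interface.  mydev and its sc_aperture_* fields are made-up
names, and the aperture base and size are assumed page-aligned; the
DRM hunks in the patch below are the real users.

	static void
	mydev_attach(struct mydev_softc *sc)
	{

		/* Register the aperture pages for P->V tracking.  */
		pmap_pv_track(sc->sc_aperture_base, sc->sc_aperture_size);
	}

	static void
	mydev_revoke_mappings(struct mydev_softc *sc)
	{
		paddr_t pa;

		/* Remove every virtual mapping of every aperture page.  */
		for (pa = sc->sc_aperture_base;
		     pa < sc->sc_aperture_base + sc->sc_aperture_size;
		     pa += PAGE_SIZE)
			pmap_pv_protect(pa, VM_PROT_NONE);
	}

	static void
	mydev_detach(struct mydev_softc *sc)
	{

		pmap_pv_untrack(sc->sc_aperture_base, sc->sc_aperture_size);
	}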


[*] This predictably fixes some rendering issues, but it seems to
expose another bug that makes the X server crash more frequently;
that bug is now under investigation.
Index: sys/uvm/uvm_init.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_init.c,v
retrieving revision 1.45
diff -p -u -r1.45 uvm_init.c
--- sys/uvm/uvm_init.c	29 Jan 2013 21:37:04 -0000	1.45
+++ sys/uvm/uvm_init.c	23 Mar 2015 13:13:55 -0000
@@ -133,6 +133,9 @@ uvm_init(void)
 	 */
 
 	uvm_km_init();
+#ifdef __HAVE_PMAP_PV_TRACK
+	pmap_pv_init();
+#endif
 
 #ifdef DEBUG
 	debug_init();
Index: sys/arch/x86/include/pmap.h
===================================================================
RCS file: /cvsroot/src/sys/arch/x86/include/pmap.h,v
retrieving revision 1.55
diff -p -u -r1.55 pmap.h
--- sys/arch/x86/include/pmap.h	17 Oct 2013 20:59:16 -0000	1.55
+++ sys/arch/x86/include/pmap.h	23 Mar 2015 13:13:55 -0000
@@ -243,8 +243,10 @@ extern long nkptp[PTP_LEVELS];
 void		pmap_activate(struct lwp *);
 void		pmap_bootstrap(vaddr_t);
 bool		pmap_clear_attrs(struct vm_page *, unsigned);
+bool		pmap_pv_clear_attrs(paddr_t, unsigned);
 void		pmap_deactivate(struct lwp *);
-void		pmap_page_remove (struct vm_page *);
+void		pmap_page_remove(struct vm_page *);
+void		pmap_pv_remove(paddr_t);
 void		pmap_remove(struct pmap *, vaddr_t, vaddr_t);
 bool		pmap_test_attrs(struct vm_page *, unsigned);
 void		pmap_write_protect(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);
@@ -258,6 +260,11 @@ void		pmap_emap_enter(vaddr_t, paddr_t, 
 void		pmap_emap_remove(vaddr_t, vsize_t);
 void		pmap_emap_sync(bool);
 
+#define	__HAVE_PMAP_PV_TRACK	1
+void		pmap_pv_init(void);
+void		pmap_pv_track(paddr_t, psize_t);
+void		pmap_pv_untrack(paddr_t, psize_t);
+
 void		pmap_map_ptes(struct pmap *, struct pmap **, pd_entry_t **,
 		    pd_entry_t * const **);
 void		pmap_unmap_ptes(struct pmap *, struct pmap *);
@@ -359,6 +366,23 @@ pmap_page_protect(struct vm_page *pg, vm
 }
 
 /*
+ * pmap_pv_protect: change the protection of all recorded mappings
+ *	of an unmanaged page
+ */
+
+__inline static void __unused
+pmap_pv_protect(paddr_t pa, vm_prot_t prot)
+{
+	if ((prot & VM_PROT_WRITE) == 0) {
+		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
+			(void) pmap_pv_clear_attrs(pa, PG_RW);
+		} else {
+			pmap_pv_remove(pa);
+		}
+	}
+}
+
+/*
  * pmap_protect: change the protection of pages in a pmap
  *
  * => this function is a frontend for pmap_remove/pmap_write_protect
Index: sys/arch/x86/x86/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/x86/x86/pmap.c,v
retrieving revision 1.187
diff -p -u -r1.187 pmap.c
--- sys/arch/x86/x86/pmap.c	27 Nov 2014 16:29:44 -0000	1.187
+++ sys/arch/x86/x86/pmap.c	23 Mar 2015 13:13:56 -0000
@@ -191,6 +191,8 @@ __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.1
 #include <sys/intr.h>
 #include <sys/xcall.h>
 #include <sys/kcore.h>
+#include <sys/kmem.h>
+#include <sys/pserialize.h>
 
 #include <uvm/uvm.h>
 
@@ -248,8 +250,10 @@ __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.1
  * data structures we use include:
  *
  *  - struct pmap: describes the address space of one thread
+ *  - struct pmap_page: describes one pv-tracked page, without
+ *	necessarily a corresponding vm_page
  *  - struct pv_entry: describes one <PMAP,VA> mapping of a PA
- *  - struct pv_head: there is one pv_head per managed page of
+ *  - struct pv_head: there is one pv_head per pv-tracked page of
  *	physical memory.   the pv_head points to a list of pv_entry
  *	structures which describe all the <PMAP,VA> pairs that this
  *      page is mapped in.    this is critical for page based operations
@@ -303,7 +307,7 @@ __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.1
  *
  * - pvh_lock (per pv_head)
  *   this lock protects the pv_entry list which is chained off the
- *   pv_head structure for a specific managed PA.   it is locked
+ *   pv_head structure for a specific pv-tracked PA.   it is locked
  *   when traversing the list (e.g. adding/removing mappings,
  *   syncing R/M bits, etc.)
  *
@@ -459,6 +463,120 @@ pvhash_remove(struct pv_hash_head *hh, s
 }
 
 /*
+ * unmanaged pv-tracked ranges
+ *
+ * This is a linear list for now because the only users are the DRM
+ * graphics drivers, with a single tracked range per device, for the
+ * graphics aperture, so there are expected to be few of them.
+ *
+ * This is used only after the VM system is initialized well enough
+ * that we can use kmem_alloc.
+ */
+
+struct pv_track {
+	paddr_t			pvt_start;
+	psize_t			pvt_size;
+	struct pv_track		*pvt_next;
+	struct pmap_page	pvt_pages[];
+};
+
+static struct {
+	kmutex_t	lock;
+	pserialize_t	psz;
+	struct pv_track	*list;
+} pv_unmanaged __cacheline_aligned;
+
+void
+pmap_pv_init(void)
+{
+
+	mutex_init(&pv_unmanaged.lock, MUTEX_DEFAULT, IPL_VM);
+	pv_unmanaged.psz = pserialize_create();
+	pv_unmanaged.list = NULL;
+}
+
+void
+pmap_pv_track(paddr_t start, psize_t size)
+{
+	struct pv_track *pvt;
+	size_t npages;
+
+	KASSERT(start == trunc_page(start));
+	KASSERT(size == trunc_page(size));
+
+	npages = size >> PAGE_SHIFT;
+	pvt = kmem_zalloc(offsetof(struct pv_track, pvt_pages[npages]),
+	    KM_SLEEP);
+	pvt->pvt_start = start;
+	pvt->pvt_size = size;
+
+	mutex_enter(&pv_unmanaged.lock);
+	pvt->pvt_next = pv_unmanaged.list;
+	membar_producer();
+	pv_unmanaged.list = pvt;
+	mutex_exit(&pv_unmanaged.lock);
+}
+
+void
+pmap_pv_untrack(paddr_t start, psize_t size)
+{
+	struct pv_track **pvtp, *pvt;
+	size_t npages;
+
+	KASSERT(start == trunc_page(start));
+	KASSERT(size == trunc_page(size));
+
+	mutex_enter(&pv_unmanaged.lock);
+	for (pvtp = &pv_unmanaged.list;
+	     (pvt = *pvtp) != NULL;
+	     pvtp = &pvt->pvt_next) {
+		if (pvt->pvt_start != start)
+			continue;
+		if (pvt->pvt_size != size)
+			panic("pmap_pv_untrack: pv-tracking at 0x%"PRIxPADDR
+			    ": 0x%"PRIxPSIZE" bytes, not 0x%"PRIxPSIZE" bytes",
+			    pvt->pvt_start, pvt->pvt_size, size);
+		*pvtp = pvt->pvt_next;
+		pserialize_perform(pv_unmanaged.psz);
+		pvt->pvt_next = NULL;
+		goto out;
+	}
+	panic("pmap_pv_untrack: pages not pv-tracked at 0x%"PRIxPADDR
+	    " (0x%"PRIxPSIZE" bytes)",
+	    start, size);
+out:	mutex_exit(&pv_unmanaged.lock);
+
+	npages = size >> PAGE_SHIFT;
+	kmem_free(pvt, offsetof(struct pv_track, pvt_pages[npages]));
+}
+
+static struct pmap_page *
+pmap_pv_tracked(paddr_t pa)
+{
+	struct pv_track *pvt;
+	size_t pgno;
+	int s;
+
+	KASSERT(pa == trunc_page(pa));
+
+	s = pserialize_read_enter();
+	for (pvt = pv_unmanaged.list; pvt != NULL; pvt = pvt->pvt_next) {
+		membar_datadep_consumer();
+		if ((pvt->pvt_start <= pa) &&
+		    ((pa - pvt->pvt_start) < pvt->pvt_size))
+			break;
+	}
+	pserialize_read_exit(s);
+
+	if (pvt == NULL)
+		return NULL;
+	KASSERT(pvt->pvt_start <= pa);
+	KASSERT((pa - pvt->pvt_start) < pvt->pvt_size);
+	pgno = (pa - pvt->pvt_start) >> PAGE_SHIFT;
+	return &pvt->pvt_pages[pgno];
+}
+
+/*
  * other data structures
  */
 
@@ -3300,27 +3418,30 @@ pmap_remove_pte(struct pmap *pmap, struc
 	 */
 	if ((opte & PG_PVLIST) == 0) {
 #if defined(DIAGNOSTIC) && !defined(DOM0OPS)
-		if (PHYS_TO_VM_PAGE(pmap_pte2pa(opte)) != NULL)
-			panic("pmap_remove_pte: managed page without "
-			      "PG_PVLIST for %#" PRIxVADDR, va);
+		if (PHYS_TO_VM_PAGE(pmap_pte2pa(opte)) != NULL ||
+		    pmap_pv_tracked(pmap_pte2pa(opte)) != NULL)
+			panic("pmap_remove_pte: managed or pv-tracked page"
+			    " without PG_PVLIST for %#"PRIxVADDR, va);
 #endif
 		return true;
 	}
 
-	pg = PHYS_TO_VM_PAGE(pmap_pte2pa(opte));
-
-	KASSERTMSG(pg != NULL, "pmap_remove_pte: unmanaged page marked "
-	    "PG_PVLIST, va = %#" PRIxVADDR ", pa = %#" PRIxPADDR,
-	    va, (paddr_t)pmap_pte2pa(opte));
-
-	KASSERT(uvm_page_locked_p(pg));
+	if ((pg = PHYS_TO_VM_PAGE(pmap_pte2pa(opte))) != NULL) {
+		KASSERT(uvm_page_locked_p(pg));
+		pp = VM_PAGE_TO_PP(pg);
+	} else if ((pp = pmap_pv_tracked(pmap_pte2pa(opte))) == NULL) {
+		paddr_t pa = pmap_pte2pa(opte);
+		panic("pmap_remove_pte: PG_PVLIST with pv-untracked page"
+		    " va = 0x%"PRIxVADDR
+		    " pa = 0x%"PRIxPADDR" (0x%"PRIxPADDR")",
+		    va, pa, atop(pa));
+	}
 
 	/* Sync R/M bits. */
-	pp = VM_PAGE_TO_PP(pg);
 	pp->pp_attrs |= opte;
 	pve = pmap_remove_pv(pp, ptp, va);
 
-	if (pve) { 
+	if (pve) {
 		pve->pve_next = *pv_tofree;
 		*pv_tofree = pve;
 	}
@@ -3545,26 +3666,16 @@ pmap_sync_pv(struct pv_pte *pvpte, pt_en
 	return 0;
 }
 
-/*
- * pmap_page_remove: remove a managed vm_page from all pmaps that map it
- *
- * => R/M bits are sync'd back to attrs
- */
-
-void
-pmap_page_remove(struct vm_page *pg)
+static void
+pmap_pp_remove(struct pmap_page *pp, paddr_t pa)
 {
-	struct pmap_page *pp;
 	struct pv_pte *pvpte;
 	struct pv_entry *killlist = NULL;
 	struct vm_page *ptp;
 	pt_entry_t expect;
 	int count;
 
-	KASSERT(uvm_page_locked_p(pg));
-
-	pp = VM_PAGE_TO_PP(pg);
-	expect = pmap_pa2pte(VM_PAGE_TO_PHYS(pg)) | PG_V;
+	expect = pmap_pa2pte(pa) | PG_V;
 	count = SPINLOCK_BACKOFF_MIN;
 	kpreempt_disable();
 startover:
@@ -3637,6 +3748,42 @@ startover:
 }
 
 /*
+ * pmap_page_remove: remove a managed vm_page from all pmaps that map it
+ *
+ * => R/M bits are sync'd back to attrs
+ */
+
+void
+pmap_page_remove(struct vm_page *pg)
+{
+	struct pmap_page *pp;
+	paddr_t pa;
+
+	KASSERT(uvm_page_locked_p(pg));
+
+	pp = VM_PAGE_TO_PP(pg);
+	pa = VM_PAGE_TO_PHYS(pg);
+	pmap_pp_remove(pp, pa);
+}
+
+/*
+ * pmap_pv_remove: remove an unmanaged pv-tracked page from all pmaps
+ *	that map it
+ */
+
+void
+pmap_pv_remove(paddr_t pa)
+{
+	struct pmap_page *pp;
+
+	pp = pmap_pv_tracked(pa);
+	if (pp == NULL)
+		panic("pmap_pv_remove: page not pv-tracked: 0x%"PRIxPADDR,
+		    pa);
+	pmap_pp_remove(pp, pa);
+}
+
+/*
  * p m a p   a t t r i b u t e  f u n c t i o n s
  * functions that test/change managed page's attributes
  * since a page can be mapped multiple times we must check each PTE that
@@ -3686,25 +3833,15 @@ pmap_test_attrs(struct vm_page *pg, unsi
 	return result != 0;
 }
 
-/*
- * pmap_clear_attrs: clear the specified attribute for a page.
- *
- * => we return true if we cleared one of the bits we were asked to
- */
-
-bool
-pmap_clear_attrs(struct vm_page *pg, unsigned clearbits)
+static bool
+pmap_pp_clear_attrs(struct pmap_page *pp, paddr_t pa, unsigned clearbits)
 {
-	struct pmap_page *pp;
 	struct pv_pte *pvpte;
 	u_int result;
 	pt_entry_t expect;
 	int count;
 
-	KASSERT(uvm_page_locked_p(pg));
-
-	pp = VM_PAGE_TO_PP(pg);
-	expect = pmap_pa2pte(VM_PAGE_TO_PHYS(pg)) | PG_V;
+	expect = pmap_pa2pte(pa) | PG_V;
 	count = SPINLOCK_BACKOFF_MIN;
 	kpreempt_disable();
 startover:
@@ -3729,6 +3866,43 @@ startover:
 	return result != 0;
 }
 
+/*
+ * pmap_clear_attrs: clear the specified attribute for a page.
+ *
+ * => we return true if we cleared one of the bits we were asked to
+ */
+
+bool
+pmap_clear_attrs(struct vm_page *pg, unsigned clearbits)
+{
+	struct pmap_page *pp;
+	paddr_t pa;
+
+	KASSERT(uvm_page_locked_p(pg));
+
+	pp = VM_PAGE_TO_PP(pg);
+	pa = VM_PAGE_TO_PHYS(pg);
+
+	return pmap_pp_clear_attrs(pp, pa, clearbits);
+}
+
+/*
+ * pmap_pv_clear_attrs: clear the specified attributes for an unmanaged
+ *	pv-tracked page.
+ */
+
+bool
+pmap_pv_clear_attrs(paddr_t pa, unsigned clearbits)
+{
+	struct pmap_page *pp;
+
+	pp = pmap_pv_tracked(pa);
+	if (pp == NULL)
+		panic("pmap_pv_clear_attrs: page not pv-tracked: 0x%"PRIxPADDR,
+		    pa);
+
+	return pmap_pp_clear_attrs(pp, pa, clearbits);
+}
 
 /*
  * p m a p   p r o t e c t i o n   f u n c t i o n s
@@ -3744,6 +3918,15 @@ startover:
 /* see pmap.h */
 
 /*
+ * pmap_pv_protect: change the protection of all recorded mappings
+ *	of an unmanaged pv-tracked page
+ *
+ * => NOTE: this is an inline function in pmap.h
+ */
+
+/* see pmap.h */
+
+/*
  * pmap_protect: set the protection in of the pages in a pmap
  *
  * => NOTE: this is an inline function in pmap.h
@@ -3900,9 +4083,9 @@ pmap_enter_ma(struct pmap *pmap, vaddr_t
 	pt_entry_t *ptes, opte, npte;
 	pt_entry_t *ptep;
 	pd_entry_t * const *pdes;
-	struct vm_page *ptp, *pg;
-	struct pmap_page *new_pp;
-	struct pmap_page *old_pp;
+	struct vm_page *ptp;
+	struct vm_page *new_pg, *old_pg;
+	struct pmap_page *new_pp, *old_pp;
 	struct pv_entry *old_pve = NULL;
 	struct pv_entry *new_pve;
 	struct pv_entry *new_pve2;
@@ -3945,14 +4128,17 @@ pmap_enter_ma(struct pmap *pmap, vaddr_t
 
 #ifdef XEN
 	if (domid != DOMID_SELF)
-		pg = NULL;
+		new_pg = NULL;
 	else
 #endif
-		pg = PHYS_TO_VM_PAGE(pa);
-	if (pg != NULL) {
+		new_pg = PHYS_TO_VM_PAGE(pa);
+	if (new_pg != NULL) {
 		/* This is a managed page */
 		npte |= PG_PVLIST;
-		new_pp = VM_PAGE_TO_PP(pg);
+		new_pp = VM_PAGE_TO_PP(new_pg);
+	} else if ((new_pp = pmap_pv_tracked(pa)) != NULL) {
+		/* This is an unmanaged pv-tracked page */
+		npte |= PG_PVLIST;
 	} else {
 		new_pp = NULL;
 	}
@@ -4041,25 +4227,28 @@ pmap_enter_ma(struct pmap *pmap, vaddr_t
 	}
 
 	/*
-	 * if old page is managed, remove pv_entry from its list.
+	 * if old page is pv-tracked, remove pv_entry from its list.
 	 */
 
 	if ((~opte & (PG_V | PG_PVLIST)) == 0) {
-		pg = PHYS_TO_VM_PAGE(pmap_pte2pa(opte));
-
-		KASSERTMSG(pg != NULL, "pmap_enter: PG_PVLIST mapping with "
-		    "unmanaged page pa = 0x%" PRIx64 " (0x%" PRIx64 ")",
-		    (int64_t)pa, (int64_t)atop(pa));
-
-		KASSERT(uvm_page_locked_p(pg));
+		if ((old_pg = PHYS_TO_VM_PAGE(pmap_pte2pa(opte))) != NULL) {
+			KASSERT(uvm_page_locked_p(old_pg));
+			old_pp = VM_PAGE_TO_PP(old_pg);
+		} else if ((old_pp = pmap_pv_tracked(pmap_pte2pa(opte)))
+		    == NULL) {
+			pa = pmap_pte2pa(opte);
+			panic("pmap_enter: PG_PVLIST with pv-untracked page"
+			    " va = 0x%"PRIxVADDR
+			    " pa = 0x%" PRIxPADDR " (0x%" PRIxPADDR ")",
+			    va, pa, atop(pa));
+		}
 
-		old_pp = VM_PAGE_TO_PP(pg);
 		old_pve = pmap_remove_pv(old_pp, ptp, va);
 		old_pp->pp_attrs |= opte;
 	}
 
 	/*
-	 * if new page is managed, insert pv_entry into its list.
+	 * if new page is pv-tracked, insert pv_entry into its list.
 	 */
 
 	if (new_pp) {
Index: sys/external/bsd/drm2/dist/drm/i915/i915_dma.c
===================================================================
RCS file: /cvsroot/src/sys/external/bsd/drm2/dist/drm/i915/i915_dma.c,v
retrieving revision 1.15
diff -p -u -r1.15 i915_dma.c
--- sys/external/bsd/drm2/dist/drm/i915/i915_dma.c	28 Feb 2015 18:25:39 -0000	1.15
+++ sys/external/bsd/drm2/dist/drm/i915/i915_dma.c	23 Mar 2015 13:13:56 -0000
@@ -1734,6 +1734,8 @@ int i915_driver_load(struct drm_device *
 	dev_priv->gtt.mappable =
 	    drm_io_mapping_create_wc(dev, dev_priv->gtt.mappable_base,
 		aperture_size);
+	/* Note: mappable_end is the size, not end paddr, of the aperture.  */
+	pmap_pv_track(dev_priv->gtt.mappable_base, dev_priv->gtt.mappable_end);
 #else
 	dev_priv->gtt.mappable =
 		io_mapping_create_wc(dev_priv->gtt.mappable_base,
@@ -1851,6 +1853,11 @@ out_gem_unload:
 	destroy_workqueue(dev_priv->wq);
 out_mtrrfree:
 	arch_phys_wc_del(dev_priv->gtt.mtrr);
+#ifdef __NetBSD__
+	/* Note: mappable_end is the size, not end paddr, of the aperture.  */
+	pmap_pv_untrack(dev_priv->gtt.mappable_base,
+	    dev_priv->gtt.mappable_end);
+#endif
 	io_mapping_free(dev_priv->gtt.mappable);
 out_gtt:
 	list_del(&dev_priv->gtt.base.global_link);
Index: sys/external/bsd/drm2/dist/drm/i915/i915_gem.c
===================================================================
RCS file: /cvsroot/src/sys/external/bsd/drm2/dist/drm/i915/i915_gem.c,v
retrieving revision 1.28
diff -p -u -r1.28 i915_gem.c
--- sys/external/bsd/drm2/dist/drm/i915/i915_gem.c	6 Mar 2015 22:24:05 -0000	1.28
+++ sys/external/bsd/drm2/dist/drm/i915/i915_gem.c	23 Mar 2015 13:13:56 -0000
@@ -2121,32 +2121,19 @@ i915_gem_release_mmap(struct drm_i915_ge
 
 #ifdef __NetBSD__		/* XXX gem gtt fault */
 	{
-		struct vm_page *page;
+		struct drm_device *const dev = obj->base.dev;
+		struct drm_i915_private *const dev_priv = dev->dev_private;
+		const paddr_t start = dev_priv->gtt.mappable_base +
+		    i915_gem_obj_ggtt_offset(obj);
+		const size_t size = obj->base.size;
+		const paddr_t end = start + size;
+		paddr_t pa;
 
-		mutex_enter(obj->base.gemo_shm_uao->vmobjlock);
-		KASSERT(obj->pages != NULL);
-		/* Force a fresh fault for each page.  */
-		/*
-		 * XXX OOPS!  This doesn't actually do what we want.
-		 * This causes a fresh fault for access to the backing
-		 * pages -- but nothing accesses the backing pages
-		 * directly!  What is actually entered into CPU page
-		 * table entries is aperture addresses which have been
-		 * programmed by the GTT to refer to those backing
-		 * pages.
-		 *
-		 * We need to clear those page table entries, but
-		 * there's no good way to do that at the moment: nobody
-		 * records for us a map from either uvm objects or
-		 * physical device addresses to a list of all virtual
-		 * pages where they have been mapped.  pmap(9) records
-		 * a map only from physical RAM addresses to virtual
-		 * pages; it does nothing for physical device
-		 * addresses.
-		 */
-		TAILQ_FOREACH(page, &obj->igo_pageq, pageq.queue)
-			pmap_page_protect(page, VM_PROT_NONE);
-		mutex_exit(obj->base.gemo_shm_uao->vmobjlock);
+		KASSERT((start & (PAGE_SIZE - 1)) == 0);
+		KASSERT((size & (PAGE_SIZE - 1)) == 0);
+
+		for (pa = start; pa < end; pa += PAGE_SIZE)
+			pmap_pv_protect(pa, VM_PROT_NONE);
 	}
 #else
 	drm_vma_node_unmap(&obj->base.vma_node,
Index: sys/external/bsd/drm2/dist/drm/nouveau/nouveau_agp.c
===================================================================
RCS file: /cvsroot/src/sys/external/bsd/drm2/dist/drm/nouveau/nouveau_agp.c,v
retrieving revision 1.2
diff -p -u -r1.2 nouveau_agp.c
--- sys/external/bsd/drm2/dist/drm/nouveau/nouveau_agp.c	6 Aug 2014 13:35:13 -0000	1.2
+++ sys/external/bsd/drm2/dist/drm/nouveau/nouveau_agp.c	23 Mar 2015 13:20:37 -0000
@@ -190,6 +190,9 @@ nouveau_agp_init(struct nouveau_drm *drm
 	drm->agp.stat = ENABLED;
 	drm->agp.base = info.aperture_base;
 	drm->agp.size = info.aperture_size;
+#ifdef __NetBSD__
+	pmap_pv_track(drm->agp.base, drm->agp.size);
+#endif
 #endif
 }
 
@@ -198,7 +201,11 @@ nouveau_agp_fini(struct nouveau_drm *drm
 {
 #if __OS_HAS_AGP
 	struct drm_device *dev = drm->dev;
-	if (dev->agp && dev->agp->acquired)
+	if (dev->agp && dev->agp->acquired) {
+#ifdef __NetBSD__
+		pmap_pv_untrack(drm->agp.base, drm->agp.size);
+#endif
 		drm_agp_release(dev);
+	}
 #endif
 }
Index: sys/external/bsd/drm2/dist/drm/nouveau/nouveau_ttm.c
===================================================================
RCS file: /cvsroot/src/sys/external/bsd/drm2/dist/drm/nouveau/nouveau_ttm.c,v
retrieving revision 1.3
diff -p -u -r1.3 nouveau_ttm.c
--- sys/external/bsd/drm2/dist/drm/nouveau/nouveau_ttm.c	25 Feb 2015 22:12:00 -0000	1.3
+++ sys/external/bsd/drm2/dist/drm/nouveau/nouveau_ttm.c	23 Mar 2015 13:20:37 -0000
@@ -443,6 +443,11 @@ nouveau_ttm_init(struct nouveau_drm *drm
 	drm->ttm.mtrr = arch_phys_wc_add(nv_device_resource_start(device, 1),
 					 nv_device_resource_len(device, 1));
 
+#ifdef __NetBSD__
+	pmap_pv_track(nv_device_resource_start(device, 1),
+	    nv_device_resource_len(device, 1));
+#endif
+
 	/* GART init */
 	if (drm->agp.stat != ENABLED) {
 		drm->gem.gart_available = nouveau_vmmgr(drm->device)->limit;
@@ -476,4 +481,9 @@ nouveau_ttm_fini(struct nouveau_drm *drm
 
 	arch_phys_wc_del(drm->ttm.mtrr);
 	drm->ttm.mtrr = 0;
+
+#ifdef __NetBSD__
+	pmap_pv_untrack(nv_device_resource_start(nv_device(drm->device), 1),
+	    nv_device_resource_len(nv_device(drm->device), 1));
+#endif
 }
Index: sys/external/bsd/drm2/dist/drm/radeon/radeon_agp.c
===================================================================
RCS file: /cvsroot/src/sys/external/bsd/drm2/dist/drm/radeon/radeon_agp.c,v
retrieving revision 1.2
diff -p -u -r1.2 radeon_agp.c
--- sys/external/bsd/drm2/dist/drm/radeon/radeon_agp.c	16 Jul 2014 20:59:57 -0000	1.2
+++ sys/external/bsd/drm2/dist/drm/radeon/radeon_agp.c	23 Mar 2015 13:13:57 -0000
@@ -248,6 +248,10 @@ int radeon_agp_init(struct radeon_device
 	dev_info(rdev->dev, "GTT: %"PRIu64"M 0x%08"PRIX64" - 0x%08"PRIX64"\n",
 		rdev->mc.gtt_size >> 20, rdev->mc.gtt_start, rdev->mc.gtt_end);
 
+#ifdef __NetBSD__
+	pmap_pv_track(rdev->mc.agp_base, rdev->mc.gtt_size);
+#endif
+
 	/* workaround some hw issues */
 	if (rdev->family < CHIP_R200) {
 		WREG32(RADEON_AGP_CNTL, RREG32(RADEON_AGP_CNTL) | 0x000e0000);
@@ -274,6 +278,9 @@ void radeon_agp_fini(struct radeon_devic
 {
 #if __OS_HAS_AGP
 	if (rdev->ddev->agp && rdev->ddev->agp->acquired) {
+#ifdef __NetBSD__
+		pmap_pv_untrack(rdev->mc.agp_base, rdev->mc.gtt_size);
+#endif
 		drm_agp_release(rdev->ddev);
 	}
 #endif
Index: sys/external/bsd/drm2/dist/drm/radeon/radeon_object.c
===================================================================
RCS file: /cvsroot/src/sys/external/bsd/drm2/dist/drm/radeon/radeon_object.c,v
retrieving revision 1.2
diff -p -u -r1.2 radeon_object.c
--- sys/external/bsd/drm2/dist/drm/radeon/radeon_object.c	16 Jul 2014 20:59:57 -0000	1.2
+++ sys/external/bsd/drm2/dist/drm/radeon/radeon_object.c	23 Mar 2015 13:13:57 -0000
@@ -366,6 +366,10 @@ int radeon_bo_init(struct radeon_device 
 		rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
 						      rdev->mc.aper_size);
 	}
+#ifdef __NetBSD__
+	if (rdev->mc.aper_base)
+		pmap_pv_track(rdev->mc.aper_base, rdev->mc.aper_size);
+#endif
 	DRM_INFO("Detected VRAM RAM=%"PRIx64"M, BAR=%lluM\n",
 		rdev->mc.mc_vram_size >> 20,
 		(unsigned long long)rdev->mc.aper_size >> 20);
@@ -377,6 +381,10 @@ int radeon_bo_init(struct radeon_device 
 void radeon_bo_fini(struct radeon_device *rdev)
 {
 	radeon_ttm_fini(rdev);
+#ifdef __NetBSD__
+	if (rdev->mc.aper_base)
+		pmap_pv_untrack(rdev->mc.aper_base, rdev->mc.aper_size);
+#endif
 	arch_phys_wc_del(rdev->mc.vram_mtrr);
 }
 
Index: sys/external/bsd/drm2/dist/drm/ttm/ttm_bo.c
===================================================================
RCS file: /cvsroot/src/sys/external/bsd/drm2/dist/drm/ttm/ttm_bo.c,v
retrieving revision 1.6
diff -p -u -r1.6 ttm_bo.c
--- sys/external/bsd/drm2/dist/drm/ttm/ttm_bo.c	18 Aug 2014 02:43:27 -0000	1.6
+++ sys/external/bsd/drm2/dist/drm/ttm/ttm_bo.c	23 Mar 2015 13:13:57 -0000
@@ -1611,11 +1611,16 @@ void ttm_bo_unmap_virtual_locked(struct 
 
 #ifdef __NetBSD__
 	if (bo->mem.bus.is_iomem) {
-		/*
-		 * XXX OOPS!  NetBSD doesn't have a way to enumerate
-		 * and remove the virtual mappings for device addresses
-		 * or of a uvm object.
-		 */
+		paddr_t start, end, pa;
+
+		KASSERT((bo->mem.bus.base & (PAGE_SIZE - 1)) == 0);
+		KASSERT((bo->mem.bus.offset & (PAGE_SIZE - 1)) == 0);
+		start = bo->mem.bus.base + bo->mem.bus.offset;
+		KASSERT((bo->mem.bus.size & (PAGE_SIZE - 1)) == 0);
+		end = start + bo->mem.bus.size;
+
+		for (pa = start; pa < end; pa += PAGE_SIZE)
+			pmap_pv_protect(pa, VM_PROT_NONE);
 	} else if (bo->ttm != NULL) {
 		unsigned i;
 

