NetBSD-Bugs archive

Re: kern/57833: kernel panic on xorg exit

OK, I think I found the problem with my patch: a side effect of
Linux's definition of PAGE_MASK, which is the complement of NetBSD's,
caused round_page(size) to return 0 instead of 0x4000 when
size=0x4000.
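
For anyone following along, here is a minimal standalone sketch of
the mixup (hypothetical demo, not part of the patch: it assumes a 4k
page size, and round_page is paraphrased from NetBSD's definition
with the mask made a parameter so the two conventions can be compared
side by side):

#include <stdio.h>

#define PAGE_SIZE	0x1000UL

#define NETBSD_PAGE_MASK	(PAGE_SIZE - 1)		/* 0xfff */
#define LINUX_PAGE_MASK		(~(PAGE_SIZE - 1))	/* 0xf...f000 */

/* NetBSD-style round_page, parametrized by the mask convention */
#define round_page(x, mask)	(((x) + (mask)) & ~(mask))

int
main(void)
{
	/* NetBSD's PAGE_MASK: an already-aligned size stays put */
	printf("%#lx\n", round_page(0x4000UL, NETBSD_PAGE_MASK)); /* 0x4000 */
	/* Linux's complemented PAGE_MASK: the addition wraps, result is 0 */
	printf("%#lx\n", round_page(0x4000UL, LINUX_PAGE_MASK));  /* 0 */
	return 0;
}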

Try this one?

(Posting this again both as one giant diff in .diff and as an
incremental git patch series in .patch, in case that's helpful; use
whichever is easier for you to deal with.)
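
(If it helps: the giant diff uses git-style a/ and b/ path prefixes,
so it should apply with "patch -p1" from the top of the tree; the
series is git-format-patch output, so "git am" should take it in a
git checkout.)
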
diff --git a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.c b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.c
index 8e592f008ef7..d0265738e2c8 100644
--- a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.c
+++ b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.c
@@ -675,7 +675,7 @@ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
 
 	if (!i915_gem_object_has_pages(obj))
 		return;
-	for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) {
+	for (i = 0; i < obj->mm.pages->sgl->sg_npgs; i++) {
 		page = obj->mm.pages->sgl->sg_pgs[i];
 		vm_page = &page->p_vmp;
 		pmap_page_protect(vm_page, VM_PROT_NONE);
diff --git a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_pages.c b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_pages.c
index 58075877254a..2bc7b11695ec 100644
--- a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_pages.c
+++ b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_pages.c
@@ -42,6 +42,18 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 	}
 
 #ifndef __NetBSD__
+	/*
+	 * Paranoia: In NetBSD, a scatterlist is just an array of
+	 * pages, not an array of segments that might be larger than
+	 * pages, so the number of entries must exactly match the size
+	 * of the object (which should also be page-aligned).
+	 *
+	 * Both vm_fault_cpu and i915_gem_object_release_mmap_offset in
+	 * i915_gem_mman.c rely on this page array as such.
+	 */
+	KASSERTMSG(pages->sgl->sg_npgs == obj->base.size >> PAGE_SHIFT,
+	    "npgs=%zu size=%zu", pages->sgl->sg_npgs, obj->base.size);
+
 	obj->mm.get_page.sg_pos = pages->sgl;
 	obj->mm.get_page.sg_idx = 0;
 #endif
diff --git a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c
index 3f68276eea5f..257f386695f3 100644
--- a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c
+++ b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c
@@ -9,6 +9,90 @@
 #include <sys/cdefs.h>
 __KERNEL_RCSID(0, "$NetBSD: i915_gem_phys.c,v 1.8 2021/12/19 12:45:43 riastradh Exp $");
 
+#ifdef __NetBSD__
+/*
+ * Make sure this block comes before any linux includes, so we don't
+ * get mixed up by the PAGE_MASK complementation.
+ */
+
+#include <sys/bus.h>
+
+#include <uvm/uvm.h>
+#include <uvm/uvm_extern.h>
+
+#include <machine/pmap_private.h> /* kvtopte, pmap_pte_clearbits */
+
+/*
+ * Version of bus_dmamem_map that uses pmap_kenter_pa, not pmap_enter,
+ * so that it isn't affected by pmap_page_protect on the physical
+ * address.  Adapted from sys/arch/x86/x86/bus_dma.c.
+ */
+static int
+bus_dmamem_kmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
+    size_t size, void **kvap, int flags)
+{
+	vaddr_t va;
+	bus_addr_t addr;
+	int curseg;
+	const uvm_flag_t kmflags =
+	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;
+	u_int pmapflags = PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE;
+
+	size = round_page(size);
+	if (flags & BUS_DMA_NOCACHE)
+		pmapflags |= PMAP_NOCACHE;
+
+	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
+
+	if (va == 0)
+		return ENOMEM;
+
+	*kvap = (void *)va;
+
+	for (curseg = 0; curseg < nsegs; curseg++) {
+		for (addr = segs[curseg].ds_addr;
+		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
+		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
+			if (size == 0)
+				panic("bus_dmamem_kmap: size botch");
+			pmap_kenter_pa(va, addr,
+			    VM_PROT_READ | VM_PROT_WRITE,
+			    pmapflags);
+		}
+	}
+	pmap_update(pmap_kernel());
+
+	return 0;
+}
+
+static void
+bus_dmamem_kunmap(bus_dma_tag_t t, void *kva, size_t size)
+{
+	pt_entry_t *pte, opte;
+	vaddr_t va, sva, eva;
+
+	KASSERTMSG(((uintptr_t)kva & PGOFSET) == 0, "kva=%p", kva);
+
+	size = round_page(size);
+	sva = (vaddr_t)kva;
+	eva = sva + size;
+
+	/*
+	 * mark pages cacheable again.
+	 */
+	for (va = sva; va < eva; va += PAGE_SIZE) {
+		pte = kvtopte(va);
+		opte = *pte;
+		if ((opte & PTE_PCD) != 0)
+			pmap_pte_clearbits(pte, PTE_PCD);
+	}
+	pmap_kremove((vaddr_t)kva, size);
+	pmap_update(pmap_kernel());
+	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
+}
+
+#endif
+
 #include <linux/highmem.h>
 #include <linux/shmem_fs.h>
 #include <linux/swap.h>
@@ -65,7 +149,7 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 	if (ret)
 		return -ENOMEM;
 	KASSERT(rsegs == 1);
-	ret = -bus_dmamem_map(dmat, &obj->mm.u.phys.seg, 1,
+	ret = -bus_dmamem_kmap(dmat, &obj->mm.u.phys.seg, 1,
 	    roundup_pow_of_two(obj->base.size), &vaddr,
 	    BUS_DMA_WAITOK|BUS_DMA_COHERENT);
 	if (ret)
@@ -83,7 +167,12 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 	if (!st)
 		goto err_pci;
 
+#ifdef __NetBSD__
+	if (sg_alloc_table_from_bus_dmamem(st, dmat, &obj->mm.u.phys.seg, 1,
+		GFP_KERNEL))
+#else
 	if (sg_alloc_table(st, 1, GFP_KERNEL))
+#endif
 		goto err_st;
 
 	sg = st->sgl;
@@ -151,7 +240,7 @@ err_st:
 err_pci:
 #ifdef __NetBSD__
 	if (vaddr) {
-		bus_dmamem_unmap(dmat, vaddr,
+		bus_dmamem_kunmap(dmat, vaddr,
 		    roundup_pow_of_two(obj->base.size));
 	}
 	obj->mm.u.phys.kva = NULL;
@@ -225,7 +314,7 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
 	kfree(pages);
 
 #ifdef __NetBSD__
-	bus_dmamem_unmap(dmat, obj->mm.u.phys.kva,
+	bus_dmamem_kunmap(dmat, obj->mm.u.phys.kva,
 	    roundup_pow_of_two(obj->base.size));
 	obj->mm.u.phys.kva = NULL;
 	bus_dmamem_free(dmat, &obj->mm.u.phys.seg, 1);
diff --git a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_region.c b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_region.c
index 1a928988bd5f..72efd363fdc7 100644
--- a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_region.c
+++ b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_region.c
@@ -45,10 +45,12 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 	if (!st)
 		return -ENOMEM;
 
+#ifndef __NetBSD__
 	if (sg_alloc_table(st, size >> ilog2(mem->mm.chunk_size), GFP_KERNEL)) {
 		kfree(st);
 		return -ENOMEM;
 	}
+#endif
 
 	flags = I915_ALLOC_MIN_PAGE_SIZE;
 	if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
@@ -60,15 +62,15 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 
 	GEM_BUG_ON(list_empty(blocks));
 
-	sg = st->sgl;
 #ifdef __NetBSD__
 	__USE(prev_end);
-	__USE(sg_page_sizes);
 	bus_dma_tag_t dmat = obj->base.dev->dmat;
 	bus_dma_segment_t *segs = NULL;
 	int i = 0, nsegs = 0;
 	bool loaded = false;
 
+	sg = NULL;
+
 	list_for_each_entry(block, blocks, link) {
 		if (nsegs >= INT_MAX ||
 		    nsegs >= SIZE_MAX/sizeof(segs[0]))
@@ -85,7 +87,15 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 
 		segs[i].ds_addr = mem->region.start + offset;
 		segs[i].ds_len = block_size;
+		i++;
 	}
+	KASSERT(i == nsegs);
+
+	ret = sg_alloc_table_from_bus_dmamem(st, dmat, segs, nsegs,
+	    GFP_KERNEL);
+	if (ret)
+		goto err;
+	sg = st->sgl;
 
 	/* XXX errno NetBSD->Linux */
 	ret = -bus_dmamap_create(dmat, size, nsegs, size, 0, BUS_DMA_WAITOK,
@@ -106,8 +116,9 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 	kmem_free(segs, nsegs * sizeof(segs[0]));
 	segs = NULL;
 
-	__i915_gem_object_set_pages(obj, st, i915_sg_page_sizes(sg));
+	sg_page_sizes = i915_sg_page_sizes(sg);
 #else
+	sg = st->sgl;
 	st->nents = 0;
 	sg_page_sizes = 0;
 	prev_end = (resource_size_t)-1;
@@ -145,9 +156,9 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 	sg_page_sizes |= sg->length;
 	sg_mark_end(sg);
 	i915_sg_trim(st);
+#endif
 
 	__i915_gem_object_set_pages(obj, st, sg_page_sizes);
-#endif
 
 	return 0;
 
@@ -155,6 +166,8 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 err:
 	if (loaded)
 		bus_dmamap_unload(dmat, st->sgl->sg_dmamap);
+	if (sg && sg->sg_dmamap)
+		bus_dmamap_destroy(dmat, sg->sg_dmamap);
 	if (segs)
 		kmem_free(segs, nsegs * sizeof(segs[0]));
 	__intel_memory_region_put_pages_buddy(mem, blocks);
diff --git a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c
index b53d543fa894..1635e57befd7 100644
--- a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c
+++ b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c
@@ -506,14 +506,13 @@ i915_pages_create_for_stolen(struct drm_device *dev,
 {
 	struct drm_i915_private *i915 = to_i915(dev);
 	struct sg_table *st;
+	struct scatterlist *sg;
 #ifdef __NetBSD__
 	bus_dma_tag_t dmat = i915->drm.dmat;
 	bus_dma_segment_t *seg = NULL;
 	int nseg = 0, i;
 	bool loaded = false;
 	int ret;
-#else
-	struct scatterlist *sg;
 #endif
 
 	GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm)));
@@ -527,11 +526,6 @@ i915_pages_create_for_stolen(struct drm_device *dev,
 	if (st == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
-		kfree(st);
-		return ERR_PTR(-ENOMEM);
-	}
-
 #ifdef __NetBSD__
 	KASSERT((size % PAGE_SIZE) == 0);
 	nseg = size / PAGE_SIZE;
@@ -548,19 +542,30 @@ i915_pages_create_for_stolen(struct drm_device *dev,
 		seg[i].ds_len = PAGE_SIZE;
 	}
 
+	sg = NULL;
+
+	ret = sg_alloc_table_from_bus_dmamem(st, dmat, seg, nseg, GFP_KERNEL);
+	if (ret) {
+		DRM_ERROR("failed to alloc sg table for stolen object: %d\n",
+		    ret);
+		ret = -ENOMEM;
+		goto out;
+	}
+	sg = st->sgl;
+
 	/* XXX errno NetBSD->Linux */
 	ret = -bus_dmamap_create(dmat, size, nseg, PAGE_SIZE, 0,
-	    BUS_DMA_WAITOK, &st->sgl->sg_dmamap);
+	    BUS_DMA_WAITOK, &sg->sg_dmamap);
 	if (ret) {
 		DRM_ERROR("failed to create DMA map for stolen object: %d\n",
 		    ret);
-		st->sgl->sg_dmamap = NULL;
+		sg->sg_dmamap = NULL;
 		goto out;
 	}
-	st->sgl->sg_dmat = dmat;
+	sg->sg_dmat = dmat;
 
-	/* XXX errno NetBSD->Liux */
-	ret = -bus_dmamap_load_raw(dmat, st->sgl->sg_dmamap, seg, nseg, size,
+	/* XXX errno NetBSD->Linux */
+	ret = -bus_dmamap_load_raw(dmat, sg->sg_dmamap, seg, nseg, size,
 	    BUS_DMA_WAITOK);
 	if (ret) {
 		DRM_ERROR("failed to load DMA map for stolen object: %d\n",
@@ -569,14 +574,23 @@ i915_pages_create_for_stolen(struct drm_device *dev,
 	}
 	loaded = true;
 
-out:	if (ret) {
+out:	kmem_free(seg, nseg * sizeof(seg[0]));
+	if (ret) {
 		if (loaded)
-			bus_dmamap_unload(dmat, st->sgl->sg_dmamap);
-		sg_free_table(st);
+			bus_dmamap_unload(dmat, sg->sg_dmamap);
+		if (sg && sg->sg_dmamap)
+			bus_dmamap_destroy(dmat, sg->sg_dmamap);
+		if (sg)
+			sg_free_table(st);
 		kfree(st);
 		return ERR_PTR(ret);
 	}
 #else
+	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
+		kfree(st);
+		return ERR_PTR(-ENOMEM);
+	}
+
 	sg = st->sgl;
 	sg->offset = 0;
 	sg->length = size;
From 7fb48cd42a6dc1e58f9c6b27c7853d721d9f96da Mon Sep 17 00:00:00 2001
From: Taylor R Campbell <riastradh@NetBSD.org>
Date: Mon, 15 Jan 2024 22:38:08 +0000
Subject: [PATCH 1/8] i915_gem_region: Reduce diff from upstream a little.

No functional change intended.
---
 sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_region.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_region.c b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_region.c
index 1a928988bd5f..e539dc4530dc 100644
--- a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_region.c
+++ b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_region.c
@@ -63,7 +63,6 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 	sg = st->sgl;
 #ifdef __NetBSD__
 	__USE(prev_end);
-	__USE(sg_page_sizes);
 	bus_dma_tag_t dmat = obj->base.dev->dmat;
 	bus_dma_segment_t *segs = NULL;
 	int i = 0, nsegs = 0;
@@ -106,7 +105,7 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 	kmem_free(segs, nsegs * sizeof(segs[0]));
 	segs = NULL;
 
-	__i915_gem_object_set_pages(obj, st, i915_sg_page_sizes(sg));
+	sg_page_sizes = i915_sg_page_sizes(sg);
 #else
 	st->nents = 0;
 	sg_page_sizes = 0;
@@ -145,9 +144,9 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 	sg_page_sizes |= sg->length;
 	sg_mark_end(sg);
 	i915_sg_trim(st);
+#endif
 
 	__i915_gem_object_set_pages(obj, st, sg_page_sizes);
-#endif
 
 	return 0;
 

From 97510e276148cfc6f223b397704b3d81905f8eb0 Mon Sep 17 00:00:00 2001
From: Taylor R Campbell <riastradh@NetBSD.org>
Date: Mon, 15 Jan 2024 22:38:38 +0000
Subject: [PATCH 2/8] i915_gem: Avoid walking off end of sg_pgs.

sg_npgs currently fails to match obj->base.size/PAGE_SIZE only due to
bugs in the construction of sg_pgs in various i915 gem object types,
which we should also fix, but let's avoid compounding it here.

Related to PR kern/57833.

XXX pullup-10
---
 sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.c b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.c
index 8e592f008ef7..d0265738e2c8 100644
--- a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.c
+++ b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.c
@@ -675,7 +675,7 @@ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
 
 	if (!i915_gem_object_has_pages(obj))
 		return;
-	for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) {
+	for (i = 0; i < obj->mm.pages->sgl->sg_npgs; i++) {
 		page = obj->mm.pages->sgl->sg_pgs[i];
 		vm_page = &page->p_vmp;
 		pmap_page_protect(vm_page, VM_PROT_NONE);

From 4fabd36d0f4f0b06ff600d91789c56730a6698a7 Mon Sep 17 00:00:00 2001
From: Taylor R Campbell <riastradh@NetBSD.org>
Date: Wed, 17 Jan 2024 01:33:22 +0000
Subject: [PATCH 3/8] i915_gem: Assert page array size.

Let's detect the bug of sg_npgs failing to match
obj->base.size/PAGE_SIZE earlier.

Related to PR kern/57833.

XXX pullup-10
---
 .../bsd/drm2/dist/drm/i915/gem/i915_gem_pages.c      | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_pages.c b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_pages.c
index 58075877254a..2bc7b11695ec 100644
--- a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_pages.c
+++ b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_pages.c
@@ -42,6 +42,18 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 	}
 
 #ifndef __NetBSD__
+	/*
+	 * Paranoia: In NetBSD, a scatterlist is just an array of
+	 * pages, not an array of segments that might be larger than
+	 * pages, so the number of entries must exactly match the size
+	 * of the object (which should also be page-aligned).
+	 *
+	 * Both vm_fault_cpu and i915_gem_object_release_mmap_offset in
+	 * i915_gem_mman.c rely on this page array as such.
+	 */
+	KASSERTMSG(pages->sgl->sg_npgs == obj->base.size >> PAGE_SHIFT,
+	    "npgs=%zu size=%zu", pages->sgl->sg_npgs, obj->base.size);
+
 	obj->mm.get_page.sg_pos = pages->sgl;
 	obj->mm.get_page.sg_idx = 0;
 #endif

From b63c51da1202bfbe0a633a72ca9b7e2575dfb3ca Mon Sep 17 00:00:00 2001
From: Taylor R Campbell <riastradh@NetBSD.org>
Date: Mon, 15 Jan 2024 22:49:04 +0000
Subject: [PATCH 4/8] i915_gem_phys: Fill sg_pgs.

This is needed by i915 gem fault, which maps user virtual addresses
to those pages, and by i915 gem object destruction, which does
pmap_page_protect on the pages to remove any of those user virtual
mappings.

This needs pmap_kenter_pa rather than pmap_enter(pmap_kernel(), ...)
in order to preserve the _kernel's_ mapping of the pages after
pmap_page_protect.

But bus_dmamem_map currently uses pmap_enter(pmap_kernel(), ...)
instead which creates a mapping that is removed by pmap_page_protect.

So we use a variant of bus_dmamem_map that uses pmap_kenter_pa
instead.  Perhaps bus_dmamem_map should do this itself, but this
change is less risky to pull up than a change to bus_dmamem_map
itself.

PR kern/57833: kernel panic on xorg exit

XXX pullup-10
---
 .../drm2/dist/drm/i915/gem/i915_gem_phys.c    | 95 ++++++++++++++++++-
 1 file changed, 92 insertions(+), 3 deletions(-)

diff --git a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c
index 3f68276eea5f..257f386695f3 100644
--- a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c
+++ b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c
@@ -9,6 +9,90 @@
 #include <sys/cdefs.h>
 __KERNEL_RCSID(0, "$NetBSD: i915_gem_phys.c,v 1.8 2021/12/19 12:45:43 riastradh Exp $");
 
+#ifdef __NetBSD__
+/*
+ * Make sure this block comes before any linux includes, so we don't
+ * get mixed up by the PAGE_MASK complementation.
+ */
+
+#include <sys/bus.h>
+
+#include <uvm/uvm.h>
+#include <uvm/uvm_extern.h>
+
+#include <machine/pmap_private.h> /* kvtopte, pmap_pte_clearbits */
+
+/*
+ * Version of bus_dmamem_map that uses pmap_kenter_pa, not pmap_enter,
+ * so that it isn't affected by pmap_page_protect on the physical
+ * address.  Adapted from sys/arch/x86/x86/bus_dma.c.
+ */
+static int
+bus_dmamem_kmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
+    size_t size, void **kvap, int flags)
+{
+	vaddr_t va;
+	bus_addr_t addr;
+	int curseg;
+	const uvm_flag_t kmflags =
+	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;
+	u_int pmapflags = PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE;
+
+	size = round_page(size);
+	if (flags & BUS_DMA_NOCACHE)
+		pmapflags |= PMAP_NOCACHE;
+
+	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
+
+	if (va == 0)
+		return ENOMEM;
+
+	*kvap = (void *)va;
+
+	for (curseg = 0; curseg < nsegs; curseg++) {
+		for (addr = segs[curseg].ds_addr;
+		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
+		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
+			if (size == 0)
+				panic("bus_dmamem_kmap: size botch");
+			pmap_kenter_pa(va, addr,
+			    VM_PROT_READ | VM_PROT_WRITE,
+			    pmapflags);
+		}
+	}
+	pmap_update(pmap_kernel());
+
+	return 0;
+}
+
+static void
+bus_dmamem_kunmap(bus_dma_tag_t t, void *kva, size_t size)
+{
+	pt_entry_t *pte, opte;
+	vaddr_t va, sva, eva;
+
+	KASSERTMSG(((uintptr_t)kva & PGOFSET) == 0, "kva=%p", kva);
+
+	size = round_page(size);
+	sva = (vaddr_t)kva;
+	eva = sva + size;
+
+	/*
+	 * mark pages cacheable again.
+	 */
+	for (va = sva; va < eva; va += PAGE_SIZE) {
+		pte = kvtopte(va);
+		opte = *pte;
+		if ((opte & PTE_PCD) != 0)
+			pmap_pte_clearbits(pte, PTE_PCD);
+	}
+	pmap_kremove((vaddr_t)kva, size);
+	pmap_update(pmap_kernel());
+	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
+}
+
+#endif
+
 #include <linux/highmem.h>
 #include <linux/shmem_fs.h>
 #include <linux/swap.h>
@@ -65,7 +149,7 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 	if (ret)
 		return -ENOMEM;
 	KASSERT(rsegs == 1);
-	ret = -bus_dmamem_map(dmat, &obj->mm.u.phys.seg, 1,
+	ret = -bus_dmamem_kmap(dmat, &obj->mm.u.phys.seg, 1,
 	    roundup_pow_of_two(obj->base.size), &vaddr,
 	    BUS_DMA_WAITOK|BUS_DMA_COHERENT);
 	if (ret)
@@ -83,7 +167,12 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 	if (!st)
 		goto err_pci;
 
+#ifdef __NetBSD__
+	if (sg_alloc_table_from_bus_dmamem(st, dmat, &obj->mm.u.phys.seg, 1,
+		GFP_KERNEL))
+#else
 	if (sg_alloc_table(st, 1, GFP_KERNEL))
+#endif
 		goto err_st;
 
 	sg = st->sgl;
@@ -151,7 +240,7 @@ err_st:
 err_pci:
 #ifdef __NetBSD__
 	if (vaddr) {
-		bus_dmamem_unmap(dmat, vaddr,
+		bus_dmamem_kunmap(dmat, vaddr,
 		    roundup_pow_of_two(obj->base.size));
 	}
 	obj->mm.u.phys.kva = NULL;
@@ -225,7 +314,7 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
 	kfree(pages);
 
 #ifdef __NetBSD__
-	bus_dmamem_unmap(dmat, obj->mm.u.phys.kva,
+	bus_dmamem_kunmap(dmat, obj->mm.u.phys.kva,
 	    roundup_pow_of_two(obj->base.size));
 	obj->mm.u.phys.kva = NULL;
 	bus_dmamem_free(dmat, &obj->mm.u.phys.seg, 1);

From 3229d55123905abf7bc15757d6a6a06e3bac9e8e Mon Sep 17 00:00:00 2001
From: Taylor R Campbell <riastradh@NetBSD.org>
Date: Mon, 15 Jan 2024 22:55:15 +0000
Subject: [PATCH 5/8] i915_gem_region: Fill sg_pgs, with size/PAGE_SIZE
 entries.

Use sg_alloc_table_from_bus_dmamem to do this.

i915_gem_mman.c vm_fault_cpu and i915_gem_object_release_mmap_offset
both rely on sg_pgs to be a page array, so using something else like
size >> ilog2(mem->mm.chunk_size) entries doesn't work.  And they
rely on the sg_pgs entries to be initialized, which we weren't doing
before, and which sg_alloc_table_from_bus_dmamem does for us.

Related to PR kern/57833.

XXX pullup-10
---
 .../bsd/drm2/dist/drm/i915/gem/i915_gem_region.c | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)

diff --git a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_region.c b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_region.c
index e539dc4530dc..72efd363fdc7 100644
--- a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_region.c
+++ b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_region.c
@@ -45,10 +45,12 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 	if (!st)
 		return -ENOMEM;
 
+#ifndef __NetBSD__
 	if (sg_alloc_table(st, size >> ilog2(mem->mm.chunk_size), GFP_KERNEL)) {
 		kfree(st);
 		return -ENOMEM;
 	}
+#endif
 
 	flags = I915_ALLOC_MIN_PAGE_SIZE;
 	if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
@@ -60,7 +62,6 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 
 	GEM_BUG_ON(list_empty(blocks));
 
-	sg = st->sgl;
 #ifdef __NetBSD__
 	__USE(prev_end);
 	bus_dma_tag_t dmat = obj->base.dev->dmat;
@@ -68,6 +69,8 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 	int i = 0, nsegs = 0;
 	bool loaded = false;
 
+	sg = NULL;
+
 	list_for_each_entry(block, blocks, link) {
 		if (nsegs >= INT_MAX ||
 		    nsegs >= SIZE_MAX/sizeof(segs[0]))
@@ -84,7 +87,15 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 
 		segs[i].ds_addr = mem->region.start + offset;
 		segs[i].ds_len = block_size;
+		i++;
 	}
+	KASSERT(i == nsegs);
+
+	ret = sg_alloc_table_from_bus_dmamem(st, dmat, segs, nsegs,
+	    GFP_KERNEL);
+	if (ret)
+		goto err;
+	sg = st->sgl;
 
 	/* XXX errno NetBSD->Linux */
 	ret = -bus_dmamap_create(dmat, size, nsegs, size, 0, BUS_DMA_WAITOK,
@@ -107,6 +118,7 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 
 	sg_page_sizes = i915_sg_page_sizes(sg);
 #else
+	sg = st->sgl;
 	st->nents = 0;
 	sg_page_sizes = 0;
 	prev_end = (resource_size_t)-1;
@@ -154,6 +166,8 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 err:
 	if (loaded)
 		bus_dmamap_unload(dmat, st->sgl->sg_dmamap);
+	if (sg && sg->sg_dmamap)
+		bus_dmamap_destroy(dmat, sg->sg_dmamap);
 	if (segs)
 		kmem_free(segs, nsegs * sizeof(segs[0]));
 	__intel_memory_region_put_pages_buddy(mem, blocks);

From c5314565868d7d7180b329eac2a88d02914d31f9 Mon Sep 17 00:00:00 2001
From: Taylor R Campbell <riastradh@NetBSD.org>
Date: Tue, 16 Jan 2024 00:28:23 +0000
Subject: [PATCH 6/8] i915_gem_stolen: Fix memory leak.

Found while trying to address the PR 57833 class of problems.

XXX pullup-10
---
 sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c
index b53d543fa894..9ad5df8c75c8 100644
--- a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c
+++ b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c
@@ -569,7 +569,8 @@ i915_pages_create_for_stolen(struct drm_device *dev,
 	}
 	loaded = true;
 
-out:	if (ret) {
+out:	kmem_free(seg, nseg * sizeof(seg[0]));
+	if (ret) {
 		if (loaded)
 			bus_dmamap_unload(dmat, st->sgl->sg_dmamap);
 		sg_free_table(st);

From 86efff2fa17f45b558e29e065f3616e891b1393f Mon Sep 17 00:00:00 2001
From: Taylor R Campbell <riastradh@NetBSD.org>
Date: Tue, 16 Jan 2024 00:40:20 +0000
Subject: [PATCH 7/8] i915_gem_stolen: Fill sg_pgs, with size/PAGE_SIZE
 entries.

Use sg_alloc_table_from_bus_dmamem to do this.

i915_gem_mman.c vm_fault_cpu and i915_gem_object_release_mmap_offset
both rely on sg_pgs to be a page array, so providing a table with
only one entry doesn't work (except by accident, if the object is
page-sized anyway).  And they rely on the sg_pgs entries to be
initialized, which we weren't doing before, and which
sg_alloc_table_from_bus_dmamem does for us.

Related to PR kern/57833.

XXX pullup-10
---
 .../drm2/dist/drm/i915/gem/i915_gem_stolen.c  | 29 ++++++++++++++-----
 1 file changed, 21 insertions(+), 8 deletions(-)

diff --git a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c
index 9ad5df8c75c8..1e7398090d11 100644
--- a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c
+++ b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c
@@ -506,14 +506,13 @@ i915_pages_create_for_stolen(struct drm_device *dev,
 {
 	struct drm_i915_private *i915 = to_i915(dev);
 	struct sg_table *st;
+	struct scatterlist *sg;
 #ifdef __NetBSD__
 	bus_dma_tag_t dmat = i915->drm.dmat;
 	bus_dma_segment_t *seg = NULL;
 	int nseg = 0, i;
 	bool loaded = false;
 	int ret;
-#else
-	struct scatterlist *sg;
 #endif
 
 	GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm)));
@@ -527,11 +526,6 @@ i915_pages_create_for_stolen(struct drm_device *dev,
 	if (st == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
-		kfree(st);
-		return ERR_PTR(-ENOMEM);
-	}
-
 #ifdef __NetBSD__
 	KASSERT((size % PAGE_SIZE) == 0);
 	nseg = size / PAGE_SIZE;
@@ -548,6 +542,17 @@ i915_pages_create_for_stolen(struct drm_device *dev,
 		seg[i].ds_len = PAGE_SIZE;
 	}
 
+	sg = NULL;
+
+	ret = sg_alloc_table_from_bus_dmamem(st, dmat, seg, nseg, GFP_KERNEL);
+	if (ret) {
+		DRM_ERROR("failed to alloc sg table for stolen object: %d\n",
+		    ret);
+		ret = -ENOMEM;
+		goto out;
+	}
+	sg = st->sgl;
+
 	/* XXX errno NetBSD->Linux */
 	ret = -bus_dmamap_create(dmat, size, nseg, PAGE_SIZE, 0,
 	    BUS_DMA_WAITOK, &st->sgl->sg_dmamap);
@@ -573,11 +578,19 @@ out:	kmem_free(seg, nseg * sizeof(seg[0]));
 	if (ret) {
 		if (loaded)
 			bus_dmamap_unload(dmat, st->sgl->sg_dmamap);
-		sg_free_table(st);
+		if (sg && sg->sg_dmamap)
+			bus_dmamap_destroy(dmat, sg->sg_dmamap);
+		if (sg)
+			sg_free_table(st);
 		kfree(st);
 		return ERR_PTR(ret);
 	}
 #else
+	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
+		kfree(st);
+		return ERR_PTR(-ENOMEM);
+	}
+
 	sg = st->sgl;
 	sg->offset = 0;
 	sg->length = size;

From f00c3079c6dc7eb6ff62c12a2b6d08e1144db957 Mon Sep 17 00:00:00 2001
From: Taylor R Campbell <riastradh@NetBSD.org>
Date: Tue, 16 Jan 2024 00:42:08 +0000
Subject: [PATCH 8/8] i915_gem_stolen: Take advantage of a local variable.

No functional change intended.

Prompted by PR kern/57833.

XXX pullup-10
---
 .../bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c     | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c
index 1e7398090d11..1635e57befd7 100644
--- a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c
+++ b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c
@@ -555,17 +555,17 @@ i915_pages_create_for_stolen(struct drm_device *dev,
 
 	/* XXX errno NetBSD->Linux */
 	ret = -bus_dmamap_create(dmat, size, nseg, PAGE_SIZE, 0,
-	    BUS_DMA_WAITOK, &st->sgl->sg_dmamap);
+	    BUS_DMA_WAITOK, &sg->sg_dmamap);
 	if (ret) {
 		DRM_ERROR("failed to create DMA map for stolen object: %d\n",
 		    ret);
-		st->sgl->sg_dmamap = NULL;
+		sg->sg_dmamap = NULL;
 		goto out;
 	}
-	st->sgl->sg_dmat = dmat;
+	sg->sg_dmat = dmat;
 
-	/* XXX errno NetBSD->Liux */
-	ret = -bus_dmamap_load_raw(dmat, st->sgl->sg_dmamap, seg, nseg, size,
+	/* XXX errno NetBSD->Linux */
+	ret = -bus_dmamap_load_raw(dmat, sg->sg_dmamap, seg, nseg, size,
 	    BUS_DMA_WAITOK);
 	if (ret) {
 		DRM_ERROR("failed to load DMA map for stolen object: %d\n",
@@ -577,7 +577,7 @@ i915_pages_create_for_stolen(struct drm_device *dev,
 out:	kmem_free(seg, nseg * sizeof(seg[0]));
 	if (ret) {
 		if (loaded)
-			bus_dmamap_unload(dmat, st->sgl->sg_dmamap);
+			bus_dmamap_unload(dmat, sg->sg_dmamap);
 		if (sg && sg->sg_dmamap)
 			bus_dmamap_destroy(dmat, sg->sg_dmamap);
 		if (sg)

