
Re: kern/57833: kernel panic on xorg exit



OK, that's frustrating!  Here's one more change, attached as
pr57833-i915gempages-v5.diff -- this will print some information that
was optimized away and is therefore unavailable in gdb.

If it still crashes: can you find the i915_gem_object_get_pages_phys
frame and share the output of `print *obj' there, in addition to the
dmesg output and `info locals' from all the frames?
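
For example, something like the following from the crash dump (a
sketch only -- the frame number and source locations will differ on
your kernel):

	(gdb) bt
	...
	#8  i915_gem_object_get_pages_phys (obj=...) at i915_gem_phys.c:...
	...
	(gdb) frame 8
	(gdb) print *obj
	(gdb) info locals
	(gdb) up		# then repeat `info locals' in each frame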

Attached as pr57833-i915gempages-v5.patch is a step-by-step patch
series.  If you have the time, and the single giant .diff still
doesn't work, it would be helpful if you could bisect the series to
find where the old symptom goes away and the new symptom appears; a
sketch of one way to do that follows below.  (It's possible I made a
mistake in some of the related changes to files other than
i915_gem_phys.c.)
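
One way to do the bisection, assuming the series is split into its
nine individual patches (the file names here are illustrative):

	cd /usr/src
	patch -p1 < 0001-i915_gem_region-Reduce-diff-from-upstream.patch
	patch -p1 < 0002-i915_gem-Avoid-walking-off-end-of-sg_pgs.patch
	...			# apply patches 1..k cumulatively
	./build.sh -u kernel=GENERIC	# then boot the new kernel and test

Then binary-search on k: while a kernel built with patches 1..k still
shows the old symptom, increase k; once the new symptom appears, the
patch that first introduces it is the one to look at.
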
diff --git a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.c b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.c
index 8e592f008ef7..d0265738e2c8 100644
--- a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.c
+++ b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.c
@@ -675,7 +675,7 @@ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
 
 	if (!i915_gem_object_has_pages(obj))
 		return;
-	for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) {
+	for (i = 0; i < obj->mm.pages->sgl->sg_npgs; i++) {
 		page = obj->mm.pages->sgl->sg_pgs[i];
 		vm_page = &page->p_vmp;
 		pmap_page_protect(vm_page, VM_PROT_NONE);
diff --git a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_pages.c b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_pages.c
index 58075877254a..2bc7b11695ec 100644
--- a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_pages.c
+++ b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_pages.c
@@ -42,6 +42,18 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 	}
 
 #ifndef __NetBSD__
+	/*
+	 * Paranoia: In NetBSD, a scatterlist is just an array of
+	 * pages, not an array of segments that might be larger than
+	 * pages, so the number of entries must exactly match the size
+	 * of the object (which should also be page-aligned).
+	 *
+	 * Both vm_fault_cpu and i915_gem_object_release_mmap_offset in
+	 * i915_gem_mman.c rely on this page array as such.
+	 */
+	KASSERTMSG(pages->sgl->sg_npgs == obj->base.size >> PAGE_SHIFT,
+	    "npgs=%zu size=%zu", pages->sgl->sg_npgs, obj->base.size);
+
 	obj->mm.get_page.sg_pos = pages->sgl;
 	obj->mm.get_page.sg_idx = 0;
 #endif
diff --git a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c
index 3f68276eea5f..07f7825ec0db 100644
--- a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c
+++ b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c
@@ -24,6 +24,87 @@ __KERNEL_RCSID(0, "$NetBSD: i915_gem_phys.c,v 1.8 2021/12/19 12:45:43 riastradh
 #include "i915_gem_region.h"
 #include "i915_scatterlist.h"
 
+#ifdef __NetBSD__
+
+#include <uvm/uvm.h>
+#include <uvm/uvm_extern.h>
+
+#include <machine/pmap_private.h> /* kvtopte, pmap_pte_clearbits */
+
+/*
+ * Version of bus_dmamem_map that uses pmap_kenter_pa, not pmap_enter,
+ * so that it isn't affected by pmap_page_protect on the physical
+ * address.  Adapted from sys/arch/x86/x86/bus_dma.c.
+ */
+static int
+bus_dmamem_kmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
+    size_t size, void **kvap, int flags)
+{
+	vaddr_t va;
+	bus_addr_t addr;
+	int curseg;
+	const uvm_flag_t kmflags =
+	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;
+	u_int pmapflags = PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE;
+
+	printf("%s: t=%p segs=%p nsegs=%d size=0x%zx kvap=%p flags=0x%x\n",
+	    __func__, t, segs, nsegs, size, kvap, flags);
+
+	size = round_page(size);
+	if (flags & BUS_DMA_NOCACHE)
+		pmapflags |= PMAP_NOCACHE;
+
+	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
+
+	if (va == 0)
+		return ENOMEM;
+
+	*kvap = (void *)va;
+
+	for (curseg = 0; curseg < nsegs; curseg++) {
+		for (addr = segs[curseg].ds_addr;
+		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
+		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
+			if (size == 0)
+				panic("_bus_dmamem_kmap: size botch");
+			pmap_kenter_pa(va, addr,
+			    VM_PROT_READ | VM_PROT_WRITE,
+			    pmapflags);
+		}
+	}
+	pmap_update(pmap_kernel());
+
+	return 0;
+}
+
+static void
+bus_dmamem_kunmap(bus_dma_tag_t t, void *kva, size_t size)
+{
+	pt_entry_t *pte, opte;
+	vaddr_t va, sva, eva;
+
+	KASSERTMSG(((uintptr_t)kva & PGOFSET) == 0, "kva=%p", kva);
+
+	size = round_page(size);
+	sva = (vaddr_t)kva;
+	eva = sva + size;
+
+	/*
+	 * mark pages cacheable again.
+	 */
+	for (va = sva; va < eva; va += PAGE_SIZE) {
+		pte = kvtopte(va);
+		opte = *pte;
+		if ((opte & PTE_PCD) != 0)
+			pmap_pte_clearbits(pte, PTE_PCD);
+	}
+	pmap_kremove((vaddr_t)kva, size);
+	pmap_update(pmap_kernel());
+	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
+}
+
+#endif
+
 #include <linux/nbsd-namespace.h>
 
 static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
@@ -65,7 +146,7 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 	if (ret)
 		return -ENOMEM;
 	KASSERT(rsegs == 1);
-	ret = -bus_dmamem_map(dmat, &obj->mm.u.phys.seg, 1,
+	ret = -bus_dmamem_kmap(dmat, &obj->mm.u.phys.seg, 1,
 	    roundup_pow_of_two(obj->base.size), &vaddr,
 	    BUS_DMA_WAITOK|BUS_DMA_COHERENT);
 	if (ret)
@@ -83,7 +164,12 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 	if (!st)
 		goto err_pci;
 
+#ifdef __NetBSD__
+	if (sg_alloc_table_from_bus_dmamem(st, dmat, &obj->mm.u.phys.seg, 1,
+		GFP_KERNEL))
+#else
 	if (sg_alloc_table(st, 1, GFP_KERNEL))
+#endif
 		goto err_st;
 
 	sg = st->sgl;
@@ -151,7 +237,7 @@ err_st:
 err_pci:
 #ifdef __NetBSD__
 	if (vaddr) {
-		bus_dmamem_unmap(dmat, vaddr,
+		bus_dmamem_kunmap(dmat, vaddr,
 		    roundup_pow_of_two(obj->base.size));
 	}
 	obj->mm.u.phys.kva = NULL;
@@ -225,7 +311,7 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
 	kfree(pages);
 
 #ifdef __NetBSD__
-	bus_dmamem_unmap(dmat, obj->mm.u.phys.kva,
+	bus_dmamem_kunmap(dmat, obj->mm.u.phys.kva,
 	    roundup_pow_of_two(obj->base.size));
 	obj->mm.u.phys.kva = NULL;
 	bus_dmamem_free(dmat, &obj->mm.u.phys.seg, 1);
diff --git a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_region.c b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_region.c
index 1a928988bd5f..72efd363fdc7 100644
--- a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_region.c
+++ b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_region.c
@@ -45,10 +45,12 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 	if (!st)
 		return -ENOMEM;
 
+#ifndef __NetBSD__
 	if (sg_alloc_table(st, size >> ilog2(mem->mm.chunk_size), GFP_KERNEL)) {
 		kfree(st);
 		return -ENOMEM;
 	}
+#endif
 
 	flags = I915_ALLOC_MIN_PAGE_SIZE;
 	if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
@@ -60,15 +62,15 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 
 	GEM_BUG_ON(list_empty(blocks));
 
-	sg = st->sgl;
 #ifdef __NetBSD__
 	__USE(prev_end);
-	__USE(sg_page_sizes);
 	bus_dma_tag_t dmat = obj->base.dev->dmat;
 	bus_dma_segment_t *segs = NULL;
 	int i = 0, nsegs = 0;
 	bool loaded = false;
 
+	sg = NULL;
+
 	list_for_each_entry(block, blocks, link) {
 		if (nsegs >= INT_MAX ||
 		    nsegs >= SIZE_MAX/sizeof(segs[0]))
@@ -85,7 +87,15 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 
 		segs[i].ds_addr = mem->region.start + offset;
 		segs[i].ds_len = block_size;
+		i++;
 	}
+	KASSERT(i == nsegs);
+
+	ret = sg_alloc_table_from_bus_dmamem(st, dmat, segs, nsegs,
+	    GFP_KERNEL);
+	if (ret)
+		goto err;
+	sg = st->sgl;
 
 	/* XXX errno NetBSD->Linux */
 	ret = -bus_dmamap_create(dmat, size, nsegs, size, 0, BUS_DMA_WAITOK,
@@ -106,8 +116,9 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 	kmem_free(segs, nsegs * sizeof(segs[0]));
 	segs = NULL;
 
-	__i915_gem_object_set_pages(obj, st, i915_sg_page_sizes(sg));
+	sg_page_sizes = i915_sg_page_sizes(sg);
 #else
+	sg = st->sgl;
 	st->nents = 0;
 	sg_page_sizes = 0;
 	prev_end = (resource_size_t)-1;
@@ -145,9 +156,9 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 	sg_page_sizes |= sg->length;
 	sg_mark_end(sg);
 	i915_sg_trim(st);
+#endif
 
 	__i915_gem_object_set_pages(obj, st, sg_page_sizes);
-#endif
 
 	return 0;
 
@@ -155,6 +166,8 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 err:
 	if (loaded)
 		bus_dmamap_unload(dmat, st->sgl->sg_dmamap);
+	if (sg && sg->sg_dmamap)
+		bus_dmamap_destroy(dmat, sg->sg_dmamap);
 	if (segs)
 		kmem_free(segs, nsegs * sizeof(segs[0]));
 	__intel_memory_region_put_pages_buddy(mem, blocks);
diff --git a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c
index b53d543fa894..1635e57befd7 100644
--- a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c
+++ b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c
@@ -506,14 +506,13 @@ i915_pages_create_for_stolen(struct drm_device *dev,
 {
 	struct drm_i915_private *i915 = to_i915(dev);
 	struct sg_table *st;
+	struct scatterlist *sg;
 #ifdef __NetBSD__
 	bus_dma_tag_t dmat = i915->drm.dmat;
 	bus_dma_segment_t *seg = NULL;
 	int nseg = 0, i;
 	bool loaded = false;
 	int ret;
-#else
-	struct scatterlist *sg;
 #endif
 
 	GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm)));
@@ -527,11 +526,6 @@ i915_pages_create_for_stolen(struct drm_device *dev,
 	if (st == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
-		kfree(st);
-		return ERR_PTR(-ENOMEM);
-	}
-
 #ifdef __NetBSD__
 	KASSERT((size % PAGE_SIZE) == 0);
 	nseg = size / PAGE_SIZE;
@@ -548,19 +542,30 @@ i915_pages_create_for_stolen(struct drm_device *dev,
 		seg[i].ds_len = PAGE_SIZE;
 	}
 
+	sg = NULL;
+
+	ret = sg_alloc_table_from_bus_dmamem(st, dmat, seg, nseg, GFP_KERNEL);
+	if (ret) {
+		DRM_ERROR("failed to alloc sg table for stolen object: %d\n",
+		    ret);
+		ret = -ENOMEM;
+		goto out;
+	}
+	sg = st->sgl;
+
 	/* XXX errno NetBSD->Linux */
 	ret = -bus_dmamap_create(dmat, size, nseg, PAGE_SIZE, 0,
-	    BUS_DMA_WAITOK, &st->sgl->sg_dmamap);
+	    BUS_DMA_WAITOK, &sg->sg_dmamap);
 	if (ret) {
 		DRM_ERROR("failed to create DMA map for stolen object: %d\n",
 		    ret);
-		st->sgl->sg_dmamap = NULL;
+		sg->sg_dmamap = NULL;
 		goto out;
 	}
-	st->sgl->sg_dmat = dmat;
+	sg->sg_dmat = dmat;
 
-	/* XXX errno NetBSD->Liux */
-	ret = -bus_dmamap_load_raw(dmat, st->sgl->sg_dmamap, seg, nseg, size,
+	/* XXX errno NetBSD->Linux */
+	ret = -bus_dmamap_load_raw(dmat, sg->sg_dmamap, seg, nseg, size,
 	    BUS_DMA_WAITOK);
 	if (ret) {
 		DRM_ERROR("failed to load DMA map for stolen object: %d\n",
@@ -569,14 +574,23 @@ i915_pages_create_for_stolen(struct drm_device *dev,
 	}
 	loaded = true;
 
-out:	if (ret) {
+out:	kmem_free(seg, nseg * sizeof(seg[0]));
+	if (ret) {
 		if (loaded)
-			bus_dmamap_unload(dmat, st->sgl->sg_dmamap);
-		sg_free_table(st);
+			bus_dmamap_unload(dmat, sg->sg_dmamap);
+		if (sg && sg->sg_dmamap)
+			bus_dmamap_destroy(dmat, sg->sg_dmamap);
+		if (sg)
+			sg_free_table(st);
 		kfree(st);
 		return ERR_PTR(ret);
 	}
 #else
+	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
+		kfree(st);
+		return ERR_PTR(-ENOMEM);
+	}
+
 	sg = st->sgl;
 	sg->offset = 0;
 	sg->length = size;
From 7fb48cd42a6dc1e58f9c6b27c7853d721d9f96da Mon Sep 17 00:00:00 2001
From: Taylor R Campbell <riastradh%NetBSD.org@localhost>
Date: Mon, 15 Jan 2024 22:38:08 +0000
Subject: [PATCH 1/9] i915_gem_region: Reduce diff from upstream a little.

No functional change intended.
---
 sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_region.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_region.c b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_region.c
index 1a928988bd5f..e539dc4530dc 100644
--- a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_region.c
+++ b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_region.c
@@ -63,7 +63,6 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 	sg = st->sgl;
 #ifdef __NetBSD__
 	__USE(prev_end);
-	__USE(sg_page_sizes);
 	bus_dma_tag_t dmat = obj->base.dev->dmat;
 	bus_dma_segment_t *segs = NULL;
 	int i = 0, nsegs = 0;
@@ -106,7 +105,7 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 	kmem_free(segs, nsegs * sizeof(segs[0]));
 	segs = NULL;
 
-	__i915_gem_object_set_pages(obj, st, i915_sg_page_sizes(sg));
+	sg_page_sizes = i915_sg_page_sizes(sg);
 #else
 	st->nents = 0;
 	sg_page_sizes = 0;
@@ -145,9 +144,9 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 	sg_page_sizes |= sg->length;
 	sg_mark_end(sg);
 	i915_sg_trim(st);
+#endif
 
 	__i915_gem_object_set_pages(obj, st, sg_page_sizes);
-#endif
 
 	return 0;
 

From 97510e276148cfc6f223b397704b3d81905f8eb0 Mon Sep 17 00:00:00 2001
From: Taylor R Campbell <riastradh%NetBSD.org@localhost>
Date: Mon, 15 Jan 2024 22:38:38 +0000
Subject: [PATCH 2/9] i915_gem: Avoid walking off end of sg_pgs.

sg_npgs currently fails to match obj->base.size/PAGE_SIZE only due to
bugs in the construction of sg_pgs in various i915 gem object types,
which we should also fix, but let's avoid compounding it here.

Related to PR kern/57833.

XXX pullup-10
---
 sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.c b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.c
index 8e592f008ef7..d0265738e2c8 100644
--- a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.c
+++ b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.c
@@ -675,7 +675,7 @@ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
 
 	if (!i915_gem_object_has_pages(obj))
 		return;
-	for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) {
+	for (i = 0; i < obj->mm.pages->sgl->sg_npgs; i++) {
 		page = obj->mm.pages->sgl->sg_pgs[i];
 		vm_page = &page->p_vmp;
 		pmap_page_protect(vm_page, VM_PROT_NONE);
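
To spell out the failure mode (an illustrative sketch, not code from
the tree): sg_pgs[] has only sg_npgs entries, so a loop bounded by the
object size walks off the end of the array whenever a backend built
the table with fewer entries than pages:

	/* Before: i ran up to obj->base.size >> PAGE_SHIFT, which can
	 * exceed sg_npgs, so sg_pgs[i] read past the page array. */
	for (i = 0; i < obj->mm.pages->sgl->sg_npgs; i++) {
		/* Revoke user mappings of each page actually present. */
		page = obj->mm.pages->sgl->sg_pgs[i];
		pmap_page_protect(&page->p_vmp, VM_PROT_NONE);
	}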

From 4fabd36d0f4f0b06ff600d91789c56730a6698a7 Mon Sep 17 00:00:00 2001
From: Taylor R Campbell <riastradh%NetBSD.org@localhost>
Date: Wed, 17 Jan 2024 01:33:22 +0000
Subject: [PATCH 3/9] i915_gem: Assert page array size.

Let's detect the bug of sg_npgs failing to match
obj->base.size/PAGE_SIZE earlier.

Related to PR kern/57833.

XXX pullup-10
---
 .../bsd/drm2/dist/drm/i915/gem/i915_gem_pages.c      | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_pages.c b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_pages.c
index 58075877254a..2bc7b11695ec 100644
--- a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_pages.c
+++ b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_pages.c
@@ -42,6 +42,18 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 	}
 
 #ifndef __NetBSD__
+	/*
+	 * Paranoia: In NetBSD, a scatterlist is just an array of
+	 * pages, not an array of segments that might be larger than
+	 * pages, so the number of entries must exactly match the size
+	 * of the object (which should also be page-aligned).
+	 *
+	 * Both vm_fault_cpu and i915_gem_object_release_mmap_offset in
+	 * i915_gem_mman.c rely on this page array as such.
+	 */
+	KASSERTMSG(pages->sgl->sg_npgs == obj->base.size >> PAGE_SHIFT,
+	    "npgs=%zu size=%zu", pages->sgl->sg_npgs, obj->base.size);
+
 	obj->mm.get_page.sg_pos = pages->sgl;
 	obj->mm.get_page.sg_idx = 0;
 #endif
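
Concretely (illustrative numbers): with 4 KiB pages (PAGE_SHIFT 12), a
64 KiB object must arrive with sg_npgs == 65536 >> 12 == 16.  A table
built with a single entry covering the whole object -- as the phys and
stolen paths did before the later patches in this series -- has
sg_npgs == 1 and now trips this assertion at set-pages time instead of
corrupting memory at fault or release time.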

From 2911d578f7bb1b45ba50dd0caa08e1d54883befc Mon Sep 17 00:00:00 2001
From: Taylor R Campbell <riastradh%NetBSD.org@localhost>
Date: Mon, 15 Jan 2024 22:49:04 +0000
Subject: [PATCH 4/9] i915_gem_phys: Fill sg_pgs.

This is needed by i915 gem fault, which maps user virtual addresses
to those pages, and by i915 gem object destruction, which does
pmap_page_protect on the pages to remove any of those user virtual
mappings.

This needs pmap_kenter_pa rather than pmap_enter(pmap_kernel(), ...)
in order to preserve the _kernel's_ mapping of the pages after
pmap_page_protect.

But bus_dmamem_map currently uses pmap_enter(pmap_kernel(), ...)
instead which creates a mapping that is removed by pmap_page_protect.

So we use a variant of bus_dmamem_map that uses pmap_kenter_pa
instead.  Perhaps bus_dmamem_map should do this itself, but this
change is less risky to pull up than a change to bus_dmamem_map
itself.

PR kern/57833: kernel panic on xorg exit

XXX pullup-10
---
 .../drm2/dist/drm/i915/gem/i915_gem_phys.c    | 89 ++++++++++++++++++-
 1 file changed, 86 insertions(+), 3 deletions(-)

diff --git a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c
index 3f68276eea5f..a96e039c21a8 100644
--- a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c
+++ b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c
@@ -24,6 +24,84 @@ __KERNEL_RCSID(0, "$NetBSD: i915_gem_phys.c,v 1.8 2021/12/19 12:45:43 riastradh
 #include "i915_gem_region.h"
 #include "i915_scatterlist.h"
 
+#ifdef __NetBSD__
+
+#include <uvm/uvm.h>
+#include <uvm/uvm_extern.h>
+
+#include <machine/pmap_private.h> /* kvtopte, pmap_pte_clearbits */
+
+/*
+ * Version of bus_dmamem_map that uses pmap_kenter_pa, not pmap_enter,
+ * so that it isn't affected by pmap_page_protect on the physical
+ * address.  Adapted from sys/arch/x86/x86/bus_dma.c.
+ */
+static int
+bus_dmamem_kmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
+    size_t size, void **kvap, int flags)
+{
+	vaddr_t va;
+	bus_addr_t addr;
+	int curseg;
+	const uvm_flag_t kmflags =
+	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;
+	u_int pmapflags = PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE;
+
+	size = round_page(size);
+	if (flags & BUS_DMA_NOCACHE)
+		pmapflags |= PMAP_NOCACHE;
+
+	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
+
+	if (va == 0)
+		return ENOMEM;
+
+	*kvap = (void *)va;
+
+	for (curseg = 0; curseg < nsegs; curseg++) {
+		for (addr = segs[curseg].ds_addr;
+		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
+		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
+			if (size == 0)
+				panic("_bus_dmamem_kmap: size botch");
+			pmap_kenter_pa(va, addr,
+			    VM_PROT_READ | VM_PROT_WRITE,
+			    pmapflags);
+		}
+	}
+	pmap_update(pmap_kernel());
+
+	return 0;
+}
+
+static void
+bus_dmamem_kunmap(bus_dma_tag_t t, void *kva, size_t size)
+{
+	pt_entry_t *pte, opte;
+	vaddr_t va, sva, eva;
+
+	KASSERTMSG(((uintptr_t)kva & PGOFSET) == 0, "kva=%p", kva);
+
+	size = round_page(size);
+	sva = (vaddr_t)kva;
+	eva = sva + size;
+
+	/*
+	 * mark pages cacheable again.
+	 */
+	for (va = sva; va < eva; va += PAGE_SIZE) {
+		pte = kvtopte(va);
+		opte = *pte;
+		if ((opte & PTE_PCD) != 0)
+			pmap_pte_clearbits(pte, PTE_PCD);
+	}
+	pmap_kremove((vaddr_t)kva, size);
+	pmap_update(pmap_kernel());
+	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
+}
+
+#endif
+
 #include <linux/nbsd-namespace.h>
 
 static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
@@ -65,7 +143,7 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 	if (ret)
 		return -ENOMEM;
 	KASSERT(rsegs == 1);
-	ret = -bus_dmamem_map(dmat, &obj->mm.u.phys.seg, 1,
+	ret = -bus_dmamem_kmap(dmat, &obj->mm.u.phys.seg, 1,
 	    roundup_pow_of_two(obj->base.size), &vaddr,
 	    BUS_DMA_WAITOK|BUS_DMA_COHERENT);
 	if (ret)
@@ -83,7 +161,12 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 	if (!st)
 		goto err_pci;
 
+#ifdef __NetBSD__
+	if (sg_alloc_table_from_bus_dmamem(st, dmat, &obj->mm.u.phys.seg, 1,
+		GFP_KERNEL))
+#else
 	if (sg_alloc_table(st, 1, GFP_KERNEL))
+#endif
 		goto err_st;
 
 	sg = st->sgl;
@@ -151,7 +234,7 @@ err_st:
 err_pci:
 #ifdef __NetBSD__
 	if (vaddr) {
-		bus_dmamem_unmap(dmat, vaddr,
+		bus_dmamem_kunmap(dmat, vaddr,
 		    roundup_pow_of_two(obj->base.size));
 	}
 	obj->mm.u.phys.kva = NULL;
@@ -225,7 +308,7 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
 	kfree(pages);
 
 #ifdef __NetBSD__
-	bus_dmamem_unmap(dmat, obj->mm.u.phys.kva,
+	bus_dmamem_kunmap(dmat, obj->mm.u.phys.kva,
 	    roundup_pow_of_two(obj->base.size));
 	obj->mm.u.phys.kva = NULL;
 	bus_dmamem_free(dmat, &obj->mm.u.phys.seg, 1);
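
The distinction this relies on, as a minimal pmap(9) sketch
(illustrative only; va, pa, and pg stand for an arbitrary kernel
virtual address, physical address, and managed page):

	/* Managed mapping: tracked on the page's pv list, so a later
	 * pmap_page_protect(pg, VM_PROT_NONE) tears it down too. */
	pmap_enter(pmap_kernel(), va, pa, VM_PROT_READ|VM_PROT_WRITE,
	    PMAP_WIRED);
	pmap_update(pmap_kernel());

	/* Unmanaged kernel mapping: not on the pv list, so it survives
	 * pmap_page_protect() on the same physical page. */
	pmap_kenter_pa(va, pa, VM_PROT_READ|VM_PROT_WRITE, 0);
	pmap_update(pmap_kernel());

	/* Removes every managed mapping of pg, user and kernel alike --
	 * with plain bus_dmamem_map this is what tore out the phys
	 * object's kernel mapping. */
	pmap_page_protect(pg, VM_PROT_NONE);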

From 43593f6fbccce1cfe499bbed9822889cc2413e39 Mon Sep 17 00:00:00 2001
From: Taylor R Campbell <riastradh%NetBSD.org@localhost>
Date: Mon, 15 Jan 2024 22:55:15 +0000
Subject: [PATCH 5/9] i915_gem_region: Fill sg_pgs, with size/PAGE_SIZE
 entries.

Use sg_alloc_table_from_bus_dmamem to do this.

i915_gem_mman.c vm_fault_cpu and i915_gem_object_release_mmap_offset
both rely on sg_pgs to be a page array, so using something else like
size >> ilog2(mem->mm.chunk_size) entries doesn't work.  And they
rely on the sg_pgs entries to be initialized, which we weren't doing
before, and which sg_alloc_table_from_bus_dmamem does for us.

Related to PR kern/57833.

XXX pullup-10
---
 .../bsd/drm2/dist/drm/i915/gem/i915_gem_region.c | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)

diff --git a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_region.c b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_region.c
index e539dc4530dc..72efd363fdc7 100644
--- a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_region.c
+++ b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_region.c
@@ -45,10 +45,12 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 	if (!st)
 		return -ENOMEM;
 
+#ifndef __NetBSD__
 	if (sg_alloc_table(st, size >> ilog2(mem->mm.chunk_size), GFP_KERNEL)) {
 		kfree(st);
 		return -ENOMEM;
 	}
+#endif
 
 	flags = I915_ALLOC_MIN_PAGE_SIZE;
 	if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
@@ -60,7 +62,6 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 
 	GEM_BUG_ON(list_empty(blocks));
 
-	sg = st->sgl;
 #ifdef __NetBSD__
 	__USE(prev_end);
 	bus_dma_tag_t dmat = obj->base.dev->dmat;
@@ -68,6 +69,8 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 	int i = 0, nsegs = 0;
 	bool loaded = false;
 
+	sg = NULL;
+
 	list_for_each_entry(block, blocks, link) {
 		if (nsegs >= INT_MAX ||
 		    nsegs >= SIZE_MAX/sizeof(segs[0]))
@@ -84,7 +87,15 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 
 		segs[i].ds_addr = mem->region.start + offset;
 		segs[i].ds_len = block_size;
+		i++;
 	}
+	KASSERT(i == nsegs);
+
+	ret = sg_alloc_table_from_bus_dmamem(st, dmat, segs, nsegs,
+	    GFP_KERNEL);
+	if (ret)
+		goto err;
+	sg = st->sgl;
 
 	/* XXX errno NetBSD->Linux */
 	ret = -bus_dmamap_create(dmat, size, nsegs, size, 0, BUS_DMA_WAITOK,
@@ -107,6 +118,7 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 
 	sg_page_sizes = i915_sg_page_sizes(sg);
 #else
+	sg = st->sgl;
 	st->nents = 0;
 	sg_page_sizes = 0;
 	prev_end = (resource_size_t)-1;
@@ -154,6 +166,8 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 err:
 	if (loaded)
 		bus_dmamap_unload(dmat, st->sgl->sg_dmamap);
+	if (sg && sg->sg_dmamap)
+		bus_dmamap_destroy(dmat, sg->sg_dmamap);
 	if (segs)
 		kmem_free(segs, nsegs * sizeof(segs[0]));
 	__intel_memory_region_put_pages_buddy(mem, blocks);
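
For reference, the contract the i915_gem_mman.c consumers assume,
sketched with the NetBSD drm2 scatterlist fields (pgno is an arbitrary
page index, for illustration):

	/* sg_alloc_table_from_bus_dmamem fills one sg_pgs[] entry per
	 * PAGE_SIZE of the segments, so after it succeeds: */
	KASSERT(st->sgl->sg_npgs == obj->base.size >> PAGE_SHIFT);

	/* ...and vm_fault_cpu/release_mmap_offset can index pages: */
	struct page *page = st->sgl->sg_pgs[pgno];	/* pgno < sg_npgs */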

From 57ce19c53c6db50be440740495d8a87f9cef3a9c Mon Sep 17 00:00:00 2001
From: Taylor R Campbell <riastradh%NetBSD.org@localhost>
Date: Tue, 16 Jan 2024 00:28:23 +0000
Subject: [PATCH 6/9] i915_gem_stolen: Fix memory leak.

Found while trying to address the PR 57833 class of problems.

XXX pullup-10
---
 sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c
index b53d543fa894..9ad5df8c75c8 100644
--- a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c
+++ b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c
@@ -569,7 +569,8 @@ i915_pages_create_for_stolen(struct drm_device *dev,
 	}
 	loaded = true;
 
-out:	if (ret) {
+out:	kmem_free(seg, nseg * sizeof(seg[0]));
+	if (ret) {
 		if (loaded)
 			bus_dmamap_unload(dmat, st->sgl->sg_dmamap);
 		sg_free_table(st);

From 30b321cb3cd94f9b77bab1b409c7fab289a9f128 Mon Sep 17 00:00:00 2001
From: Taylor R Campbell <riastradh%NetBSD.org@localhost>
Date: Tue, 16 Jan 2024 00:40:20 +0000
Subject: [PATCH 7/9] i915_gem_stolen: Fill sg_pgs, with size/PAGE_SIZE
 entries.

Use sg_alloc_table_from_bus_dmamem to do this.

i915_gem_mman.c vm_fault_cpu and i915_gem_object_release_mmap_offset
both rely on sg_pgs to be a page array, so providing a table with
only one entry doesn't work (except by accident, if the object is
page-sized anyway).  And they rely on the sg_pgs entries to be
initialized, which we weren't doing before, and which
sg_alloc_table_from_bus_dmamem does for us.

Related to PR kern/57833.

XXX pullup-10
---
 .../drm2/dist/drm/i915/gem/i915_gem_stolen.c  | 29 ++++++++++++++-----
 1 file changed, 21 insertions(+), 8 deletions(-)

diff --git a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c
index 9ad5df8c75c8..1e7398090d11 100644
--- a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c
+++ b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c
@@ -506,14 +506,13 @@ i915_pages_create_for_stolen(struct drm_device *dev,
 {
 	struct drm_i915_private *i915 = to_i915(dev);
 	struct sg_table *st;
+	struct scatterlist *sg;
 #ifdef __NetBSD__
 	bus_dma_tag_t dmat = i915->drm.dmat;
 	bus_dma_segment_t *seg = NULL;
 	int nseg = 0, i;
 	bool loaded = false;
 	int ret;
-#else
-	struct scatterlist *sg;
 #endif
 
 	GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm)));
@@ -527,11 +526,6 @@ i915_pages_create_for_stolen(struct drm_device *dev,
 	if (st == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
-		kfree(st);
-		return ERR_PTR(-ENOMEM);
-	}
-
 #ifdef __NetBSD__
 	KASSERT((size % PAGE_SIZE) == 0);
 	nseg = size / PAGE_SIZE;
@@ -548,6 +542,17 @@ i915_pages_create_for_stolen(struct drm_device *dev,
 		seg[i].ds_len = PAGE_SIZE;
 	}
 
+	sg = NULL;
+
+	ret = sg_alloc_table_from_bus_dmamem(st, dmat, seg, nseg, GFP_KERNEL);
+	if (ret) {
+		DRM_ERROR("failed to alloc sg table for stolen object: %d\n",
+		    ret);
+		ret = -ENOMEM;
+		goto out;
+	}
+	sg = st->sgl;
+
 	/* XXX errno NetBSD->Linux */
 	ret = -bus_dmamap_create(dmat, size, nseg, PAGE_SIZE, 0,
 	    BUS_DMA_WAITOK, &st->sgl->sg_dmamap);
@@ -573,11 +578,19 @@ out:	kmem_free(seg, nseg * sizeof(seg[0]));
 	if (ret) {
 		if (loaded)
 			bus_dmamap_unload(dmat, st->sgl->sg_dmamap);
-		sg_free_table(st);
+		if (sg && sg->sg_dmamap)
+			bus_dmamap_destroy(dmat, sg->sg_dmamap);
+		if (sg)
+			sg_free_table(st);
 		kfree(st);
 		return ERR_PTR(ret);
 	}
 #else
+	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
+		kfree(st);
+		return ERR_PTR(-ENOMEM);
+	}
+
 	sg = st->sgl;
 	sg->offset = 0;
 	sg->length = size;

From 74e265bc6fa36862a55163f44788d8963f0689fc Mon Sep 17 00:00:00 2001
From: Taylor R Campbell <riastradh%NetBSD.org@localhost>
Date: Tue, 16 Jan 2024 00:42:08 +0000
Subject: [PATCH 8/9] i915_gem_stolen: Take advantage of a local variable.

No functional change intended.

Prompted by PR kern/57833.

XXX pullup-10
---
 .../bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c     | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c
index 1e7398090d11..1635e57befd7 100644
--- a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c
+++ b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c
@@ -555,17 +555,17 @@ i915_pages_create_for_stolen(struct drm_device *dev,
 
 	/* XXX errno NetBSD->Linux */
 	ret = -bus_dmamap_create(dmat, size, nseg, PAGE_SIZE, 0,
-	    BUS_DMA_WAITOK, &st->sgl->sg_dmamap);
+	    BUS_DMA_WAITOK, &sg->sg_dmamap);
 	if (ret) {
 		DRM_ERROR("failed to create DMA map for stolen object: %d\n",
 		    ret);
-		st->sgl->sg_dmamap = NULL;
+		sg->sg_dmamap = NULL;
 		goto out;
 	}
-	st->sgl->sg_dmat = dmat;
+	sg->sg_dmat = dmat;
 
-	/* XXX errno NetBSD->Liux */
-	ret = -bus_dmamap_load_raw(dmat, st->sgl->sg_dmamap, seg, nseg, size,
+	/* XXX errno NetBSD->Linux */
+	ret = -bus_dmamap_load_raw(dmat, sg->sg_dmamap, seg, nseg, size,
 	    BUS_DMA_WAITOK);
 	if (ret) {
 		DRM_ERROR("failed to load DMA map for stolen object: %d\n",
@@ -577,7 +577,7 @@ i915_pages_create_for_stolen(struct drm_device *dev,
 out:	kmem_free(seg, nseg * sizeof(seg[0]));
 	if (ret) {
 		if (loaded)
-			bus_dmamap_unload(dmat, st->sgl->sg_dmamap);
+			bus_dmamap_unload(dmat, sg->sg_dmamap);
 		if (sg && sg->sg_dmamap)
 			bus_dmamap_destroy(dmat, sg->sg_dmamap);
 		if (sg)

From 28b6c22d35054a2c62eb8bd4d0fb955a00a2517f Mon Sep 17 00:00:00 2001
From: Taylor R Campbell <riastradh%NetBSD.org@localhost>
Date: Wed, 17 Jan 2024 01:36:11 +0000
Subject: [PATCH 9/9] WIP: i915: debug bus_dmamem_kmap

---
 sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c
index a96e039c21a8..07f7825ec0db 100644
--- a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c
+++ b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c
@@ -47,6 +47,9 @@ bus_dmamem_kmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
 	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;
 	u_int pmapflags = PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE;
 
+	printf("%s: t=%p segs=%p nsegs=%d size=0x%zx kvap=%p flags=0x%x\n",
+	    __func__, t, segs, nsegs, size, kvap, flags);
+
 	size = round_page(size);
 	if (flags & BUS_DMA_NOCACHE)
 		pmapflags |= PMAP_NOCACHE;

