NetBSD-Bugs archive


Re: kern/57833: kernel panic on xorg exit



The following reply was made to PR kern/57833; it has been noted by GNATS.

From: Taylor R Campbell <riastradh%NetBSD.org@localhost>
To: Ramiro Aceves <ea1abz%gmail.com@localhost>,
	"David H. Gutteridge" <david%gutteridge.ca@localhost>
Cc: gnats-bugs%NetBSD.org@localhost, netbsd-bugs%NetBSD.org@localhost
Subject: Re: kern/57833: kernel panic on xorg exit
Date: Mon, 15 Jan 2024 17:19:21 +0000

 Small update to previous patch -- if we use pmap_kenter_pa then we
 need to use pmap_kremove later on, not pmap_remove.
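 
 For readers following along, the distinction is roughly this (an
 illustrative sketch of the pmap(9) pairing rules, not part of the
 patch itself):
 
 	/* Managed mapping: pmap_page_protect(pg, VM_PROT_NONE) will
 	 * tear this down along with any userland mappings of pg. */
 	pmap_enter(pmap_kernel(), va, pa, prot, flags);
 	...
 	pmap_remove(pmap_kernel(), va, va + PAGE_SIZE);
 
 	/* Unmanaged kernel mapping: pmap_page_protect leaves it
 	 * alone, and it must be undone with pmap_kremove, never
 	 * pmap_remove. */
 	pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
 	...
 	pmap_kremove(va, PAGE_SIZE);
 	pmap_update(pmap_kernel());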
 
 From cc07bce236934283926a18f9ba19e47e4d29869a Mon Sep 17 00:00:00 2001
 From: Taylor R Campbell <riastradh%NetBSD.org@localhost>
 Date: Sun, 14 Jan 2024 22:35:09 +0000
 Subject: [PATCH] i915: Ensure every get_pages method fills the sg pages.
 
 This is needed by i915 gem fault, which maps user virtual addresses
 to those pages, and by i915 gem object destruction, which does
 pmap_page_protect on the pages to remove any of those user virtual
 mappings.
 
 Note: This requires i915_gem_phys.c to use pmap_kenter_pa to preserve
 the _kernel's_ mapping of the pages after pmap_page_protect, but
 bus_dmamem_map currently uses pmap_enter(pmap_kernel(), ...) instead,
 which creates a mapping that is removed by pmap_page_protect.  So we
 use a variant of bus_dmamem_map that uses pmap_kenter_pa instead --
 perhaps bus_dmamem_map should do this itself, but this change is less
 risky to pull up than a change to bus_dmamem_map itself.
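 
 Concretely, the failure sequence looks something like this (an
 illustrative sketch, not part of the change):
 
 	/* bus_dmamem_map today: creates a managed kernel mapping... */
 	pmap_enter(pmap_kernel(), kva, pa,
 	    VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);
 	pmap_update(pmap_kernel());
 	...
 	/* ...which i915 gem object destruction later strips, along
 	 * with all user mappings of pg, leaving kva dangling: */
 	pmap_page_protect(pg, VM_PROT_NONE);	/* pg maps to pa */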
 
 While here, make iteration over sg_pgs a little more robust, and
 reduce diff from upstream where convenient.
 
 PR kern/57833
 
 XXX pullup-10
 ---
  .../drm2/dist/drm/i915/gem/i915_gem_mman.c    |  2 +-
  .../drm2/dist/drm/i915/gem/i915_gem_pages.c   |  9 ++
  .../drm2/dist/drm/i915/gem/i915_gem_phys.c    | 89 ++++++++++++++++++-
  .../drm2/dist/drm/i915/gem/i915_gem_region.c  | 23 +++--
  .../drm2/dist/drm/i915/gem/i915_gem_stolen.c  | 44 +++++----
  5 files changed, 143 insertions(+), 24 deletions(-)
 
 diff --git a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.c b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.c
 index 8e592f008ef7..d0265738e2c8 100644
 --- a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.c
 +++ b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.c
 @@ -675,7 +675,7 @@ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
 
  	if (!i915_gem_object_has_pages(obj))
  		return;
 -	for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) {
 +	for (i = 0; i < obj->mm.pages->sgl->sg_npgs; i++) {
  		page = obj->mm.pages->sgl->sg_pgs[i];
  		vm_page = &page->p_vmp;
  		pmap_page_protect(vm_page, VM_PROT_NONE);
 diff --git a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_pages.c b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_pages.c
 index 58075877254a..32c188352dbb 100644
 --- a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_pages.c
 +++ b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_pages.c
 @@ -42,6 +42,15 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
  	}
 
  #ifndef __NetBSD__
 +	/*
 +	 * Paranoia: In NetBSD, a scatterlist is just an array of
 +	 * pages, not an array of segments that might be larger than
 +	 * pages, so the number of entries must exactly match the size
 +	 * of the object (which should also be page-aligned).
 +	 */
 +	KASSERTMSG(pages->sgl->sg_npgs == obj->base.size >> PAGE_SHIFT,
 +	    "npgs=%zu size=%zu", pages->sgl->sg_npgs, obj->base.size);
 +
  	obj->mm.get_page.sg_pos = pages->sgl;
  	obj->mm.get_page.sg_idx = 0;
  #endif
 diff --git a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c
 index 3f68276eea5f..a5aec14d48a4 100644
 --- a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c
 +++ b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c
 @@ -24,6 +24,84 @@ __KERNEL_RCSID(0, "$NetBSD: i915_gem_phys.c,v 1.8 2021/12/19 12:45:43 riastradh
  #include "i915_gem_region.h"
  #include "i915_scatterlist.h"
 
 +#ifdef __NetBSD__
 +
 +#include <uvm/uvm.h>
 +#include <uvm/uvm_extern.h>
 +
 +#include <machine/pmap_private.h> /* kvtopte, pmap_pte_clearbits */
 +
 +/*
 + * Version of bus_dmamem_map that uses pmap_kenter_pa, not pmap_enter,
 + * so that it isn't affected by pmap_page_protect on the physical
 + * address.  Adapted from sys/arch/x86/x86/bus_dma.c.
 + */
 +static int
 +bus_dmamem_kmap(bus_dma_tag_t dmat, const bus_dma_segment_t *segs, int nsegs,
 +    size_t size, void **kvap, int flags)
 +{
 +	vaddr_t va;
 +	bus_addr_t addr;
 +	int curseg;
 +	const uvm_flag_t kmflags =
 +	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;
 +	u_int pmapflags = PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE;
 +
 +	size = round_page(size);
 +	if (flags & BUS_DMA_NOCACHE)
 +		pmapflags |= PMAP_NOCACHE;
 +
 +	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
 +
 +	if (va == 0)
 +		return ENOMEM;
 +
 +	*kvap = (void *)va;
 +
 +	for (curseg = 0; curseg < nsegs; curseg++) {
 +		for (addr = segs[curseg].ds_addr;
 +		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
 +		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
 +			if (size == 0)
 +				panic("_bus_dmamem_map: size botch");
 +				panic("_bus_dmamem_map: size botch");
 +			pmap_kenter_pa(va, addr,
 +			    VM_PROT_READ | VM_PROT_WRITE,
 +			    pmapflags);
 +		}
 +	}
 +	pmap_update(pmap_kernel());
 +
 +	return 0;
 +}
 +
 +static void
 +bus_dmamem_kunmap(bus_dma_tag_t t, void *kva, size_t size)
 +{
 +	pt_entry_t *pte, opte;
 +	vaddr_t va, sva, eva;
 +
 +	KASSERTMSG(((uintptr_t)kva & PGOFSET) == 0, "kva=%p", kva);
 +
 +	size = round_page(size);
 +	sva = (vaddr_t)kva;
 +	eva = sva + size;
 +
 +	/*
 +	 * mark pages cacheable again.
 +	 */
 +	for (va = sva; va < eva; va += PAGE_SIZE) {
 +		pte = kvtopte(va);
 +		opte = *pte;
 +		if ((opte & PTE_PCD) != 0)
 +			pmap_pte_clearbits(pte, PTE_PCD);
 +	}
 +	pmap_kremove((vaddr_t)kva, size);
 +	pmap_update(pmap_kernel());
 +	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
 +}
 +
 +#endif
 +
  #include <linux/nbsd-namespace.h>
 
  static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 @@ -65,7 +143,7 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
  	if (ret)
  		return -ENOMEM;
  	KASSERT(rsegs == 1);
 -	ret = -bus_dmamem_map(dmat, &obj->mm.u.phys.seg, 1,
 +	ret = -bus_dmamem_kmap(dmat, &obj->mm.u.phys.seg, 1,
  	    roundup_pow_of_two(obj->base.size), &vaddr,
  	    BUS_DMA_WAITOK|BUS_DMA_COHERENT);
  	if (ret)
 @@ -83,7 +161,12 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
  	if (!st)
  		goto err_pci;
 
 +#ifdef __NetBSD__
 +	if (sg_alloc_table_from_bus_dmamem(st, dmat, &obj->mm.u.phys.seg, 1,
 +		GFP_KERNEL))
 +#else
  	if (sg_alloc_table(st, 1, GFP_KERNEL))
 +#endif
  		goto err_st;
 =20
  	sg = st->sgl;
 @@ -151,7 +234,7 @@ err_st:
  err_pci:
  #ifdef __NetBSD__
  	if (vaddr) {
 -		bus_dmamem_unmap(dmat, vaddr,
 +		bus_dmamem_kunmap(dmat, vaddr,
  		    roundup_pow_of_two(obj->base.size));
  	}
  	obj->mm.u.phys.kva =3D NULL;
 @@ -225,7 +308,7 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
  	kfree(pages);
 
  #ifdef __NetBSD__
 -	bus_dmamem_unmap(dmat, obj->mm.u.phys.kva,
 +	bus_dmamem_kunmap(dmat, obj->mm.u.phys.kva,
  	    roundup_pow_of_two(obj->base.size));
  	obj->mm.u.phys.kva = NULL;
  	bus_dmamem_free(dmat, &obj->mm.u.phys.seg, 1);
 diff --git a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_region.c b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_region.c
 index 1a928988bd5f..85a1de8cb7ea 100644
 --- a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_region.c
 +++ b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_region.c
 @@ -45,10 +45,12 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
  	if (!st)
  		return -ENOMEM;
 
 +#ifndef __NetBSD__
  	if (sg_alloc_table(st, size >> ilog2(mem->mm.chunk_size), GFP_KERNEL)) {
  		kfree(st);
  		return -ENOMEM;
  	}
 +#endif
 =20
  	flags = I915_ALLOC_MIN_PAGE_SIZE;
  	if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
 @@ -60,15 +62,15 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 
  	GEM_BUG_ON(list_empty(blocks));
 
 -	sg = st->sgl;
  #ifdef __NetBSD__
  	__USE(prev_end);
 -	__USE(sg_page_sizes);
  	bus_dma_tag_t dmat = obj->base.dev->dmat;
  	bus_dma_segment_t *segs = NULL;
  	int i = 0, nsegs = 0;
  	bool loaded = false;
 
 +	sg = NULL;
 +
  	list_for_each_entry(block, blocks, link) {
  		if (nsegs >= INT_MAX ||
  		    nsegs >= SIZE_MAX/sizeof(segs[0]))
 @@ -85,7 +87,15 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 
  		segs[i].ds_addr = mem->region.start + offset;
  		segs[i].ds_len = block_size;
 +		i++;
  	}
 +	KASSERT(i == nsegs);
 +
 +	ret = sg_alloc_table_from_bus_dmamem(st, dmat, segs, nsegs,
 +	    GFP_KERNEL);
 +	if (ret)
 +		goto err;
 +	sg = st->sgl;
 
  	/* XXX errno NetBSD->Linux */
  	ret = -bus_dmamap_create(dmat, size, nsegs, size, 0, BUS_DMA_WAITOK,
 @@ -106,8 +116,9 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
  	kmem_free(segs, nsegs * sizeof(segs[0]));
  	segs = NULL;
 
 -	__i915_gem_object_set_pages(obj, st, i915_sg_page_sizes(sg));
 +	sg_page_sizes = i915_sg_page_sizes(sg);
  #else
 +	sg = st->sgl;
  	st->nents = 0;
  	sg_page_sizes = 0;
  	prev_end = (resource_size_t)-1;
 @@ -145,16 +156,18 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
  	sg_page_sizes |= sg->length;
  	sg_mark_end(sg);
  	i915_sg_trim(st);
 +#endif
 
  	__i915_gem_object_set_pages(obj, st, sg_page_sizes);
 -#endif
 
  	return 0;
 =20
  #ifdef __NetBSD__
  err:
  	if (loaded)
 -		bus_dmamap_unload(dmat, st->sgl->sg_dmamap);
 +		bus_dmamap_unload(dmat, sg->sg_dmamap);
 +	if (sg && sg->sg_dmamap)
 +		bus_dmamap_destroy(dmat, sg->sg_dmamap);
  	if (segs)
  		kmem_free(segs, nsegs * sizeof(segs[0]));
  	__intel_memory_region_put_pages_buddy(mem, blocks);
 diff --git a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c
 index b53d543fa894..1635e57befd7 100644
 --- a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c
 +++ b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c
 @@ -506,14 +506,13 @@ i915_pages_create_for_stolen(struct drm_device *dev,
  {
  	struct drm_i915_private *i915 = to_i915(dev);
  	struct sg_table *st;
 +	struct scatterlist *sg;
  #ifdef __NetBSD__
  	bus_dma_tag_t dmat = i915->drm.dmat;
  	bus_dma_segment_t *seg = NULL;
  	int nseg = 0, i;
  	bool loaded = false;
  	int ret;
 -#else
 -	struct scatterlist *sg;
  #endif
 
  	GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm)));
 @@ -527,11 +526,6 @@ i915_pages_create_for_stolen(struct drm_device *dev,
  	if (st == NULL)
  		return ERR_PTR(-ENOMEM);
 
 -	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
 -		kfree(st);
 -		return ERR_PTR(-ENOMEM);
 -	}
 -
  #ifdef __NetBSD__
  	KASSERT((size % PAGE_SIZE) == 0);
  	nseg = size / PAGE_SIZE;
 @@ -548,19 +542,30 @@ i915_pages_create_for_stolen(struct drm_device *dev,
  		seg[i].ds_len = PAGE_SIZE;
  	}
 
 +	sg = NULL;
 +
 +	ret = sg_alloc_table_from_bus_dmamem(st, dmat, seg, nseg, GFP_KERNEL);
 +	if (ret) {
 +		DRM_ERROR("failed to alloc sg table for stolen object: %d\n",
 +		    ret);
 +		ret = -ENOMEM;
 +		goto out;
 +	}
 +	sg = st->sgl;
 
  	/* XXX errno NetBSD->Linux */
  	ret = -bus_dmamap_create(dmat, size, nseg, PAGE_SIZE, 0,
 -	    BUS_DMA_WAITOK, &st->sgl->sg_dmamap);
 +	    BUS_DMA_WAITOK, &sg->sg_dmamap);
  	if (ret) {
  		DRM_ERROR("failed to create DMA map for stolen object: %d\n",
  		    ret);
 -		st->sgl->sg_dmamap = NULL;
 +		sg->sg_dmamap = NULL;
  		goto out;
  	}
 -	st->sgl->sg_dmat = dmat;
 +	sg->sg_dmat = dmat;
 
 -	/* XXX errno NetBSD->Liux */
 -	ret = -bus_dmamap_load_raw(dmat, st->sgl->sg_dmamap, seg, nseg, size,
 +	/* XXX errno NetBSD->Linux */
 +	ret = -bus_dmamap_load_raw(dmat, sg->sg_dmamap, seg, nseg, size,
  	    BUS_DMA_WAITOK);
  	if (ret) {
  		DRM_ERROR("failed to load DMA map for stolen object: %d\n",
 @@ -569,14 +574,23 @@ i915_pages_create_for_stolen(struct drm_device *dev,
  	}
  	loaded = true;
 
 -out:	if (ret) {
 +out:	kmem_free(seg, nseg * sizeof(seg[0]));
 +	if (ret) {
  		if (loaded)
 -			bus_dmamap_unload(dmat, st->sgl->sg_dmamap);
 -		sg_free_table(st);
 +			bus_dmamap_unload(dmat, sg->sg_dmamap);
 +		if (sg && sg->sg_dmamap)
 +			bus_dmamap_destroy(dmat, sg->sg_dmamap);
 +		if (sg)
 +			sg_free_table(st);
  		kfree(st);
  		return ERR_PTR(ret);
  	}
  #else
 +	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
 +		kfree(st);
 +		return ERR_PTR(-ENOMEM);
 +	}
 +
  	sg = st->sgl;
  	sg->offset = 0;
  	sg->length = size;
 
 

