Source-Changes-HG archive

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]

[src/trunk]: src/sys/arch/arm/arm32 Make bus_dmamem_map aware of coherent dma...



details:   https://anonhg.NetBSD.org/src/rev/1f77e4e34a94
branches:  trunk
changeset: 782163:1f77e4e34a94
user:      matt <matt%NetBSD.org@localhost>
date:      Fri Oct 19 11:57:58 2012 +0000

description:
Make bus_dmamem_map aware of coherent dma regions.  Also, take advantage
of the use/existence of PMAP_MAP_POOLPAGE to avoid redundant mappings.

diffstat:

 sys/arch/arm/arm32/bus_dma.c |  58 +++++++++++++++++++++++++++++++++++++++++--
 1 files changed, 55 insertions(+), 3 deletions(-)

diffs (93 lines):

diff -r 5f503bd4a958 -r 1f77e4e34a94 sys/arch/arm/arm32/bus_dma.c
--- a/sys/arch/arm/arm32/bus_dma.c      Fri Oct 19 11:31:50 2012 +0000
+++ b/sys/arch/arm/arm32/bus_dma.c      Fri Oct 19 11:57:58 2012 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: bus_dma.c,v 1.61 2012/10/17 20:17:18 matt Exp $        */
+/*     $NetBSD: bus_dma.c,v 1.62 2012/10/19 11:57:58 matt Exp $        */
 
 /*-
  * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
@@ -33,7 +33,7 @@
 #define _ARM32_BUS_DMA_PRIVATE
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.61 2012/10/17 20:17:18 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.62 2012/10/19 11:57:58 matt Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -706,6 +706,10 @@
 _bus_dmamap_sync_segment(vaddr_t va, paddr_t pa, vsize_t len, int ops, bool readonly_p)
 {
        KASSERT((va & PAGE_MASK) == (pa & PAGE_MASK));
+#if 0
+       printf("sync_segment: va=%#lx pa=%#lx len=%#lx ops=%#x ro=%d\n",
+           va, pa, len, ops, readonly_p);
+#endif
 
        switch (ops) {
        case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
@@ -1178,6 +1182,39 @@
            segs, nsegs, (unsigned long)size, flags);
 #endif /* DEBUG_DMA */
 
+#ifdef PMAP_MAP_POOLPAGE
+       /*
+        * If all of memory is mapped, and we are mapping a single physically
+        * contiguous area then this area is already mapped.  Let's see if we
+        * avoid having a separate mapping for it.
+        */
+       if (nsegs == 1) {
+               paddr_t paddr = segs[0].ds_addr;
+               /*
+                * If this is a non-COHERENT mapping, then the existing kernel
+                * mapping is already compatible with it.
+                */
+               if ((flags & _BUS_DMAMAP_COHERENT) == 0) {
+                       *kvap = (void *)PMAP_MAP_POOLPAGE(paddr);
+                       return 0;
+               }
+               /*
+                * This is a COHERENT mapping, which unless this address is in
+                * a COHERENT dma range, will not be compatible.
+                */
+               if (t->_ranges != NULL) {
+                       const struct arm32_dma_range * const dr =
+                           _bus_dma_paddr_inrange(t->_ranges, t->_nranges,
+                               paddr);
+                       if (dr != NULL
+                           && (dr->dr_flags & _BUS_DMAMAP_COHERENT) != 0) {
+                               *kvap = (void *)PMAP_MAP_POOLPAGE(paddr);
+                               return 0;
+                       }
+               }
+       }
+#endif
+
        size = round_page(size);
        va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
 
@@ -1207,7 +1244,22 @@
                         * contain the virtal addresses we are making
                         * uncacheable.
                         */
-                       if (flags & BUS_DMA_COHERENT) {
+
+                       bool uncached = (flags & BUS_DMA_COHERENT);
+                       if (uncached) {
+                               const struct arm32_dma_range * const dr =
+                                   _bus_dma_paddr_inrange(t->_ranges,
+                                       t->_nranges, pa);
+                               /*
+                                * If this dma region is coherent then there is
+                                * no need for an uncached mapping.
+                                */
+                               if (dr != NULL
+                                   && (dr->dr_flags & _BUS_DMAMAP_COHERENT)) {
+                                       uncached = false;
+                               }
+                       }
+                       if (uncached) {
                                cpu_dcache_wbinv_range(va, PAGE_SIZE);
                                cpu_sdcache_wbinv_range(va, pa, PAGE_SIZE);
                                cpu_drain_writebuf();



Home | Main Index | Thread Index | Old Index