Source-Changes-HG archive


[src/rmind-uvmplock]: src/sys Add code, dev_mem_getva() and dev_mem_relva(), ...



details:   https://anonhg.NetBSD.org/src/rev/8df550926187
branches:  rmind-uvmplock
changeset: 753063:8df550926187
user:      rmind <rmind%NetBSD.org@localhost>
date:      Wed Jun 02 03:12:43 2010 +0000

description:
Add code, dev_mem_getva() and dev_mem_relva(), to deal with cache-aliasing
issues by allocating a KVA whose colour matches that of the physical
address.  Used by architectures that have such a requirement.  For now,
this is enabled only for MIPS; others will follow.  This replaces the
previously introduced mm_md_getva() and mm_md_relva(), since the work is
now done in an MI way instead of an MD one.  Architectures only need to
define __HAVE_MM_MD_CACHE_ALIASING as an indicator.

Reviewed by Matt Thomas.
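
For readers unfamiliar with page colouring, the arithmetic used by
dev_mem_getva() can be tried outside the kernel.  Below is a minimal
userland sketch, not code from this commit: PAGE_SIZE, NCOLORS, the
sample KVA and the sample physical addresses are made-up assumptions.
It shows that an address taken from a region aligned to ptoa(ncolors),
plus the colour offset of the PA, always lands on the same cache
colour as the PA:

/*
 * Minimal sketch of the colour arithmetic in dev_mem_getva().
 * PAGE_SIZE, NCOLORS and all addresses are assumed values.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL		/* assumed page size */
#define NCOLORS		8UL		/* assumed number of colours */
#define COLORMASK	(NCOLORS - 1)

#define trunc_page(x)	((x) & ~(PAGE_SIZE - 1))
#define ptoa(x)		((x) * PAGE_SIZE)

/* Cache colour of an address: which alias bin its page falls into. */
static unsigned long
color_of(unsigned long addr)
{
	return (trunc_page(addr) / PAGE_SIZE) & COLORMASK;
}

int
main(void)
{
	/*
	 * Stand-in for uvm_km_alloc(..., ptoa(uvmexp.ncolors), ...):
	 * any address aligned to ptoa(NCOLORS) has colour 0.
	 */
	unsigned long kva = 16 * ptoa(NCOLORS);
	unsigned long pa;

	for (pa = 0x100000UL; pa < 0x100000UL + ptoa(NCOLORS);
	    pa += PAGE_SIZE) {
		/* Same computation as in dev_mem_getva(). */
		unsigned long coloroff = trunc_page(pa) & ptoa(COLORMASK);
		unsigned long va = kva + coloroff;

		/* VA and PA share a colour, so no cache alias forms. */
		assert(color_of(va) == color_of(pa));
		printf("pa %#lx colour %lu -> va %#lx colour %lu\n",
		    pa, color_of(pa), va, color_of(va));
	}
	return 0;
}

Note that the kernel code below allocates PAGE_SIZE + coloroff bytes,
so the page starting at kva + coloroff still lies entirely inside the
allocation, and dev_mem_relva() later frees the same PAGE_SIZE +
coloroff span starting at va - coloroff.
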

diffstat:

 sys/arch/mips/include/types.h |   3 +-
 sys/dev/mm.c                  |  56 +++++++++++++++++++++++++++++++++---------
 sys/dev/mm.h                  |   9 ++----
 3 files changed, 49 insertions(+), 19 deletions(-)

diffs (142 lines):

diff -r 6f2dd73fb8c2 -r 8df550926187 sys/arch/mips/include/types.h
--- a/sys/arch/mips/include/types.h     Mon May 31 01:12:13 2010 +0000
+++ b/sys/arch/mips/include/types.h     Wed Jun 02 03:12:43 2010 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: types.h,v 1.45.4.1 2010/03/18 04:36:50 rmind Exp $     */
+/*     $NetBSD: types.h,v 1.45.4.2 2010/06/02 03:12:44 rmind Exp $     */
 
 /*-
  * Copyright (c) 1992, 1993
@@ -145,5 +145,6 @@
 
 #define        __HAVE_MM_MD_DIRECT_MAPPED_PHYS
 #define        __HAVE_MM_MD_KERNACC
+#define        __HAVE_MM_MD_CACHE_ALIASING
 
 #endif /* _MACHTYPES_H_ */
diff -r 6f2dd73fb8c2 -r 8df550926187 sys/dev/mm.c
--- a/sys/dev/mm.c      Mon May 31 01:12:13 2010 +0000
+++ b/sys/dev/mm.c      Wed Jun 02 03:12:43 2010 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: mm.c,v 1.13.16.3 2010/04/25 21:08:45 rmind Exp $       */
+/*     $NetBSD: mm.c,v 1.13.16.4 2010/06/02 03:12:43 rmind Exp $       */
 
 /*-
  * Copyright (c) 2002, 2008, 2010 The NetBSD Foundation, Inc.
@@ -34,7 +34,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: mm.c,v 1.13.16.3 2010/04/25 21:08:45 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: mm.c,v 1.13.16.4 2010/06/02 03:12:43 rmind Exp $");
 
 #include "opt_compat_netbsd.h"
 
@@ -90,13 +90,49 @@
        pg = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_WIRED|UVM_KMF_ZERO);
        KASSERT(pg != 0);
        pmap_protect(pmap_kernel(), pg, pg + PAGE_SIZE, VM_PROT_READ);
+       pmap_update(pmap_kernel());
        dev_zero_page = (void *)pg;
 
-#ifndef __HAVE_MM_MD_PREFER_VA
+#ifndef __HAVE_MM_MD_CACHE_ALIASING
        /* KVA for mappings during I/O. */
        dev_mem_addr = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
            UVM_KMF_VAONLY|UVM_KMF_WAITVA);
        KASSERT(dev_mem_addr != 0);
+#else
+       dev_mem_addr = 0;
+#endif
+}
+
+
+/*
+ * dev_mem_getva: get a special virtual address.  If architecture requires,
+ * allocate VA according to PA, which avoids cache-aliasing issues.  Use a
+ * constant, general mapping address otherwise.
+ */
+static inline vaddr_t
+dev_mem_getva(paddr_t pa)
+{
+#ifdef __HAVE_MM_MD_CACHE_ALIASING
+       const vsize_t coloroff = trunc_page(pa) & ptoa(uvmexp.colormask);
+       const vaddr_t kva = uvm_km_alloc(kernel_map, PAGE_SIZE + coloroff,
+           ptoa(uvmexp.ncolors), UVM_KMF_VAONLY | UVM_KMF_WAITVA);
+
+       return kva + coloroff;
+#else
+       return dev_mem_addr;
+#endif
+}
+
+static inline void
+dev_mem_relva(paddr_t pa, vaddr_t va)
+{
+#ifdef __HAVE_MM_MD_CACHE_ALIASING
+       const vsize_t coloroff = trunc_page(pa) & ptoa(uvmexp.colormask);
+       const vaddr_t origva = va - coloroff;
+
+       uvm_km_free(kernel_map, origva, PAGE_SIZE + coloroff, UVM_KMF_VAONLY);
+#else
+       KASSERT(dev_mem_addr == va);
 #endif
 }
 
@@ -133,12 +169,9 @@
        have_direct = false;
 #endif
        if (!have_direct) {
-#ifndef __HAVE_MM_MD_PREFER_VA
-               const vaddr_t va = dev_mem_addr;
-#else
                /* Get a special virtual address. */
-               const vaddr_t va = mm_md_getva(paddr);
-#endif
+               const vaddr_t va = dev_mem_getva(paddr);
+
                /* Map selected KVA to physical address. */
                mutex_enter(&dev_mem_lock);
                pmap_kenter_pa(va, paddr, prot, 0);
@@ -148,14 +181,13 @@
                vaddr = va + offset;
                error = uiomove((void *)vaddr, len, uio);
 
-               /* Unmap.  Note: no need for pmap_update(). */
+               /* Unmap, flush before unlock. */
                pmap_kremove(va, PAGE_SIZE);
+               pmap_update(pmap_kernel());
                mutex_exit(&dev_mem_lock);
 
-#ifdef __HAVE_MM_MD_PREFER_VA
                /* "Release" the virtual address. */
-               mm_md_relva(va);
-#endif
+               dev_mem_relva(paddr, va);
        } else {
                /* Direct map, just perform I/O. */
                vaddr += offset;
diff -r 6f2dd73fb8c2 -r 8df550926187 sys/dev/mm.h
--- a/sys/dev/mm.h      Mon May 31 01:12:13 2010 +0000
+++ b/sys/dev/mm.h      Wed Jun 02 03:12:43 2010 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: mm.h,v 1.1.2.2 2010/04/25 15:27:35 rmind Exp $ */
+/*     $NetBSD: mm.h,v 1.1.2.3 2010/06/02 03:12:43 rmind Exp $ */
 
 /*-
  * Copyright (c) 2008 Joerg Sonnenberger <joerg%NetBSD.org@localhost>.
@@ -87,12 +87,9 @@
 bool   mm_md_direct_mapped_io(void *, paddr_t *);
 
 /*
- * Optional hooks to select and release a special virtual address,
- * in order to avoid cache aliasing issues on certain architectures.
+ * Some architectures may need to deal with cache aliasing issues.
  *
- * machine/types.h must define __HAVE_MM_MD_PREFER_VA to use this.
+ * machine/types.h must define __HAVE_MM_MD_CACHE_ALIASING to note that.
  */
-vaddr_t        mm_md_getva(paddr_t);
-void   mm_md_relva(vaddr_t);
 
 #endif /* _SYS_DEV_MM_H_ */
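
Putting the pieces together, the non-direct-map I/O path in
mm_readwrite() now follows the pattern below.  This is only a
condensed restatement of the diff above (declarations, error handling
and the lines falling between the two hunks are elided), not
additional code from the commit:

	/* Get a colour-matched (or constant) KVA for the target PA. */
	const vaddr_t va = dev_mem_getva(paddr);

	mutex_enter(&dev_mem_lock);
	pmap_kenter_pa(va, paddr, prot, 0);
	/* ... */
	error = uiomove((void *)(va + offset), len, uio);

	/* Unmap, flush before unlock. */
	pmap_kremove(va, PAGE_SIZE);
	pmap_update(pmap_kernel());
	mutex_exit(&dev_mem_lock);

	/* Release the KVA (only a KASSERT on non-aliasing machines). */
	dev_mem_relva(paddr, va);
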


