Source-Changes-HG archive


[src/trunk]: src/sys/arch Add fast softint(9) support for sparc64.



details:   https://anonhg.NetBSD.org/src/rev/9995027a1c7f
branches:  trunk
changeset: 766198:9995027a1c7f
user:      nakayama <nakayama@NetBSD.org>
date:      Sat Jun 18 18:51:17 2011 +0000

description:
Add fast softint(9) support for sparc64.

Reviewed on port-sparc64.
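
For context on what this MD backend serves: drivers use the MI softint(9) interface, which on a __HAVE_FAST_SOFTINTS platform ends up in the softint_init_md/softint_trigger hooks added below. A minimal usage sketch (the mydrv_* names are invented for illustration; only softint_establish, softint_schedule and the SOFTINT_* flags come from softint(9)):

#include <sys/intr.h>

/* Hypothetical driver-side use of softint(9); the mydrv_* names are made up. */
static void *mydrv_sih;

static void
mydrv_softintr(void *arg)
{
        /* Runs in a dedicated softint LWP at IPL_SOFTNET. */
}

static void
mydrv_attach(void)
{
        /* SOFTINT_MPSAFE: the handler does its own locking. */
        mydrv_sih = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
            mydrv_softintr, NULL);
}

static int
mydrv_hardintr(void *arg)
{
        /* Defer the bulk of the work out of hard interrupt context. */
        softint_schedule(mydrv_sih);
        return 1;
}

With this change, softint_schedule() no longer takes the slow thread-wakeup path on sparc64: it raises a software interrupt at the handler's PIL, and softint_fastintr switches to the handler LWP directly from interrupt level.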

diffstat:

 sys/arch/sparc/include/types.h       |    7 +-
 sys/arch/sparc64/sparc64/cpu.c       |    5 +-
 sys/arch/sparc64/sparc64/genassym.cf |    6 +-
 sys/arch/sparc64/sparc64/intr.c      |   47 ++++++++++-
 sys/arch/sparc64/sparc64/locore.s    |  147 +++++++++++++++++++++++++++++++++-
 sys/arch/sparc64/sparc64/pmap.c      |    5 +-
 6 files changed, 202 insertions(+), 15 deletions(-)

diffs (truncated from 401 to 300 lines):

diff -r 49d294f88e19 -r 9995027a1c7f sys/arch/sparc/include/types.h
--- a/sys/arch/sparc/include/types.h    Sat Jun 18 18:43:41 2011 +0000
+++ b/sys/arch/sparc/include/types.h    Sat Jun 18 18:51:17 2011 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: types.h,v 1.58 2011/06/12 03:35:46 rmind Exp $ */
+/*     $NetBSD: types.h,v 1.59 2011/06/18 18:51:17 nakayama Exp $ */
 
 /*
  * Copyright (c) 1992, 1993
@@ -51,6 +51,8 @@
 #include "opt_sparc_arch.h"
 #endif
 
+#ifndef _LOCORE
+
 #include <sys/cdefs.h>
 #include <sys/featuretest.h>
 #include <machine/int_types.h>
@@ -113,6 +115,8 @@
 #define        __SIMPLELOCK_LOCKED     0xff
 #define        __SIMPLELOCK_UNLOCKED   0
 
+#endif /* _LOCORE */
+
 #define        __HAVE_DEVICE_REGISTER
 #define        __HAVE_SYSCALL_INTERN
 #define        __GENERIC_SOFT_INTERRUPTS_ALL_LEVELS
@@ -122,6 +126,7 @@
 #define        __HAVE_DEVICE_REGISTER_POSTCONFIG
 #define        __HAVE_ATOMIC64_OPS
 #define        __HAVE_CPU_COUNTER      /* sparc v9 CPUs have %tick */
+#define        __HAVE_FAST_SOFTINTS
 #if defined(_KERNEL)
 #define        __HAVE_RAS
 #endif
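
The new _LOCORE bracketing is what lets locore.s include <machine/types.h> (see the #include added in the locore.s hunk below): NetBSD assembly sources conventionally define _LOCORE before pulling in kernel headers, so material that only the C compiler understands has to be hidden, while plain preprocessor feature macros such as __HAVE_FAST_SOFTINTS stay visible to both. A sketch of the convention (illustrative only, not the real header):

/* Sketch of the _LOCORE convention in a machine/types.h-style header. */
#ifndef _LOCORE

/* C-only material: typedefs, prototypes, inline functions. */
typedef unsigned long __sample_register_t;      /* made-up example typedef */

#endif /* _LOCORE */

/* Preprocessor-only feature macros remain visible to assembly too. */
#define __HAVE_FAST_SOFTINTS
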
diff -r 49d294f88e19 -r 9995027a1c7f sys/arch/sparc64/sparc64/cpu.c
--- a/sys/arch/sparc64/sparc64/cpu.c    Sat Jun 18 18:43:41 2011 +0000
+++ b/sys/arch/sparc64/sparc64/cpu.c    Sat Jun 18 18:51:17 2011 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: cpu.c,v 1.98 2010/11/06 11:46:03 uebayasi Exp $ */
+/*     $NetBSD: cpu.c,v 1.99 2011/06/18 18:51:18 nakayama Exp $ */
 
 /*
  * Copyright (c) 1996
@@ -52,7 +52,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.98 2010/11/06 11:46:03 uebayasi Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.99 2011/06/18 18:51:18 nakayama Exp $");
 
 #include "opt_multiprocessor.h"
 
@@ -173,6 +173,7 @@
        cpi->ci_curlwp = NULL;
        cpi->ci_cpuid = portid;
        cpi->ci_fplwp = NULL;
+       cpi->ci_eintstack = NULL;
        cpi->ci_spinup = NULL;
        cpi->ci_paddr = pa0;
        cpi->ci_self = cpi;
diff -r 49d294f88e19 -r 9995027a1c7f sys/arch/sparc64/sparc64/genassym.cf
--- a/sys/arch/sparc64/sparc64/genassym.cf      Sat Jun 18 18:43:41 2011 +0000
+++ b/sys/arch/sparc64/sparc64/genassym.cf      Sat Jun 18 18:51:17 2011 +0000
@@ -1,4 +1,4 @@
-#      $NetBSD: genassym.cf,v 1.64 2011/01/14 02:06:32 rmind Exp $
+#      $NetBSD: genassym.cf,v 1.65 2011/06/18 18:51:18 nakayama Exp $
 
 #
 # Copyright (c) 1997 The NetBSD Foundation, Inc.
@@ -112,6 +112,7 @@
 define PAGE_SIZE       PAGE_SIZE
 
 # Important offsets into the lwp and proc structs & associated constants
+define L_CTXSWTCH              offsetof(struct lwp, l_ctxswtch)
 define L_PCB                   offsetof(struct lwp, l_addr)
 define L_PROC                  offsetof(struct lwp, l_proc)
 define L_TF                    offsetof(struct lwp, l_md.md_tf)
@@ -146,11 +147,12 @@
 define CI_NUMBER       offsetof(struct cpu_info, ci_data.cpu_index)
 define CI_FPLWP        offsetof(struct cpu_info, ci_fplwp)
 define CI_UPAID        offsetof(struct cpu_info, ci_cpuid)
+define CI_MTX_COUNT    offsetof(struct cpu_info, ci_mtx_count)
 define CI_SPINUP       offsetof(struct cpu_info, ci_spinup)
 define CI_PADDR        offsetof(struct cpu_info, ci_paddr)
 define CI_WANT_AST     offsetof(struct cpu_info, ci_want_ast)
 define CI_WANT_RESCHED offsetof(struct cpu_info, ci_want_resched)
-define CI_EINTRSTACK   offsetof(struct cpu_info, ci_eintstack)
+define CI_EINTSTACK    offsetof(struct cpu_info, ci_eintstack)
 define CI_IDLELWP      offsetof(struct cpu_info, ci_data.cpu_idlelwp)
 define CI_NFAULT       offsetof(struct cpu_info, ci_data.cpu_nfault)
 define CI_NINTR        offsetof(struct cpu_info, ci_data.cpu_nintr)
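
genassym.cf is processed into assym.h, turning each "define" line into a plain numeric constant the assembler can use; the rename from CI_EINTRSTACK to CI_EINTSTACK simply makes the assembly symbol match the ci_eintstack field it refers to. Each entry is essentially an offsetof() evaluated at build time, e.g. (stand-alone sketch with an abbreviated, made-up struct):

#include <stddef.h>
#include <stdio.h>

/* Abbreviated stand-in; the real struct cpu_info lives in <machine/cpu.h>. */
struct cpu_info_sketch {
        void *ci_self;
        void *ci_eintstack;
};

int
main(void)
{
        /*
         * genassym turns "define CI_EINTSTACK offsetof(...)" into a cpp
         * constant in assym.h; locore.s then uses it as a load/store
         * offset from CPUINFO_VA.
         */
        printf("CI_EINTSTACK = %zu\n",
            offsetof(struct cpu_info_sketch, ci_eintstack));
        return 0;
}
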
diff -r 49d294f88e19 -r 9995027a1c7f sys/arch/sparc64/sparc64/intr.c
--- a/sys/arch/sparc64/sparc64/intr.c   Sat Jun 18 18:43:41 2011 +0000
+++ b/sys/arch/sparc64/sparc64/intr.c   Sat Jun 18 18:51:17 2011 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: intr.c,v 1.62 2009/12/03 05:06:16 mrg Exp $ */
+/*     $NetBSD: intr.c,v 1.63 2011/06/18 18:51:18 nakayama Exp $ */
 
 /*
  * Copyright (c) 1992, 1993
@@ -41,7 +41,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.62 2009/12/03 05:06:16 mrg Exp $");
+__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.63 2011/06/18 18:51:18 nakayama Exp $");
 
 #include "opt_ddb.h"
 #include "opt_multiprocessor.h"
@@ -269,3 +269,46 @@
 
        send_softint(-1, ih->ih_pil, ih);
 }
+
+#ifdef __HAVE_FAST_SOFTINTS
+/*
+ * MD implementation of FAST software interrupt framework
+ */
+
+int softint_fastintr(void *);
+
+void
+softint_init_md(lwp_t *l, u_int level, uintptr_t *machdep)
+{
+       struct intrhand *ih;
+       int pil;
+
+       switch (level) {
+       case SOFTINT_BIO:
+               pil = IPL_SOFTBIO;
+               break;
+       case SOFTINT_NET:
+               pil = IPL_SOFTNET;
+               break;
+       case SOFTINT_SERIAL:
+               pil = IPL_SOFTSERIAL;
+               break;
+       case SOFTINT_CLOCK:
+               pil = IPL_SOFTCLOCK;
+               break;
+       default:
+               panic("softint_init_md");
+       }
+
+       ih = sparc_softintr_establish(pil, softint_fastintr, l);
+       *machdep = (uintptr_t)ih;
+}
+
+void
+softint_trigger(uintptr_t machdep)
+{
+       struct intrhand *ih = (struct intrhand *)machdep;
+
+       send_softint(-1, ih->ih_pil, ih);
+}
+#endif /* __HAVE_FAST_SOFTINTS */
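
Taken together with the MI side in kern_softint.c, the intended call path looks roughly like this (my reading of the __HAVE_FAST_SOFTINTS contract, not literal MI code):

/*
 * Rough call path under __HAVE_FAST_SOFTINTS (sketch):
 *
 *   per-CPU softint setup (MI)        creates one softint LWP per level
 *     -> softint_init_md(l, level, &machdep)
 *          MD (above): sparc_softintr_establish(pil, softint_fastintr, l),
 *          so the vector for that soft PIL carries the softint LWP.
 *
 *   softint_schedule(cookie) (MI)     marks the handler pending
 *     -> softint_trigger(machdep)
 *          MD (above): send_softint(-1, ih->ih_pil, ih) raises a software
 *          interrupt on the current CPU at the handler's soft IPL.
 *
 *   the soft interrupt is taken
 *     -> softint_fastintr(l)
 *          MD (locore.s, below): switches to the softint LWP, borrowing the
 *          interrupted LWP's VM context, and calls the MI dispatcher to run
 *          the queued handlers.
 */
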
diff -r 49d294f88e19 -r 9995027a1c7f sys/arch/sparc64/sparc64/locore.s
--- a/sys/arch/sparc64/sparc64/locore.s Sat Jun 18 18:43:41 2011 +0000
+++ b/sys/arch/sparc64/sparc64/locore.s Sat Jun 18 18:51:17 2011 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: locore.s,v 1.333 2011/05/12 05:43:54 mrg Exp $ */
+/*     $NetBSD: locore.s,v 1.334 2011/06/18 18:51:18 nakayama Exp $    */
 
 /*
  * Copyright (c) 2006-2010 Matthew R. Green
@@ -75,6 +75,7 @@
 
 #include "assym.h"
 #include <machine/param.h>
+#include <machine/types.h>
 #include <sparc64/sparc64/intreg.h>
 #include <sparc64/sparc64/timerreg.h>
 #include <machine/ctlreg.h>
@@ -1171,11 +1172,16 @@
        xor     %g7, WSTATE_KERN, %g3;                          /* Are we on the user stack ? */ \
        \
        sra     %g5, 0, %g5;                                    /* Sign extend the damn thing */ \
-       or      %g3, %g4, %g4;                                  /* Definitely not off the interrupt stack */ \
+       orcc    %g3, %g4, %g0;                                  /* Definitely not off the interrupt stack */ \
        \
-       movrz   %g4, %sp, %g6; \
+       sethi   %hi(CPUINFO_VA + CI_EINTSTACK), %g4; \
+       bz,a,pt %xcc, 1f; \
+        mov    %sp, %g6; \
        \
-       add     %g6, %g5, %g5;                                  /* Allocate a stack frame */ \
+       ldx     [%g4 + %lo(CPUINFO_VA + CI_EINTSTACK)], %g4; \
+       movrnz  %g4, %g4, %g6;                                  /* Use saved intr stack if exists */ \
+       \
+1:     add     %g6, %g5, %g5;                                  /* Allocate a stack frame */ \
        btst    1, %g6; \
        bnz,pt  %icc, 1f; \
 \
@@ -1275,8 +1281,11 @@
        or      %g5, %lo((stackspace)), %g5; \
        sub     %g1, %g6, %g2;                                  /* Determine if we need to switch to intr stack or not */ \
        dec     %g7;                                            /* Make it into a mask */ \
+       sethi   %hi(CPUINFO_VA + CI_EINTSTACK), %g3; \
        andncc  %g2, %g7, %g0;                                  /* XXXXXXXXXX This assumes kernel addresses are unique from user addresses */ \
+       LDPTR   [%g3 + %lo(CPUINFO_VA + CI_EINTSTACK)], %g3; \
        rdpr    %wstate, %g7;                                   /* Find if we're from user mode */ \
+       movrnz  %g3, %g3, %g1;                                  /* Use saved intr stack if exists */ \
        sra     %g5, 0, %g5;                                    /* Sign extend the damn thing */ \
        movnz   %xcc, %g1, %g6;                                 /* Stay on interrupt stack? */ \
        cmp     %g7, WSTATE_KERN;                               /* User or kernel sp? */ \
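
Both trap-setup macro changes above serve the same purpose: once softint_fastintr has stashed the live interrupt stack pointer in ci_eintstack, any interrupt taken while the softint LWP runs must keep using that stack rather than starting again from the top of the per-CPU interrupt stack. In pseudo-C, the part the diff adds is roughly the following (sketch only; the existing user/kernel and already-on-intstack checks in the macros are unchanged):

#include <stddef.h>

/* Abbreviated stand-in, not the real sparc64 struct cpu_info. */
struct cpu_info_sketch {
        char *ci_eintstack;     /* set by softint_fastintr, NULL otherwise */
};

static char *
pick_intr_stack_base(struct cpu_info_sketch *ci, char *default_intstack_top)
{
        /* movrnz: a saved interrupt stack, if any, overrides the default. */
        return ci->ci_eintstack != NULL ? ci->ci_eintstack : default_intstack_top;
}
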
@@ -3375,11 +3384,32 @@
        CASPTR  [%l4] ASI_N, %l2, %l7   ! Grab the entire list
        cmp     %l7, %l2
        bne,pn  CCCR, 1b
-        .empty
+        add    %sp, CC64FSZ+STKB, %o2  ! tf = %sp + CC64FSZ + STKB
+       LDPTR   [%l2 + IH_PEND], %l7
+       cmp     %l7, -1                 ! Last slot?
+       be,pt   CCCR, 3f
+        membar #LoadStore
+
+       /*
+        * Reverse a pending list since setup_sparcintr/send_softint
+        * makes it in a LIFO order.
+        */
+       mov     -1, %o0                 ! prev = -1
+1:     STPTR   %o0, [%l2 + IH_PEND]    ! ih->ih_pending = prev
+       mov     %l2, %o0                ! prev = ih
+       mov     %l7, %l2                ! ih = ih->ih_pending
+       LDPTR   [%l2 + IH_PEND], %l7
+       cmp     %l7, -1                 ! Last slot?
+       bne,pn  CCCR, 1b
+        membar #LoadStore
+       ba,pt   CCCR, 3f
+        mov    %o0, %l7                ! save ih->ih_pending
+
 2:
        add     %sp, CC64FSZ+STKB, %o2  ! tf = %sp + CC64FSZ + STKB
        LDPTR   [%l2 + IH_PEND], %l7    ! save ih->ih_pending
        membar  #LoadStore
+3:
        STPTR   %g0, [%l2 + IH_PEND]    ! Clear pending flag
        membar  #Sync
        LDPTR   [%l2 + IH_FUN], %o4     ! ih->ih_fun
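
The 1:/2:/3: block above walks the chain of pending intrhands linked through ih_pending. setup_sparcintr() and send_softint() push new entries at the head, so the chain comes out LIFO; the added loop reverses it in place so that handlers are dispatched in the order they were queued. The end-of-chain marker is -1 rather than NULL, since a NULL ih_pending means "not pending at all". A C rendering of just the reversal (sketch):

/* Sketch of the in-place reversal done in the assembly above. */
#define PEND_END        ((struct intrhand *)-1)         /* end-of-chain marker */

struct intrhand {
        struct intrhand *ih_pending;    /* next pending handler, or PEND_END */
        /* ... other members elided ... */
};

static struct intrhand *
reverse_pending(struct intrhand *ih)
{
        struct intrhand *prev = PEND_END, *next;

        while (ih != PEND_END) {
                next = ih->ih_pending;
                ih->ih_pending = prev;  /* relink backwards */
                prev = ih;
                ih = next;
        }
        return prev;            /* head of the chain, now in FIFO order */
}
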
@@ -5075,6 +5105,7 @@
  * Arguments:
  *     i0      'struct lwp *' of the current LWP
  *     i1      'struct lwp *' of the LWP to switch to
+ *     i2      'bool' of the flag returning to a softint LWP or not
  * Returns:
  *     the old lwp switched away from
  */
@@ -5090,6 +5121,7 @@
         *      %l7 = %hi(CURLWP)
         *      %i0 = oldlwp
         *      %i1 = lwp
+        *      %i2 = returning
         *      %o0 = tmp 1
         *      %o1 = tmp 2
         *      %o2 = tmp 3
@@ -5136,7 +5168,9 @@
        and     %o3, CWP, %o3
        wrpr    %g0, %o3, %cleanwin
        dec     1, %o3                                  ! NWINDOWS-1-1
-       wrpr    %o3, %cansave
+       /* Skip the rest if returning to an interrupted LWP. */
+       brnz,pn %i2, Lsw_noras
+        wrpr   %o3, %cansave
 
        /* finally, enable traps */
        wrpr    %g0, PSTATE_INTR, %pstate
@@ -5173,6 +5207,107 @@
        ret
         restore %i0, %g0, %o0                          ! return old curlwp
 
+#ifdef __HAVE_FAST_SOFTINTS
+/*
+ * Switch to the LWP assigned to handle interrupts from the given
+ * source.  We borrow the VM context from the interrupted LWP.
+ *
+ * int softint_fastintr(void *l)
+ *
+ * Arguments:
+ *     i0      softint lwp
+ */
+ENTRY(softint_fastintr)
+       save    %sp, -CC64FSZ, %sp
+       set     CPUINFO_VA, %l0                 ! l0 = curcpu()
+       rdpr    %pil, %l7                       ! l7 = splhigh()
+       wrpr    %g0, PIL_HIGH, %pil
+       ld      [%l0 + CI_IDEPTH], %l1
+       LDPTR   [%l0 + CI_EINTSTACK], %l6       ! l6 = ci_eintstack
+       dec     %l1
+       add     %sp, -CC64FSZ, %l2              ! ci_eintstack = sp - CC64FSZ
+       st      %l1, [%l0 + CI_IDEPTH]          ! adjust ci_idepth
+       STPTR   %l2, [%l0 + CI_EINTSTACK]       ! save intstack for nested intr
+
+       mov     %i0, %o0                        ! o0/i0 = softint lwp
+       mov     %l7, %o1                        ! o1/i1 = ipl
+       save    %sp, -CC64FSZ, %sp              ! make one more register window
+       flushw                                  ! and save all
+
+       sethi   %hi(CURLWP), %l7
+       sethi   %hi(CPCB), %l6


