Source-Changes-HG archive

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]

[src/nathanw_sa]: src/sys/arch/arm/arm32 Back out the changes that implement ...



details:   https://anonhg.NetBSD.org/src/rev/3c885ed93181
branches:  nathanw_sa
changeset: 506115:3c885ed93181
user:      thorpej <thorpej%NetBSD.org@localhost>
date:      Mon Aug 05 19:51:45 2002 +0000

description:
Back out the changes that implement the scheduler locking protocol.
The register usage in this file is very different than on the trunk,
and so the changes made to the trunk don't really apply here.

Fix up some comments while here.

diffstat:

 sys/arch/arm/arm32/cpuswitch.S |  87 +++++++++++++----------------------------
 1 files changed, 27 insertions(+), 60 deletions(-)

diffs (201 lines):

diff -r 6bf17a0633c6 -r 3c885ed93181 sys/arch/arm/arm32/cpuswitch.S
--- a/sys/arch/arm/arm32/cpuswitch.S    Mon Aug 05 18:18:06 2002 +0000
+++ b/sys/arch/arm/arm32/cpuswitch.S    Mon Aug 05 19:51:45 2002 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: cpuswitch.S,v 1.3.2.9 2002/06/24 22:03:50 nathanw Exp $        */
+/*     $NetBSD: cpuswitch.S,v 1.3.2.10 2002/08/05 19:51:45 thorpej Exp $       */
 
 /*
  * Copyright (c) 1994-1998 Mark Brinicombe.
@@ -233,33 +233,29 @@
  */
 /* LINTSTUB: Ignore */
 ASENTRY_NP(idle)
-
-#if defined(LOCKDEBUG)
-       bl      _C_LABEL(sched_unlock_idle)
-#endif
        /* Enable interrupts */
        IRQenable
 
+       /* XXX - r1 needs to be preserved for cpu_switch */
+       mov     r7, r1
        ldr     r3, Lcpufuncs
        mov     r0, #0
        add     lr, pc, #Lidle_slept - . - 8
        ldr     pc, [r3, #CF_SLEEP]
 
-       /* should also call the uvm pageidlezero stuff */
-
 Lidle_slept:
+       mov     r1, r7
 
        /* Disable interrupts while we check for an active queue */
        IRQdisable
-#if defined(LOCKDEBUG)
-       bl      _C_LABEL(sched_lock_idle)
-#endif
        ldr     r7, Lwhichqs
        ldr     r3, [r7]
        teq     r3, #0x00000000
+       bne     sw1
 
-       beq     _ASM_LABEL(idle)
-       b       Lidle_ret
+       /* All processes are still asleep so idle a while longer */
+       b       _ASM_LABEL(idle)
+
 
 /*
  * Find a new lwp to run, save the current context and
@@ -295,15 +291,9 @@
        ldr     r7, Lcurpcb
        str     r0, [r7]
 
-       /* stash the old proc */
+       /* Stash the old lwp, then lower the spl level to spl0 and get the current spl level. */
        mov     r7, r1
 
-#if defined(LOCKDEBUG)
-       /* release the sched_lock before handling interrupts */
-       bl      _C_LABEL(sched_unlock_idle)
-#endif
-
-       /* Lower the spl level to spl0 and get the current spl level. */
 #ifdef __NEWINTR
        mov     r0, #(IPL_NONE)
        bl      _C_LABEL(_spllower)
@@ -319,19 +309,14 @@
        /* Push the old spl level onto the stack */
        str     r0, [sp, #-0x0004]!
 
-       mov     r5, r7
+       mov     r1, r7
 
        /* First phase : find a new lwp */
 
-       /* rem: r5 = old lwp */
-
+       /* rem: r1 = old lwp */
 
 Lswitch_search:
        IRQdisable
-#if defined(LOCKDEBUG)
-       bl      _C_LABEL(sched_lock_idle)
-#endif
-
 
        /* Do we have any active queues  */
        ldr     r7, Lwhichqs
@@ -340,11 +325,8 @@
        /* If not we must idle until we do. */
        teq     r3, #0x00000000
        beq     _ASM_LABEL(idle)
-Lidle_ret:
 
-       /* restore old proc */
-       mov     r1, r5
-
+sw1:
        /* rem: r1 = old lwp */
        /* rem: r3 = whichqs */
        /* rem: interrupts are disabled */
@@ -430,15 +412,6 @@
        str     r0, [r6, #(L_BACK)]
 
 switch_resume:
-#if defined(LOCKDEBUG)
-       /*
-        * unlock the sched_lock, but leave interrupts off, for now.
-        */
-       mov     r7, r1
-       bl      _C_LABEL(sched_unlock_idle)
-       mov     r1, r7
-#endif
-
        /* l->l_cpu initialized in fork1() for single-processor */
 
        /* Process is now on a processor. */
@@ -616,26 +589,16 @@
 ENTRY(cpu_preempt)
        stmfd   sp!, {r4-r7, lr}
 
-       /* stash the lwp parameters */
+       /* Save the lwp parameters, then lower the spl level to spl0 and get the current spl level. */
        mov     r6, r0          /* save old lwp */
        mov     r7, r1          /* save new lwp */
 
-#if defined(LOCKDEBUG)
-       bl      _C_LABEL(sched_unlock_idle)
-#endif
-
-       /* Lower the spl level to spl0 and get the current spl level. */
-#ifdef __NEWINTR
-       mov     r0, #(IPL_NONE)
-       bl      _C_LABEL(_spllower)
-#else  
 #ifdef spl0
        mov     r0, #(_SPL_0)
        bl      _C_LABEL(splx)
 #else
        bl      _C_LABEL(spl0)
-#endif /* spl0 */
-#endif /* __NEWINTR */
+#endif
 
        /* Push the old spl level onto the stack */
        str     r0, [sp, #-0x0004]!
@@ -646,9 +609,6 @@
        /* rem: r1 = new lwp */
 
        IRQdisable
-#if defined(LOCKDEBUG)
-       bl      _C_LABEL(sched_lock_idle)
-#endif
 
        /* Do we have any active queues? */
        ldr     r7, Lwhichqs
@@ -719,12 +679,12 @@
        .word   _C_LABEL(kernel_map)
 
 /*
- * void switch_exit(struct proc *p);
- * Switch to proc0's saved context and deallocate the address space and kernel
- * stack for p.  Then jump into cpu_switch(), as if we were in proc0 all along.
+ * void switch_exit(struct lwp *l, struct lwp *l0);
+ * Switch to lwp0's saved context and deallocate the address space and kernel
+ * stack for l.  Then jump into cpu_switch(), as if we were in lwp0 all along.
  */
 
-/* LINTSTUB: Func: void switch_exit(struct proc *p) */
+/* LINTSTUB: Func: void switch_exit(struct lwp *l, struct lwp *l0) */
 ENTRY(switch_exit)
        /*
         * r0 = lwp
@@ -788,9 +748,16 @@
        ldr     r1, Lcurlwp
        str     r0, [r1]
 
-        ldr     r1, Llwp0
+       ldr     r1, Llwp0
        b       Lswitch_search
 
+/*
+ * void switch_lwp_exit(struct lwp *l, struct lwp *l0);
+ * Switch to lwp0's saved context and deallocate the address space and kernel
+ * stack for l.  Then jump into cpu_switch(), as if we were in lwp0 all along.
+ */
+
+/* LINTSTUB: Func: void switch_lwp_exit(struct lwp *l, struct lwp *l0) */
 ENTRY(switch_lwp_exit)
        /*
         * r0 = lwp
@@ -854,7 +821,7 @@
        mov     r0, #0x00000000
        str     r0, [r1]
 
-       ldr     r5, Llwp0
+       ldr     r1, Llwp0
        b       Lswitch_search
 
 /* LINTSTUB: Func: void savectx(struct pcb *pcb) */



Home | Main Index | Thread Index | Old Index