Source-Changes-HG archive

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]

[src/nathanw_sa]: src/sys/arch/vax Fix a couple of obvious problems with cpu_...



details:   https://anonhg.NetBSD.org/src/rev/bbce1f8c8b56
branches:  nathanw_sa
changeset: 506637:bbce1f8c8b56
user:      thorpej <thorpej%NetBSD.org@localhost>
date:      Mon Dec 30 18:49:33 2002 +0000

description:
Fix a couple of obvious problems with cpu_preempt():
* Change the calling convention so that it will work with the REI
  executed at the end (i.e. push the PSL and use JSB, rather than CALLS,
  as is done for cpu_switch()).  This makes cpu_preempt() a macro, with
  the actual routine being named Swtchto.
* Make sure to release the sched_lock, as appropriate.

diffstat:

 sys/arch/vax/include/macros.h |   9 ++++++---
 sys/arch/vax/vax/subr.S       |  19 +++++++++++++------
 2 files changed, 19 insertions(+), 9 deletions(-)

diffs (79 lines):

diff -r c769b994c1dd -r bbce1f8c8b56 sys/arch/vax/include/macros.h
--- a/sys/arch/vax/include/macros.h     Mon Dec 30 18:34:58 2002 +0000
+++ b/sys/arch/vax/include/macros.h     Mon Dec 30 18:49:33 2002 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: macros.h,v 1.23.8.6 2002/10/18 02:40:30 nathanw Exp $  */
+/*     $NetBSD: macros.h,v 1.23.8.7 2002/12/30 18:49:33 thorpej Exp $  */
 
 /*
  * Copyright (c) 1994, 1998, 2000 Ludd, University of Lule}, Sweden.
@@ -344,12 +344,15 @@
 #define remrunqueue(p) \
        __asm__ __volatile("movl %0,%%r0;jsb Remrq" :: "g"(p):"r0","r1","r2");
 
-#define cpu_switch(p,newp) ({                                          \
-       register int ret;                                                       \
+#define cpu_switch(p, newp) ({                                                 \
+       register int ret;                                               \
        __asm__ __volatile("movpsl -(%%sp);jsb Swtch; movl %%r0,%0"     \
            : "=g"(ret) ::"r0","r1","r2","r3","r4","r5");               \
        ret; })
 
+#define        cpu_preempt(p, newp)                                            \
+       __asm __volatile("movpsl -(%%sp); movl %0,%%r0; jsb Swtchto"    \
+           :: "g" (newp) : "r0", "r1", "r2", "r3", "r4", "r5")
 
 /*
  * Interlock instructions. Used both in multiprocessor environments to
diff -r c769b994c1dd -r bbce1f8c8b56 sys/arch/vax/vax/subr.S
--- a/sys/arch/vax/vax/subr.S   Mon Dec 30 18:34:58 2002 +0000
+++ b/sys/arch/vax/vax/subr.S   Mon Dec 30 18:49:33 2002 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: subr.S,v 1.2.2.6 2002/06/24 22:08:58 nathanw Exp $        */
+/*     $NetBSD: subr.S,v 1.2.2.7 2002/12/30 18:49:34 thorpej Exp $        */
 
 /*
  * Copyright (c) 1994 Ludd, University of Lule}, Sweden.
@@ -335,8 +335,8 @@
        brb     lp                      # check sched_whichqs again
 
 #
-# cpu_switch, cpu_exit and the idle loop implemented in assembler 
-# for efficiency.  This is called at IPL_HIGH.
+# cpu_switch, cpu_preempt, cpu_exit and the idle loop implemented in
+# assembler for efficiency.  This is called at IPL_HIGH.
 #
 
 JSBENTRY(Swtch)
@@ -408,12 +408,12 @@
        rei
 #endif
 
-ENTRY(cpu_preempt, 0)
+JSBENTRY(Swtchto)
        mfpr    $PR_SSP,%r1             # Get ptr to this cpu_info struct
        clrl    CI_CURLWP(%r1)          # Stop process accounting
        svpctx                          # Now on interrupt stack
 
-       movl    8(%ap),%r0              # get new lwp
+       # New LWP already in %r0
        remque  (%r0),%r2               # remove new lwp from queue
        bneq    1f                      # jmp if queue not empty?
        halt            # XXX
@@ -435,8 +435,15 @@
 
        mtpr    %r3,$PR_PCBB
        ldpctx
+#if defined(LOCKDEBUG)
+       pushl   %r0
+       calls   $0,_C_LABEL(sched_unlock_idle)
+       movl    (%sp)+,%r0
+#elif defined(MULTIPROCESSOR)
+       clrl    _C_LABEL(sched_lock)    # clear sched lock
+#endif
        rei     
-       
+
 #
 # the last routine called by a process.
 #



Home | Main Index | Thread Index | Old Index