Subject: Re: Update: rough SA patch (works on SMP)
To: matthew green <mrg@eterna.com.au>
From: Stephan Uphoff <ups@stups.com>
List: tech-kern
Date: 07/02/2003 14:24:15
Hi,

matthew green wrote:
> 
> OK, i tried this on my laptop (UP :-) and i'm still seeing
> "transcode -v" cpuspin intead of exiting, 

Thanks - I have been able to reproduce and fix this
(and a few other bugs).

> and i've now seen
> a hang while xmms & mplayer were both running... i haven't
> (yet) setup serial console for the laptop (*it* is normally
> the portable serial console.. ;-) so i have no real debug info.

OK - I guess I can find a "victim" computer with video and audio
to test the programs on.
This might take a few days ...
Any debug info would be greatly appreciated.

Hopefully the patch below will fix all your pthread problems.

Unfortunately I had to patch libpthread as well ... you will have to
rebuild and install the library.

Thanks again for testing the patches.


	Stephan



Index: lib/libpthread/pthread_sig.c
===================================================================
RCS file: /cvsroot/src/lib/libpthread/pthread_sig.c,v
retrieving revision 1.14
diff -u -r1.14 pthread_sig.c
--- lib/libpthread/pthread_sig.c	2003/05/27 15:24:25	1.14
+++ lib/libpthread/pthread_sig.c	2003/07/02 18:26:17
@@ -419,7 +419,7 @@
 		error = __sigtimedwait(&wset, info, (timeout) ? &timo : NULL);
 
 		pthread_spinlock(self, &pt_sigwaiting_lock);
-		if ((error && errno != ECANCELED)
+		if ((error && (errno != ECANCELED || self->pt_cancel))
 		    || (!error && __sigismember14(set, info->si_signo)) ) {
 			/*
 			 * Normal function return. Clear pt_sigwmaster,
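
A quick note on what the libpthread change is after: sigwait() is a
cancellation point, so a "master" waiter that comes out of __sigtimedwait()
with ECANCELED while it has its own cancel pending should take the normal
return path and act on the cancellation, instead of re-arming the wait.
Below is a minimal userland sketch of the behaviour this is meant to
restore - not part of the patch, just a test program you can build with
-lpthread:

#include <pthread.h>
#include <signal.h>
#include <stdio.h>

static void *
waiter(void *arg)
{
	sigset_t set;
	int sig;

	(void)arg;
	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	/* sigwait() is a cancellation point; pthread_cancel() must end this. */
	sigwait(&set, &sig);
	return NULL;
}

int
main(void)
{
	pthread_t t;
	sigset_t set;
	void *res;

	/* Block SIGUSR1 so it is only delivered via sigwait(). */
	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	pthread_sigmask(SIG_BLOCK, &set, NULL);

	pthread_create(&t, NULL, waiter, NULL);
	pthread_cancel(t);	/* must not leave the thread spinning */
	pthread_join(t, &res);
	printf("waiter %s\n",
	    res == PTHREAD_CANCELED ? "cancelled cleanly" : "returned");
	return 0;
}
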
Index: sys/kern/kern_exit.c
===================================================================
RCS file: /cvsroot/src/sys/kern/kern_exit.c,v
retrieving revision 1.117
diff -u -r1.117 kern_exit.c
--- sys/kern/kern_exit.c	2003/06/29 22:31:20	1.117
+++ sys/kern/kern_exit.c	2003/07/02 18:27:05
@@ -179,8 +179,11 @@
 	 */
 	sa = 0;
 	if (p->p_sa != NULL) {
+
 		l->l_flag &= ~L_SA;
+#if 0
 		p->p_flag &= ~P_SA;
+#endif
 		sa = 1;
 	}
 
@@ -461,9 +464,18 @@
 	 * them) and then wait for everyone else to finish.  
 	 */
 	LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
+#if 0
 		l2->l_flag &= ~(L_DETACHED|L_SA);
+#endif	
+		l2->l_flag &= ~(L_DETACHED);
+	
+		if(l2->l_flag & L_SA_WANTS_VP)
+		{
+			wakeup(l2);
+		}
+
 		if ((l2->l_stat == LSSLEEP && (l2->l_flag & L_SINTR)) ||
-		    l2->l_stat == LSSUSPENDED) {
+		    l2->l_stat == LSSUSPENDED || l2->l_stat == LSSTOP) {
 			SCHED_LOCK(s);
 			setrunnable(l2);
 			SCHED_UNLOCK(s);
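
The exit1() changes boil down to: before single-threading for exit, wake
every LWP that is still parked (including the new L_SA_WANTS_VP waiters,
and stopped LWPs via setrunnable()) so they can notice P_WEXIT and get out
of the way. In userland terms the pattern is roughly this - a sketch only,
not patch code:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static bool exiting = false;		/* cf. P_WEXIT */

static void *
lwp_like_waiter(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!exiting)		/* cf. tsleep() on a wait channel */
		pthread_cond_wait(&cv, &lock);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int
main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, lwp_like_waiter, NULL);

	pthread_mutex_lock(&lock);
	exiting = true;			/* cf. setting P_WEXIT */
	pthread_cond_broadcast(&cv);	/* cf. wakeup(l2) for L_SA_WANTS_VP */
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	printf("all waiters released\n");
	return 0;
}
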
Index: sys/kern/kern_lwp.c
===================================================================
RCS file: /cvsroot/src/sys/kern/kern_lwp.c,v
retrieving revision 1.8
diff -u -r1.8 kern_lwp.c
--- sys/kern/kern_lwp.c	2003/06/23 11:02:05	1.8
+++ sys/kern/kern_lwp.c	2003/07/02 18:27:05
@@ -295,24 +295,39 @@
 	lwpid_t target_lid;
 	struct lwp *t;
 	struct proc *p;
+	int error;
+	int s;
 
 	p = l->l_proc;
 	target_lid = SCARG(uap, target);
 
+	SCHED_LOCK(s);
+
+
 	LIST_FOREACH(t, &p->p_lwps, l_sibling)
 		if (t->l_lid == target_lid)
 			break;
-
-	if (t == NULL)
-		return (ESRCH);
 
-	if (t->l_stat != LSSLEEP)
-		return (ENODEV);
+	if (t == NULL) {
+		error = ESRCH;
+		goto exit;
+	}
+
+	if (t->l_stat != LSSLEEP) {
+		error = ENODEV;
+		goto exit;
+	}
+
+	if ((t->l_flag & L_SINTR) == 0) {
+		error = EBUSY;
+		goto exit;
+	}
 
-	if ((t->l_flag & L_SINTR) == 0)
-		return (EBUSY);
-
 	setrunnable(t);
+	error = 0;
+exit:
+	SCHED_UNLOCK(s);
+
 
-	return 0;
+	return (error);
 }
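
The sys__lwp_wakeup() rewrite is mostly about holding sched_lock across the
LWP lookup and the state checks, funnelling every failure through a single
exit label so the lock is dropped exactly once and the saved error is
returned. The shape of it as a standalone sketch (wake_one and struct obj
are illustrative names only):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct obj { int id; int ready; };	/* stand-in for struct lwp */

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static int
wake_one(struct obj *tab, int n, int id)
{
	int error, i;

	pthread_mutex_lock(&table_lock);	/* cf. SCHED_LOCK(s) */

	for (i = 0; i < n; i++)
		if (tab[i].id == id)
			break;
	if (i == n) {
		error = ESRCH;			/* no such target */
		goto exit;
	}
	if (!tab[i].ready) {
		error = EBUSY;			/* not in a wakeable state */
		goto exit;
	}
	tab[i].ready = 0;			/* cf. setrunnable(t) */
	error = 0;
exit:
	pthread_mutex_unlock(&table_lock);	/* cf. SCHED_UNLOCK(s) */
	return error;
}

int
main(void)
{
	struct obj tab[2] = { { 1, 0 }, { 2, 1 } };

	/* id 3 does not exist, id 1 is not wakeable, id 2 works. */
	printf("%d %d %d\n", wake_one(tab, 2, 3), wake_one(tab, 2, 1),
	    wake_one(tab, 2, 2));
	return 0;
}
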
Index: sys/kern/kern_sa.c
===================================================================
RCS file: /cvsroot/src/sys/kern/kern_sa.c,v
retrieving revision 1.16
diff -u -r1.16 kern_sa.c
--- sys/kern/kern_sa.c	2003/05/28 22:17:20	1.16
+++ sys/kern/kern_sa.c	2003/07/02 18:27:06
@@ -53,6 +53,9 @@
 
 #include <uvm/uvm_extern.h>
 
+#include <sys/kernel.h>  /* For lbolt hack */
+
+static void sa_vp_donate(struct lwp *);
 static int sa_newcachelwp(struct lwp *);
 static struct lwp *sa_vp_repossess(struct lwp *l);
 
@@ -72,6 +75,23 @@
 #endif
 
 
+
+#define SA_LWP_STATE_LOCK(l,s)      \
+    do {                            \
+           s =  (l)->l_flag ;       \
+           (l)->l_flag &= ~L_SA;    \
+    } while (0)                     
+           
+
+
+#define SA_LWP_STATE_UNLOCK(l,s)      \
+    do {                               \
+	(l)->l_flag |=  ( s & L_SA);   \
+    } while (0)                     
+   
+
+
+
 /*
  * sadata_upcall_alloc:
  *
@@ -139,6 +159,10 @@
 		simple_lock_init(&sa->sa_lock);
 		sa->sa_flag = SCARG(uap, flags) & SA_FLAG_ALL;
 		sa->sa_vp = NULL;
+		
+		sa->sa_old_lwp = NULL;
+		sa->sa_vp_wait_count = 0;
+
 		sa->sa_idle = NULL;
 		sa->sa_woken = NULL;
 		sa->sa_concurrency = 1;
@@ -275,7 +299,9 @@
 void
 sa_yield(struct lwp *l)
 {
+#if 0
 	struct lwp *l2;
+#endif
 	struct proc *p = l->l_proc;
 	struct sadata *sa = p->p_sa;
 	int s, ret;
@@ -284,13 +310,21 @@
 	 * If we're the last running LWP, stick around to recieve
 	 * signals.
 	 */
+#if 0
 	if (p->p_nrlwps == 1) {
+#endif
 		DPRINTFN(1,("sa_yield(%d.%d) going dormant\n",
 		    p->p_pid, l->l_lid));
 		/*
 		 * A signal will probably wake us up. Worst case, the upcall
 		 * happens and just causes the process to yield again.
-		 */
+		 */	
+	SCHED_ASSERT_UNLOCKED();
+
+		sa_vp_donate(l);
+
+	SCHED_ASSERT_UNLOCKED();
+
 		s = splsched();	/* Protect from timer expirations */
 		KDASSERT(sa->sa_vp == l);
 		/*
@@ -299,18 +333,35 @@
 		 * going to sleep. It might make more sense for this to
 		 * be handled inside of tsleep....
 		 */
-		ret = 0;
+		ret = 0;	
+
 		while  ((ret == 0) && (p->p_userret == NULL)) {
 			sa->sa_idle = l;
 			l->l_flag &= ~L_SA;
+			SCHED_ASSERT_UNLOCKED();
+		
+
 			ret = tsleep((caddr_t) l, PUSER | PCATCH, "sawait", 0);
+
+			SCHED_ASSERT_UNLOCKED();
+
 			l->l_flag |= L_SA;
 			sa->sa_idle = NULL;
-			sa->sa_vp = l;
+			splx(s);
+			sa_vp_donate(l);
+			KDASSERT(sa->sa_vp == l);
+
+		
+			s = splsched();	/* Protect from timer expirations */
+
 		}
+
+
+		l->l_flag |= L_SA_UPCALL; 
 		splx(s);
 		DPRINTFN(1,("sa_yield(%d.%d) returned\n",
 		    p->p_pid, l->l_lid));
+#if 0
 	} else {
 		DPRINTFN(1,("sa_yield(%d.%d) stepping aside\n", p->p_pid, l->l_lid));
 	
@@ -334,6 +385,7 @@
 		KDASSERT(p->p_flag & P_WEXIT);
 		/* mostly NOTREACHED */
 	}
+#endif
 }
 
 
@@ -371,11 +423,15 @@
 	struct sadata_upcall *sau;
 	struct sadata *sa = l->l_proc->p_sa;
 	stack_t st;
+	int s;
+
+	/* XXX prevent recursive upcalls if we sleep for memory */
+	SA_LWP_STATE_LOCK(l,s);
 
-	l->l_flag &= ~L_SA; /* XXX prevent recursive upcalls if we sleep for
-			      memory */
 	sau = sadata_upcall_alloc(1);
-	l->l_flag |= L_SA;
+
+	SA_LWP_STATE_UNLOCK(l,s);
+
 
 	if (sa->sa_nstacks == 0) {
 		/* assign to assure that it gets freed */
@@ -477,6 +533,12 @@
 	    type, sa->sa_vp ? sa->sa_vp->l_lid : 0));
 	SCHED_ASSERT_LOCKED();
 
+	if (p->p_flag & P_WEXIT)
+	{
+		mi_switch(l,0);
+		return;
+	}
+
 	if (sa->sa_vp == l) {
 		/*
 		 * Case 1: we're blocking for the first time; generate
@@ -497,6 +559,8 @@
 			 * XXX the recovery from this situation deserves
 			 * XXX more thought.
 			 */
+
+			/* XXXUPSXXX Should only happen with concurrency > 1 */
 #ifdef DIAGNOSTIC
 			printf("sa_switch(%d.%d): no cached LWP for upcall.\n",
 			    p->p_pid, l->l_lid);
@@ -547,6 +611,7 @@
 
 		l->l_flag |= L_SA_BLOCKING;
 		l2->l_priority = l2->l_usrpri;
+		sa->sa_vp = l2;
 		setrunnable(l2);
 		PRELE(l2); /* Remove the artificial hold-count */
 
@@ -564,9 +629,14 @@
 		 */
 		if (sa->sa_idle)
 			l2 = NULL;
-		else
-			l2 = sa->sa_vp;
+		else {
+			l2 = sa->sa_vp; /* XXXUPSXXX Unfair advantage for l2 ? */
+			if((l2->l_stat != LSRUN) || ((l2->l_flag & L_INMEM) == 0))
+				l2 = NULL;
+		}
 	} else {
+
+#if 0
 		/*
 		 * Case 3: The VP is empty. As in case 2, we were
 		 * woken up and called tsleep again, but additionally,
@@ -585,12 +655,29 @@
 			mi_switch(l, NULL);
 			return;
 		}
+#else
+		mi_switch(l, NULL);
+		return;
+#endif
+		
 	sa_upcall_failed:
+#if 0
 		cpu_setfunc(l2, sa_yieldcall, l2);
 
 		l2->l_priority = l2->l_usrpri;
 		setrunnable(l2);
 		PRELE(l2); /* Remove the artificial hold-count */
+#else
+		
+		/* sa_putcachelwp does not block because we have a hold count on l2 */
+		sa_putcachelwp(p, l2);
+		PRELE(l2); /* Remove the artificial hold-count */
+
+		mi_switch(l, NULL);
+		return;
+
+
+#endif
 	}
 
 
@@ -621,8 +708,10 @@
 	 */
 	if (l->l_flag & L_SA_BLOCKING)
 		l->l_flag |= L_SA_UPCALL;
+#if 0
 	else
 		sa_vp_repossess(l);
+#endif
 }
 
 void
@@ -631,6 +720,7 @@
 	struct lwp *l;
 	struct proc *p;
 	struct sadata *sa;
+	int s;
 
 	l = arg;
 	p = l->l_proc;
@@ -643,11 +733,15 @@
 		/* Allocate the next cache LWP */
 		DPRINTFN(6,("sa_switchcall(%d.%d) allocating LWP\n",
 		    p->p_pid, l->l_lid));
+		SA_LWP_STATE_LOCK(l,s);
 		sa_newcachelwp(l);
+		SA_LWP_STATE_UNLOCK(l,s);
+
 	}
 	upcallret(l);
 }
 
+#if 0
 void
 sa_yieldcall(void *arg)
 {
@@ -672,6 +766,7 @@
 	sa_yield(l);
 	upcallret(l);
 }
+#endif
 
 static int
 sa_newcachelwp(struct lwp *l)
@@ -768,12 +863,16 @@
 	void *stack, *ap;
 	ucontext_t u, *up;
 	int i, nsas, nint, nevents, type;
+	int s;
+	int sig;
 
 	p = l->l_proc;
 	sa = p->p_sa;
+	
+	SCHED_ASSERT_UNLOCKED();
 
 	KERNEL_PROC_LOCK(l);
-	l->l_flag &= ~L_SA;
+	SA_LWP_STATE_LOCK(l,s);
 
 	DPRINTFN(7,("sa_upcall_userret(%d.%d %x) \n", p->p_pid, l->l_lid,
 	    l->l_flag));
@@ -784,9 +883,13 @@
 		DPRINTFN(8,("sa_upcall_userret(%d.%d) unblocking\n",
 		    p->p_pid, l->l_lid));
 
+
 		sau = sadata_upcall_alloc(1);
-		
+		sau->sau_arg = NULL;
+
 		while (sa->sa_nstacks == 0) {
+			int status;
+
 			/*
 			 * This should be a transient condition, so we'll just
 			 * sleep until some stacks come in; presumably, some
@@ -805,16 +908,49 @@
 			 * Ideally, tsleep() would have a variant that took
 			 * a LWP to switch to.
 			 */
-			l->l_flag &= ~L_SA;
+
+			if (p->p_flag & P_WEXIT)
+			{
+				sadata_upcall_free(sau);
+				lwp_exit(l);
+			}
+
 			DPRINTFN(7, ("sa_upcall_userret(%d.%d) sleeping"
 			    " for stacks\n", l->l_proc->p_pid, l->l_lid));
-			tsleep((caddr_t) &sa->sa_nstacks, PWAIT|PCATCH, 
+			status = tsleep((caddr_t) &sa->sa_nstacks, PWAIT|PCATCH, 
 			    "sastacks", 0);
-			if (p->p_flag & P_WEXIT)
-				lwp_exit(l);
-			l->l_flag |= L_SA;
+			if(status)
+			{
+				if (p->p_flag & P_WEXIT)
+				{
+					sadata_upcall_free(sau);
+					lwp_exit(l);
+				}
+				/* Signal pending - can't sleep */
+				/* Wait a while .. things might get better */  
+				 tsleep((caddr_t) &lbolt, PWAIT, "lbolt: sastacks", 0);
+			}	
+
+			/* XXXUPSXXX NEED TO STOP THE LWP HERE ON REQUEST */
+
+		
 		}
+
+		if (p->p_flag & P_WEXIT) {
+			sadata_upcall_free(sau);
+			lwp_exit(l);
+		}
+
+		SCHED_ASSERT_UNLOCKED();
+
 		l2 = sa_vp_repossess(l);
+		
+		SCHED_ASSERT_UNLOCKED();
+			
+		if(l2 == NULL) {
+			sadata_upcall_free(sau);
+			lwp_exit(l);
+		}
 
 		KDASSERT(sa->sa_nstacks > 0);
 
@@ -835,15 +971,40 @@
 			/* NOTREACHED */
 		}
 		l->l_flag &= ~L_SA_BLOCKING;
-	}
 
-	KDASSERT(SIMPLEQ_EMPTY(&sa->sa_upcalls) == 0);
+		/* We might have sneaked past signal handling and userret */
+		SA_LWP_STATE_UNLOCK(l,s);
+		KERNEL_PROC_UNLOCK(l);
+		/* take pending signals */
+		while ((sig = CURSIG(l)) != 0)
+			postsig(sig);
+
+		/* Invoke per-process kernel-exit handling, if any */
+		if (p->p_userret)
+			(p->p_userret)(l, p->p_userret_arg);
 
-	sau = SIMPLEQ_FIRST(&sa->sa_upcalls);
-	SIMPLEQ_REMOVE_HEAD(&sa->sa_upcalls, sau_next);
+		KERNEL_PROC_LOCK(l);
+		SA_LWP_STATE_LOCK(l,s);
+
+
+
+	}
+
 	if (SIMPLEQ_EMPTY(&sa->sa_upcalls))
+	{		
+
 		l->l_flag &= ~L_SA_UPCALL;
 
+		sa_vp_donate(l);	
+		
+		SA_LWP_STATE_UNLOCK(l,s);
+		KERNEL_PROC_UNLOCK(l);
+		return;
+	}	
+
+	sau = SIMPLEQ_FIRST(&sa->sa_upcalls);
+	SIMPLEQ_REMOVE_HEAD(&sa->sa_upcalls, sau_next);
+	
 	if (sau->sau_flags & SAU_FLAG_DEFERRED) {
 		sa_upcall_getstate(sau,
 		    sau->sau_state.deferred.e_lwp,
@@ -961,10 +1122,22 @@
 	    l->l_lid, type));
 
 	cpu_upcall(l, type, nevents, nint, sapp, ap, stack, sa->sa_upcall);
-	l->l_flag |= L_SA;
+
+	if (SIMPLEQ_EMPTY(&sa->sa_upcalls)) 
+	{
+		l->l_flag &= ~L_SA_UPCALL;
+		sa_vp_donate(l);
+		/* May not be reached  */
+	}
+
+
+	/* May not be reached  */
+	
+	SA_LWP_STATE_UNLOCK(l,s);
 	KERNEL_PROC_UNLOCK(l);
 }
 
+#if 0
 static struct lwp *
 sa_vp_repossess(struct lwp *l)
 {
@@ -977,6 +1150,10 @@
 	 * Put ourselves on the virtual processor and note that the
 	 * previous occupant of that position was interrupted.
 	 */
+
+
+
+
 	l2 = sa->sa_vp;
 	sa->sa_vp = l;
 	if (sa->sa_idle == l2)
@@ -1011,6 +1188,121 @@
 	}
 	return l2;
 }
+#endif
+
+static struct lwp *
+sa_vp_repossess(struct lwp *l)
+{
+	struct lwp *l2;
+	struct proc *p = l->l_proc;
+	struct sadata *sa = p->p_sa;
+	int s;
+		
+	SCHED_ASSERT_UNLOCKED();
+
+	l->l_flag |= L_SA_WANTS_VP;
+	sa->sa_vp_wait_count++;
+
+	if(sa->sa_idle != NULL)
+	{
+		/* XXXUPSXXX Simple but slow */
+		wakeup(sa->sa_idle);
+	}
+	else
+	{
+		SCHED_LOCK(s);
+		sa->sa_vp->l_flag |= L_SA_UPCALL;
+		/* kick the process */
+		signotify(p);
+		SCHED_UNLOCK(s);
+	}
+
+	
+	SCHED_ASSERT_UNLOCKED();
+
+	while(sa->sa_vp != l)
+	{
+	
+
+		tsleep((caddr_t) l, PWAIT, 
+		       "sa processor", 0);
+		
+		/* XXXUPSXXX NEED TO STOP THE LWP HERE ON REQUEST ??? */
+	       	if (p->p_flag & P_WEXIT) {
+			l->l_flag &= ~L_SA_WANTS_VP;
+			sa->sa_vp_wait_count--;
+			return 0;
+		}
+	}
+
+	l2 = sa->sa_old_lwp;
+
+	return l2;
+}
+
+static void
+sa_vp_donate(struct lwp *l)
+{
+	
+	struct proc *p = l->l_proc;
+	struct sadata *sa = p->p_sa;	
+	struct lwp *l2;
+	int s;
+
+	SCHED_ASSERT_UNLOCKED();
+
+	if (sa->sa_vp_wait_count == 0)
+	{
+		return;
+	}
+
+	LIST_FOREACH(l2, &p->p_lwps, l_sibling)
+	{
+		if(l2->l_flag &  L_SA_WANTS_VP)
+		{
+		
+			SCHED_LOCK(s);
+			
+			sa_putcachelwp(p, l);
+			sa->sa_vp = l2;
+			sa->sa_vp_wait_count--;
+			l2->l_flag &= ~L_SA_WANTS_VP;
+			sa->sa_old_lwp = l;
+			
+			sched_wakeup((caddr_t) l2);	
+
+			KERNEL_PROC_UNLOCK(l);
+
+
+			if((l2->l_stat == LSRUN) && ((l2->l_flag & L_INMEM) != 0))
+				mi_switch(l,l2);
+			else
+				mi_switch(l,NULL);
+	
+			/*
+			 * This isn't quite a NOTREACHED; we may get here if
+			 * the process exits before this LWP is reused. In
+			 * that case, we want to call lwp_exit(), which will
+			 * be done by the userret() hooks.
+			 */
+			SCHED_ASSERT_UNLOCKED();
+			splx(s);
+
+			KERNEL_PROC_LOCK(l);
+
+
+			KDASSERT(p->p_flag & P_WEXIT);
+			/* mostly NOTREACHED */
+
+			lwp_exit(l);
+		}
+	}
+	
+#ifdef DIAGNOSTIC
+	printf("sa_vp_donate couldn't find someone to donate the CPU to \n");
+#endif	
+}
+
 
 
 #ifdef DEBUG
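
The new sa_vp_repossess()/sa_vp_donate() pair is a hand-off protocol: an
LWP that needs the virtual processor raises L_SA_WANTS_VP and sleeps; the
current owner donates the VP at a safe point and puts itself on the cache
(or exits). In userland terms it is roughly the following - a sketch only,
with a single waiter slot where the patch walks p_lwps, and demo-grade
synchronization in main():

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t vp_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t vp_cv = PTHREAD_COND_INITIALIZER;
static pthread_t vp_owner;	/* cf. sa->sa_vp */
static pthread_t vp_wanter;	/* one waiter slot; the patch walks p_lwps */
static int vp_wait_count;	/* cf. sa->sa_vp_wait_count */

/* An LWP that needs the VP announces itself and sleeps until donated to. */
static void
vp_repossess(void)
{
	pthread_mutex_lock(&vp_lock);
	vp_wanter = pthread_self();	/* cf. l->l_flag |= L_SA_WANTS_VP */
	vp_wait_count++;
	while (!pthread_equal(vp_owner, pthread_self()))
		pthread_cond_wait(&vp_cv, &vp_lock);
	pthread_mutex_unlock(&vp_lock);
}

/* The current owner hands the VP over at a safe point, if anyone wants it. */
static void
vp_donate(void)
{
	pthread_mutex_lock(&vp_lock);
	if (vp_wait_count > 0) {
		vp_owner = vp_wanter;		/* cf. sa->sa_vp = l2 */
		vp_wait_count--;
		pthread_cond_broadcast(&vp_cv);	/* cf. sched_wakeup(l2) */
	}
	pthread_mutex_unlock(&vp_lock);
}

static void *
waiter(void *arg)
{
	(void)arg;
	vp_repossess();
	printf("waiter now owns the VP\n");
	return NULL;
}

int
main(void)
{
	pthread_t t;

	vp_owner = pthread_self();	/* main starts out as the VP owner */
	pthread_create(&t, NULL, waiter, NULL);
	sleep(1);	/* demo-grade: let the waiter register first */
	vp_donate();
	pthread_join(t, NULL);
	return 0;
}
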
Index: sys/kern/kern_sig.c
===================================================================
RCS file: /cvsroot/src/sys/kern/kern_sig.c,v
retrieving revision 1.143
diff -u -r1.143 kern_sig.c
--- sys/kern/kern_sig.c	2003/06/29 22:31:22	1.143
+++ sys/kern/kern_sig.c	2003/07/02 18:27:07
@@ -528,8 +528,11 @@
 		(void) spl0();		/* XXXSMP */
 	}
 
+
+
 	while (tsleep((caddr_t) ps, PPAUSE|PCATCH, "pause", 0) == 0)
-		/* void */;
+		;	/* void  */
+	
 	/* always return EINTR rather than ERESTART... */
 	return (EINTR);
 }
@@ -867,7 +870,8 @@
 	 */
 	if ((prop & SA_CANTMASK) == 0
 	    && p->p_sigctx.ps_sigwaited < 0
-	    && sigismember(&p->p_sigctx.ps_sigwait, signum)) {
+	    && sigismember(&p->p_sigctx.ps_sigwait, signum)
+	    &&  p->p_stat != SSTOP) {
 		sigdelset(&p->p_sigctx.ps_siglist, signum);
 		p->p_sigctx.ps_sigwaited = signum;
 		sigemptyset(&p->p_sigctx.ps_sigwait);
@@ -889,7 +893,8 @@
 	if (dolock)
 		SCHED_LOCK(s);
 
-	if (p->p_nrlwps > 0) {
+	/* XXXUPSXXX LWPs might go to sleep without passing signal handling */ 
+	if (p->p_nrlwps > 0 && (p->p_stat != SSTOP)) {
 		/*
 		 * At least one LWP is running or on a run queue. 
 		 * The signal will be noticed when one of them returns 
@@ -903,7 +908,17 @@
 	} else {
 		/* Process is sleeping or stopped */
 		if (p->p_flag & P_SA) {
-			l = p->p_sa->sa_idle;
+			struct lwp *l2 = p->p_sa->sa_vp;
+			l = NULL;		
+			allsusp = 1;
+
+			if ((l2->l_stat == LSSLEEP) &&  (l2->l_flag & L_SINTR))
+				l = l2; 
+			else if (l2->l_stat == LSSUSPENDED)
+				suspended = l2;
+			else if ((l2->l_stat != LSZOMB) && 
+				 (l2->l_stat != LSDEAD))
+				allsusp = 0;
 		} else {
 			/*
 			 * Find out if any of the sleeps are interruptable,
@@ -938,6 +953,7 @@
 				goto out;
 			}
 
+
 			/*
 			 * When a sleeping process receives a stop
 			 * signal, process immediately if possible.
@@ -962,6 +978,7 @@
 				goto out;
 			}
 
+
 			if (l == NULL) {
 				/*
 				 * Special case: SIGKILL of a process
@@ -1059,6 +1076,7 @@
 	if (l->l_priority > PUSER)
 		l->l_priority = PUSER;
  run:
+	
 	setrunnable(l);		/* XXXSMP: recurse? */
  out:
 	/* XXXSMP: works, but icky */
@@ -1074,6 +1092,11 @@
 	siginfo_t *si;	
 
 	if (p->p_flag & P_SA) {
+
+		/* XXXUPSXXX What if not on sa_vp ? */
+
+		int s = l->l_flag & L_SA;
+		l->l_flag &= ~L_SA; 
 		si = pool_get(&siginfo_pool, PR_WAITOK);
 		si->si_signo = sig;
 		si->si_errno = 0;
@@ -1086,6 +1109,9 @@
 
 		sa_upcall(l, SA_UPCALL_SIGNAL | SA_UPCALL_DEFER, le, li, 
 			    sizeof(siginfo_t), si);
+
+		
+		l->l_flag |= s;
 		return;
 	}
 
@@ -1140,6 +1166,16 @@
 	int		dolock = (l->l_flag & L_SINTR) == 0, locked = !dolock;
 	sigset_t	ss;
 
+	
+	if (l->l_flag & L_SA) {
+		struct sadata *sa = p->p_sa;	
+
+		/* Bail out if we do not own the virtual processor */
+		if (sa->sa_vp != l)
+			return 0;
+	}
+
+
 	if (p->p_stat == SSTOP) {
 		/*
 		 * The process is stopped/stopping. Stop ourselves now that
@@ -1322,6 +1358,8 @@
 
 	SCHED_ASSERT_LOCKED();
 
+
+
 	/* XXX lock process LWP state */
 	p->p_stat = SSTOP;
 	p->p_flag &= ~P_WAITED;
@@ -1333,7 +1371,7 @@
 	 */
 	   
 	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
-		if (l->l_stat == LSONPROC) {
+		if ((l->l_stat == LSONPROC) && (l == curlwp)) {
 			/* XXX SMP this assumes that a LWP that is LSONPROC
 			 * is curlwp and hence is about to be mi_switched 
 			 * away; the only callers of proc_stop() are:
@@ -1346,7 +1384,14 @@
 			 */
 			l->l_stat = LSSTOP;
 			p->p_nrlwps--;
-		} else if (l->l_stat == LSRUN) {
+		}
+		 else if ( (l->l_stat == LSSLEEP) && (l->l_flag & L_SINTR)) {
+			setrunnable(l);
+		}
+
+/* !!!UPS!!! FIX ME */
+#if 0
+else if (l->l_stat == LSRUN) {
 			/* Remove LWP from the run queue */
 			remrunqueue(l);
 			l->l_stat = LSSTOP;
@@ -1374,9 +1419,11 @@
 			    p->p_pid, l->l_lid, l->l_stat);
 		}
 #endif
+#endif
 	}
 	/* XXX unlock process LWP state */
-		    
+
+	    
 	sched_wakeup((caddr_t)p->p_pptr);
 }
 
@@ -1584,8 +1631,11 @@
 void
 sigexit(struct lwp *l, int signum)
 {
+
 	struct proc	*p;
+#if 0
 	struct lwp	*l2;
+#endif
 	int		error, exitsig;
 
 	p = l->l_proc;
@@ -1601,11 +1651,13 @@
 	p->p_flag |= P_WEXIT;
 	/* We don't want to switch away from exiting. */
 	/* XXX multiprocessor: stop LWPs on other processors. */
+#if 0
 	if (p->p_flag & P_SA) {
 		LIST_FOREACH(l2, &p->p_lwps, l_sibling)
 		    l2->l_flag &= ~L_SA;
 		p->p_flag &= ~P_SA;
 	}
+#endif
 
 	/* Make other LWPs stick around long enough to be dumped */
 	p->p_userret = lwp_coredump_hook;
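
The issignal() change adds an ownership gate: an LWP that does not own the
virtual processor leaves the signal pending and bails out, and the owner
picks it up later (cf. the CURSIG()/postsig() loop added in
sa_upcall_userret(), and the matching check in timerupcall() below).
Schematically - a much simplified sketch, signal numbers as bit indices,
not kernel code:

#include <stdio.h>

struct lwp { int l_lid; };
struct sa { struct lwp *sa_vp; unsigned pending; };

/* Returns the signal for the caller to handle, or 0. An LWP that does
 * not own the virtual processor always defers, leaving the bit pending. */
static int
issignal_gate(struct sa *sa, struct lwp *l)
{
	int sig;

	if (sa->sa_vp != l)
		return 0;		/* bail out: not our VP */
	for (sig = 1; sig < 32; sig++)
		if (sa->pending & (1u << sig)) {
			sa->pending &= ~(1u << sig);
			return sig;
		}
	return 0;
}

int
main(void)
{
	struct lwp a = { 1 }, b = { 2 };
	struct sa sa = { &a, 1u << 15 };  /* signal 15 pending, lwp 1 owns VP */

	printf("lwp 2 sees %d (deferred)\n", issignal_gate(&sa, &b));
	printf("lwp 1 sees %d (handled)\n", issignal_gate(&sa, &a));
	return 0;
}
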
Index: sys/kern/kern_synch.c
===================================================================
RCS file: /cvsroot/src/sys/kern/kern_synch.c,v
retrieving revision 1.132
diff -u -r1.132 kern_synch.c
--- sys/kern/kern_synch.c	2003/06/29 22:31:23	1.132
+++ sys/kern/kern_synch.c	2003/07/02 18:27:08
@@ -471,7 +471,7 @@
 	 */
 	if (catch) {
 		l->l_flag |= L_SINTR;
-		if ((sig = CURSIG(l)) != 0) {
+		if (((sig = CURSIG(l)) != 0) || (p->p_flag & P_WEXIT)) {
 			if (l->l_wchan != NULL)
 				unsleep(l);
 			l->l_stat = LSONPROC;
@@ -792,12 +792,14 @@
 {
 	struct lwp *l = curlwp;
 	int r, s;
-
+/* XXXUPSXXX Not needed for SMP patch */
+#if 0   
 	/* XXX Until the preempt() bug is fixed. */
 	if (more && (l->l_proc->p_flag & P_SA)) {
 		l->l_cpu->ci_schedstate.spc_flags &= ~SPCF_SWITCHCLEAR;
 		return;
 	}
+#endif
 
 	SCHED_LOCK(s);
 	l->l_priority = l->l_usrpri;
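
The ltsleep() tweak makes a catchable sleep refuse to block once P_WEXIT is
set, in the same place pending signals are checked, so no LWP can go to
sleep after the process has started exiting. A userland analogue of the
check (sketch only, illustrative names):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static bool exiting;		/* cf. P_WEXIT */
static bool signalled;		/* cf. CURSIG(l) != 0 */

/* Catchable sleep: returns EINTR instead of blocking if a signal or
 * process exit is already pending - the check the patch adds to ltsleep(). */
static int
catchable_sleep(const bool *condition)
{
	int error = 0;

	pthread_mutex_lock(&lock);
	while (!*condition) {
		if (signalled || exiting) {
			error = EINTR;
			break;
		}
		pthread_cond_wait(&cv, &lock);
	}
	pthread_mutex_unlock(&lock);
	return error;
}

int
main(void)
{
	bool cond = false;

	exiting = true;		/* teardown has already started */

	/* Never blocks: the exit check fires before we ever sleep. */
	printf("sleep -> %d (EINTR = %d)\n", catchable_sleep(&cond), EINTR);
	return 0;
}
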
Index: sys/kern/kern_time.c
===================================================================
RCS file: /cvsroot/src/sys/kern/kern_time.c,v
retrieving revision 1.70
diff -u -r1.70 kern_time.c
--- sys/kern/kern_time.c	2003/05/28 22:27:57	1.70
+++ sys/kern/kern_time.c	2003/07/02 18:27:08
@@ -869,6 +869,17 @@
 	struct ptimers *pt = (struct ptimers *)arg;
 	unsigned int i, fired, done;
 	KERNEL_PROC_LOCK(l);
+
+	{
+		struct proc	*p = l->l_proc;
+		struct sadata *sa = p->p_sa;	
+
+		/* Bail out if we do not own the virtual processor */
+		if (sa->sa_vp != l) {
+			KERNEL_PROC_UNLOCK(l);
+			return;
+		}
+	}
 	
 	fired = pt->pts_fired;
 	done = 0;
@@ -1188,8 +1199,9 @@
 itimerfire(struct ptimer *pt)
 {
 	struct proc *p = pt->pt_proc;
+#if 0
 	int s;
-
+#endif
 	if (pt->pt_ev.sigev_notify == SIGEV_SIGNAL) {
 		/*
 		 * No RT signal infrastructure exists at this time;
@@ -1215,17 +1227,24 @@
 			 * makes testing for sa_idle alone insuffucent to
 			 * determine if we really should call setrunnable.
 			 */
+#if 0
+
 		        if ((sa->sa_idle) && (p->p_stat != SSTOP)) {
 				SCHED_LOCK(s);
 				setrunnable(sa->sa_idle);
 				SCHED_UNLOCK(s);
 			}
+#endif
 			pt->pt_poverruns = pt->pt_overruns;
 			pt->pt_overruns = 0;
 			i = 1 << pt->pt_entry;
 			p->p_timers->pts_fired = i;
 			p->p_userret = timerupcall;
 			p->p_userret_arg = p->p_timers;
+			
+			if (sa->sa_idle)
+				wakeup(sa->sa_idle);
+
 		} else if (p->p_userret == timerupcall) {
 			i = 1 << pt->pt_entry;
 			if ((p->p_timers->pts_fired & i) == 0) {
Index: sys/sys/lwp.h
===================================================================
RCS file: /cvsroot/src/sys/sys/lwp.h,v
retrieving revision 1.6
diff -u -r1.6 lwp.h
--- sys/sys/lwp.h	2003/02/04 13:41:48	1.6
+++ sys/sys/lwp.h	2003/07/02 18:27:12
@@ -117,6 +117,7 @@
 #define	L_SA_UPCALL	0x200000 /* SA upcall is pending */
 #define	L_SA_BLOCKING	0x400000 /* Blocking in tsleep() */
 #define	L_DETACHED	0x800000 /* Won't be waited for. */
+#define L_SA_WANTS_VP   0x1000000 /* SA LWP wants a virtual processor */
 
 /*
  * Status values.
Index: sys/sys/savar.h
===================================================================
RCS file: /cvsroot/src/sys/sys/savar.h,v
retrieving revision 1.4
diff -u -r1.4 savar.h
--- sys/sys/savar.h	2003/02/02 02:22:14	1.4
+++ sys/sys/savar.h	2003/07/02 18:27:12
@@ -75,6 +75,9 @@
 	int	sa_flag;		/* SA_* flags */
 	sa_upcall_t	sa_upcall;	/* upcall entry point */
 	struct lwp	*sa_vp;		/* "virtual processor" allocation */
+	struct lwp	*sa_old_lwp;	/*  XXXUPSXXX hack: lwp that used to be on  sa_vp */
+	int    sa_vp_wait_count;        /*  XXXUPSXXX hack: number of LWPs waiting on VP */
+
 	struct lwp	*sa_woken;	/* list of woken lwps */
 	struct lwp	*sa_idle;      	/* lwp in sawait */
 	int	sa_concurrency;		/* desired concurrency */