Source-Changes-HG archive


[src/nathanw_sa]: src/sys/kern Clean up some trailing whitespace.



details:   https://anonhg.NetBSD.org/src/rev/066f5be55d75
branches:  nathanw_sa
changeset: 506412:066f5be55d75
user:      nathanw <nathanw%NetBSD.org@localhost>
date:      Sun Oct 27 21:12:38 2002 +0000

description:
Clean up some trailing whitespace.
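
For reference, cleanup of this sort is often done mechanically rather than by hand. A minimal sketch of such a filter in C follows; it is illustrative only and not part of this commit, and the buffer size is an arbitrary assumption:

/*
 * Minimal sketch (not part of the commit): strip trailing spaces and
 * tabs from each input line, the same kind of cleanup applied in the
 * diff below. Lines longer than the buffer are processed in chunks.
 */
#include <stdio.h>
#include <string.h>

int
main(void)
{
	char buf[8192];
	size_t len;
	int nl;

	while (fgets(buf, sizeof(buf), stdin) != NULL) {
		len = strlen(buf);
		nl = 0;
		if (len > 0 && buf[len - 1] == '\n') {
			nl = 1;
			len--;
		}
		/* Drop trailing spaces and tabs. */
		while (len > 0 && (buf[len - 1] == ' ' || buf[len - 1] == '\t'))
			len--;
		buf[len] = '\0';
		fputs(buf, stdout);
		if (nl)
			putchar('\n');
	}
	return (0);
}

Run as a pipe (e.g. "filter < kern_lwp.c > kern_lwp.c.new") over each file; in practice tools such as sed are used for the same job.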

diffstat:

 sys/kern/kern_lwp.c  |  67 ++++++++++++++++++++++---------------------
 sys/kern/kern_sa.c   |  70 ++++++++++++++++++++++----------------------
 sys/kern/kern_time.c |  80 ++++++++++++++++++++++++++--------------------------
 3 files changed, 109 insertions(+), 108 deletions(-)

diffs (truncated from 788 to 300 lines):

diff -r 3da7ce8ac92d -r 066f5be55d75 sys/kern/kern_lwp.c
--- a/sys/kern/kern_lwp.c       Sat Oct 26 02:17:44 2002 +0000
+++ b/sys/kern/kern_lwp.c       Sun Oct 27 21:12:38 2002 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: kern_lwp.c,v 1.1.2.18 2002/10/25 17:26:27 nathanw Exp $        */
+/*     $NetBSD: kern_lwp.c,v 1.1.2.19 2002/10/27 21:12:38 nathanw Exp $        */
 
 /*-
  * Copyright (c) 2001 The NetBSD Foundation, Inc.
@@ -95,8 +95,8 @@
         * __LWP_ASLWP is probably needed for Solaris compat.
         */
 
-       newlwp(l, p, uaddr, 
-           SCARG(uap, flags) & LWP_DETACHED, 
+       newlwp(l, p, uaddr,
+           SCARG(uap, flags) & LWP_DETACHED,
            NULL, NULL, startlwp, newuc, &l2);
 
        if ((SCARG(uap, flags) & LWP_SUSPENDED) == 0) {
@@ -111,7 +111,7 @@
                l2->l_stat = LSSUSPENDED;
        }
 
-       error = copyout(&l2->l_lid, SCARG(uap, new_lwp), 
+       error = copyout(&l2->l_lid, SCARG(uap, new_lwp),
            sizeof(l2->l_lid));
        if (error)
                return (error);
@@ -120,7 +120,7 @@
 }
 
 
-int    
+int
 sys__lwp_exit(struct lwp *l, void *v, register_t *retval)
 {
 
@@ -135,7 +135,7 @@
 {
 
        *retval = l->l_lid;
-       
+
        return (0);
 }
 
@@ -161,7 +161,7 @@
                return (ESRCH);
 
        if (t == l) {
-               /* 
+               /*
                 * Check for deadlock, which is only possible
                 * when we're suspending ourself.
                 */
@@ -176,7 +176,7 @@
                SCHED_LOCK(s);
                l->l_stat = LSSUSPENDED;
                /* XXX NJWLWP check if this makes sense here: */
-               l->l_proc->p_stats->p_ru.ru_nvcsw++; 
+               l->l_proc->p_stats->p_ru.ru_nvcsw++;
                mi_switch(l, NULL);
                SCHED_ASSERT_UNLOCKED();
                splx(s);
@@ -249,7 +249,7 @@
        if (l->l_stat != LSSUSPENDED)
                return;
 
-       if (l->l_wchan == 0) { 
+       if (l->l_wchan == 0) {
                /* LWP was runnable before being suspended. */
                SCHED_LOCK(s);
                setrunnable(l);
@@ -278,7 +278,7 @@
 
        if (t == NULL)
                return (ESRCH);
-       
+
        if (t->l_stat != LSSLEEP)
                return (ENODEV);
 
@@ -330,13 +330,13 @@
 
        if (lid == l->l_lid)
                return (EDEADLK); /* Waiting for ourselves makes no sense. */
-       
+
        wpri = PWAIT |
            ((flags & LWPWAIT_EXITCONTROL) ? PNOEXITERR : PCATCH);
- loop:       
+ loop:
        nfound = 0;
        LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
-               if ((l2 == l) || (l2->l_flag & L_DETACHED) || 
+               if ((l2 == l) || (l2->l_flag & L_DETACHED) ||
                    ((lid != 0) && (lid != l2->l_lid)))
                        continue;
 
@@ -344,7 +344,7 @@
                if (l2->l_stat == LSZOMB) {
                        if (departed)
                                *departed = l2->l_lid;
-                       
+
                        s = proclist_lock_write();
                        LIST_REMOVE(l2, l_zlist); /* off zomblwp */
                        proclist_unlock_write(s);
@@ -355,9 +355,9 @@
                        p->p_nzlwps--;
                        simple_unlock(&p->p_lwplock);
                        /* XXX decrement limits */
-                       
+
                        pool_put(&lwp_pool, l2);
-                       
+
                        return (0);
                } else if (l2->l_stat == LSSLEEP ||
                           l2->l_stat == LSSUSPENDED) {
@@ -374,18 +374,18 @@
                        }
                        if (l3 == NULL) /* Everyone else is waiting. */
                                return (EDEADLK);
-                               
+
                        /* XXX we'd like to check for a cycle of waiting
                         * LWPs (specific LID waits, not any-LWP waits)
                         * and detect that sort of deadlock, but we don't
                         * have a good place to store the lwp that is
                         * being waited for. wchan is already filled with
                         * &p->p_nlwps, and putting the lwp address in
-                        * there for deadlock tracing would require 
+                        * there for deadlock tracing would require
                         * exiting LWPs to call wakeup on both their
                         * own address and &p->p_nlwps, to get threads
                         * sleeping on any LWP exiting.
-                        * 
+                        *
                         * Revisit later. Maybe another auxillary
                         * storage location associated with sleeping
                         * is in order.
@@ -396,7 +396,7 @@
        if (nfound == 0)
                return (ESRCH);
 
-       if ((error = tsleep((caddr_t) &p->p_nlwps, wpri, 
+       if ((error = tsleep((caddr_t) &p->p_nlwps, wpri,
            (lid != 0) ? waitstr1 : waitstr2, 0)) != 0)
                return (error);
 
@@ -405,7 +405,7 @@
 
 
 int
-newlwp(struct lwp *l1, struct proc *p2, vaddr_t uaddr, 
+newlwp(struct lwp *l1, struct proc *p2, vaddr_t uaddr,
     int flags, void *stack, size_t stacksize,
     void (*func)(void *), void *arg, struct lwp **rnewlwpp)
 {
@@ -420,10 +420,10 @@
 
 
        memset(&l2->l_startzero, 0,
-              (unsigned) ((caddr_t)&l2->l_endzero - 
+              (unsigned) ((caddr_t)&l2->l_endzero -
                           (caddr_t)&l2->l_startzero));
        memcpy(&l2->l_startcopy, &l1->l_startcopy,
-              (unsigned) ((caddr_t)&l2->l_endcopy - 
+              (unsigned) ((caddr_t)&l2->l_endcopy -
                           (caddr_t)&l2->l_startcopy));
 
 #if !defined(MULTIPROCESSOR)
@@ -451,7 +451,7 @@
                *rnewlwpp = l2;
 
        l2->l_addr = (struct user *)uaddr;
-       uvm_lwp_fork(l1, l2, stack, stacksize, func, 
+       uvm_lwp_fork(l1, l2, stack, stacksize, func,
            (arg != NULL) ? arg : l2);
 
 
@@ -460,7 +460,7 @@
        LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
        p2->p_nlwps++;
        simple_unlock(&p2->p_lwplock);
-       
+
        /* XXX should be locked differently... */
        s = proclist_lock_write();
        LIST_INSERT_HEAD(&alllwp, l2, l_list);
@@ -482,18 +482,19 @@
        int s;
 
        DPRINTF(("lwp_exit: %d.%d exiting.\n", p->p_pid, l->l_lid));
-       DPRINTF((" nlwps: %d nrlwps %d nzlwps: %d\n", 
+       DPRINTF((" nlwps: %d nrlwps %d nzlwps: %d\n",
            p->p_nlwps, p->p_nrlwps, p->p_nzlwps));
-       /* 
+
+       /*
         * If we are the last live LWP in a process, we need to exit
         * the entire process (if that's not already going on). We do
         * so with an exit status of zero, because it's a "controlled"
-        * exit, and because that's what Solaris does.  
+        * exit, and because that's what Solaris does.
         */
        if (((p->p_nlwps - p->p_nzlwps) == 1) && ((p->p_flag & P_WEXIT) == 0)) {
                DPRINTF(("lwp_exit: %d.%d calling exit1()\n",
                    p->p_pid, l->l_lid));
-               exit1(l, 0); 
+               exit1(l, 0);
        }
 
        s = proclist_lock_write();
@@ -510,7 +511,7 @@
        simple_unlock(&p->p_lwplock);
 
        l->l_stat = LSDEAD;
-       
+
        /* cpu_exit() will not return */
        cpu_exit(l, 0);
 
@@ -528,9 +529,9 @@
        wakeup(&deadproc);
 }
 
-/* 
- * Pick a LWP to represent the process for those operations which 
- * want information about a "process" that is actually associated 
+/*
+ * Pick a LWP to represent the process for those operations which
+ * want information about a "process" that is actually associated
  * with a LWP.
  */
 struct lwp *
diff -r 3da7ce8ac92d -r 066f5be55d75 sys/kern/kern_sa.c
--- a/sys/kern/kern_sa.c        Sat Oct 26 02:17:44 2002 +0000
+++ b/sys/kern/kern_sa.c        Sun Oct 27 21:12:38 2002 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: kern_sa.c,v 1.1.2.37 2002/10/22 17:40:46 nathanw Exp $ */
+/*     $NetBSD: kern_sa.c,v 1.1.2.38 2002/10/27 21:12:39 nathanw Exp $ */
 
 /*-
  * Copyright (c) 2001 The NetBSD Foundation, Inc.
@@ -206,7 +206,7 @@
        /* We have to be using scheduler activations */
        if (sa == NULL)
                return (EINVAL);
-       
+
        if (p->p_flag & P_SA) /* Already running! */
                return (EBUSY);
 
@@ -245,13 +245,13 @@
 
        *retval = sa->sa_concurrency;
        /*
-        * Concurrency greater than the number of physical CPUs does 
-        * not make sense. 
-        * XXX Should we ever support hot-plug CPUs, this will need 
+        * Concurrency greater than the number of physical CPUs does
+        * not make sense.
+        * XXX Should we ever support hot-plug CPUs, this will need
         * adjustment.
         */
        sa->sa_concurrency = min(SCARG(uap, concurrency), 1 /* XXX ncpus */);
-           
+
        return (0);
 }
 
@@ -283,7 +283,7 @@
         * signals.
         */
        if (p->p_nrlwps == 1) {
-               DPRINTFN(1,("sa_yield(%d.%d) going dormant\n", 
+               DPRINTFN(1,("sa_yield(%d.%d) going dormant\n",
                    p->p_pid, l->l_lid));
                /*
                 * A signal will probably wake us up. Worst case, the upcall
@@ -348,7 +348,7 @@
 
        if (sa->sa_flag & SA_FLAG_PREEMPT)
                sa_upcall(l, SA_UPCALL_PREEMPTED, l, NULL, 0, NULL);
-} 
+}
 
 
 /*
@@ -365,7 +365,7 @@
        struct sadata *sa = l->l_proc->p_sa;
        stack_t st;
 
-       l->l_flag &= ~L_SA; /* XXX prevent recursive upcalls if we sleep for 
+       l->l_flag &= ~L_SA; /* XXX prevent recursive upcalls if we sleep for
                              memory */
        sau = sadata_upcall_alloc(1);


