Source-Changes-HG archive


[src/trunk]: src/sys/uvm add assertions.



details:   https://anonhg.NetBSD.org/src/rev/6a6f0568dc09
branches:  trunk
changeset: 566562:6a6f0568dc09
user:      yamt <yamt@NetBSD.org>
date:      Wed May 12 20:09:50 2004 +0000

description:
add assertions.

diffstat:

 sys/uvm/uvm_amap.c   |  5 +++--
 sys/uvm/uvm_glue.c   |  5 +++--
 sys/uvm/uvm_page.h   |  3 ++-
 sys/uvm/uvm_page_i.h |  8 +++++++-
 4 files changed, 15 insertions(+), 6 deletions(-)

diffs (123 lines):

diff -r 5def0a08e83e -r 6a6f0568dc09 sys/uvm/uvm_amap.c
--- a/sys/uvm/uvm_amap.c        Wed May 12 20:05:24 2004 +0000
+++ b/sys/uvm/uvm_amap.c        Wed May 12 20:09:50 2004 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_amap.c,v 1.54 2004/04/25 16:42:44 simonb Exp $     */
+/*     $NetBSD: uvm_amap.c,v 1.55 2004/05/12 20:09:50 yamt Exp $       */
 
 /*
  *
@@ -42,7 +42,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.54 2004/04/25 16:42:44 simonb Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.55 2004/05/12 20:09:50 yamt Exp $");
 
 #undef UVM_AMAP_INLINE         /* enable/disable amap inlines */
 
@@ -301,6 +301,7 @@
         */
 
        amap_lock(amap);
+       KASSERT(amap_refs(amap) == 1); /* amap can't be shared */
        AMAP_B2SLOT(slotmapped, entry->end - entry->start); /* slots mapped */
        AMAP_B2SLOT(slotadd, addsize);                  /* slots to add */
        if (flags & AMAP_EXTEND_FORWARDS) {
diff -r 5def0a08e83e -r 6a6f0568dc09 sys/uvm/uvm_glue.c
--- a/sys/uvm/uvm_glue.c        Wed May 12 20:05:24 2004 +0000
+++ b/sys/uvm/uvm_glue.c        Wed May 12 20:09:50 2004 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_glue.c,v 1.80 2004/05/02 13:04:57 pk Exp $ */
+/*     $NetBSD: uvm_glue.c,v 1.81 2004/05/12 20:09:51 yamt Exp $       */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -67,7 +67,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_glue.c,v 1.80 2004/05/02 13:04:57 pk Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_glue.c,v 1.81 2004/05/12 20:09:51 yamt Exp $");
 
 #include "opt_kgdb.h"
 #include "opt_kstack.h"
@@ -598,6 +598,7 @@
        outpri = outpri2 = 0;
        proclist_lock_read();
        LIST_FOREACH(l, &alllwp, l_list) {
+               KASSERT(l->l_proc != NULL);
                if (!swappable(l))
                        continue;
                switch (l->l_stat) {
diff -r 5def0a08e83e -r 6a6f0568dc09 sys/uvm/uvm_page.h
--- a/sys/uvm/uvm_page.h        Wed May 12 20:05:24 2004 +0000
+++ b/sys/uvm/uvm_page.h        Wed May 12 20:09:50 2004 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_page.h,v 1.37 2004/03/24 07:55:01 junyoung Exp $   */
+/*     $NetBSD: uvm_page.h,v 1.38 2004/05/12 20:09:52 yamt Exp $       */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -304,6 +304,7 @@
 
 #define uvm_lock_pageq()       simple_lock(&uvm.pageqlock)
 #define uvm_unlock_pageq()     simple_unlock(&uvm.pageqlock)
+#define        UVM_LOCK_ASSERT_PAGEQ() LOCK_ASSERT(simple_lock_held(&uvm.pageqlock))
 
 #define uvm_pagehash(obj,off) \
        (((unsigned long)obj+(unsigned long)atop(off)) & uvm.page_hashmask)
diff -r 5def0a08e83e -r 6a6f0568dc09 sys/uvm/uvm_page_i.h
--- a/sys/uvm/uvm_page_i.h      Wed May 12 20:05:24 2004 +0000
+++ b/sys/uvm/uvm_page_i.h      Wed May 12 20:09:50 2004 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_page_i.h,v 1.21 2002/12/01 22:58:43 matt Exp $     */
+/*     $NetBSD: uvm_page_i.h,v 1.22 2004/05/12 20:09:52 yamt Exp $     */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -152,6 +152,7 @@
 uvm_pagewire(pg)
        struct vm_page *pg;
 {
+       UVM_LOCK_ASSERT_PAGEQ();
        if (pg->wire_count == 0) {
                uvm_pagedequeue(pg);
                uvmexp.wired++;
@@ -170,6 +171,7 @@
 uvm_pageunwire(pg)
        struct vm_page *pg;
 {
+       UVM_LOCK_ASSERT_PAGEQ();
        pg->wire_count--;
        if (pg->wire_count == 0) {
                TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
@@ -192,6 +194,7 @@
 uvm_pagedeactivate(pg)
        struct vm_page *pg;
 {
+       UVM_LOCK_ASSERT_PAGEQ();
        if (pg->pqflags & PQ_ACTIVE) {
                TAILQ_REMOVE(&uvm.page_active, pg, pageq);
                pg->pqflags &= ~PQ_ACTIVE;
@@ -215,6 +218,7 @@
 uvm_pageactivate(pg)
        struct vm_page *pg;
 {
+       UVM_LOCK_ASSERT_PAGEQ();
        uvm_pagedequeue(pg);
        if (pg->wire_count == 0) {
                TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
@@ -232,10 +236,12 @@
        struct vm_page *pg;
 {
        if (pg->pqflags & PQ_ACTIVE) {
+               UVM_LOCK_ASSERT_PAGEQ();
                TAILQ_REMOVE(&uvm.page_active, pg, pageq);
                pg->pqflags &= ~PQ_ACTIVE;
                uvmexp.active--;
        } else if (pg->pqflags & PQ_INACTIVE) {
+               UVM_LOCK_ASSERT_PAGEQ();
                TAILQ_REMOVE(&uvm.page_inactive, pg, pageq);
                pg->pqflags &= ~PQ_INACTIVE;
                uvmexp.inactive--;


