Source-Changes-HG archive


[src/trunk]: src/sys/uvm implement madvise() for MADV_{NORMAL,RANDOM,SEQUENTIAL}



details:   https://anonhg.NetBSD.org/src/rev/6c123f893b8c
branches:  trunk
changeset: 473165:6c123f893b8c
user:      mrg <mrg@NetBSD.org>
date:      Sun May 23 06:27:13 1999 +0000

description:
implement madvise() for MADV_{NORMAL,RANDOM,SEQUENTIAL}; the others are not yet done.
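
For context, madvise(2) lets a process hint its expected access pattern
for a range of its address space, so the VM system can tune read-ahead
and page replacement. A minimal userland sketch of the three advice
codes this change wires up (standard mmap/madvise API; error handling
kept short):

    #include <sys/mman.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
            size_t len = 4 * (size_t)sysconf(_SC_PAGESIZE);
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                MAP_ANON | MAP_PRIVATE, -1, 0);

            if (p == MAP_FAILED) {
                    perror("mmap");
                    return 1;
            }

            /* hint: the region will be touched in address order */
            if (madvise(p, len, MADV_SEQUENTIAL) == -1)
                    perror("madvise");

            /* ... sequential work on p ... */

            (void)madvise(p, len, MADV_NORMAL); /* back to the default */
            (void)munmap(p, len);
            return 0;
    }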

diffstat:

 sys/uvm/uvm_map.c  |  116 ++++++++++++++++++++++++++++++++++++++++++++++++++++-
 sys/uvm/uvm_map.h  |    3 +-
 sys/uvm/uvm_mmap.c |   69 +++++++++++++++++++++----------
 3 files changed, 163 insertions(+), 25 deletions(-)

diffs (237 lines):

diff -r 48cb32597064 -r 6c123f893b8c sys/uvm/uvm_map.c
--- a/sys/uvm/uvm_map.c Sun May 23 02:46:35 1999 +0000
+++ b/sys/uvm/uvm_map.c Sun May 23 06:27:13 1999 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_map.c,v 1.40 1999/05/20 23:03:23 thorpej Exp $     */
+/*     $NetBSD: uvm_map.c,v 1.41 1999/05/23 06:27:13 mrg Exp $ */
 
 /* 
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -1847,6 +1847,120 @@
        return(KERN_SUCCESS);
 }
 
+/* 
+ * uvm_map_advice: set advice code for range of addrs in map.
+ *
+ * => map must be unlocked
+ */
+
+int
+uvm_map_advice(map, start, end, new_advice)
+       vm_map_t map;
+       vaddr_t start;
+       vaddr_t end;
+       int new_advice;
+{
+       vm_map_entry_t entry, temp_entry;
+       UVMHIST_FUNC("uvm_map_advice"); UVMHIST_CALLED(maphist);
+       UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_adv=0x%x)",
+           map, start, end, new_advice);
+
+       vm_map_lock(map);
+       
+       VM_MAP_RANGE_CHECK(map, start, end);
+       
+       if (uvm_map_lookup_entry(map, start, &temp_entry)) {
+               entry = temp_entry;
+               UVM_MAP_CLIP_START(map, entry, start);
+       } else {
+               entry = temp_entry->next;
+       }
+       
+       while ((entry != &map->header) && (entry->start < end)) {
+               UVM_MAP_CLIP_END(map, entry, end);
+
+               switch (new_advice) {
+               case MADV_NORMAL:
+               case MADV_RANDOM:
+               case MADV_SEQUENTIAL:
+                       /* nothing special here */
+                       break;
+
+#if 0
+               case MADV_WILLNEED:
+                       /* activate all these pages */
+                       /* XXX */
+                       /*
+                        * should invent a "weak" mode for uvm_fault()
+                        * which would only do the PGO_LOCKED pgo_get().
+                        */
+                       break;
+
+               case MADV_DONTNEED:
+                       /* deactivate this page */
+                       /* XXX */
+                       /*
+                        * vm_page_t p;
+                        * uvm_lock_pageq();
+                        * for (p in each page)
+                        *      if (not_wired)
+                        *              uvm_pagedeactivate(p);
+                        * uvm_unlock_pageq();
+                        */
+                       break;
+
+               case MADV_SPACEAVAIL:
+                       /* 
+                        * XXXMRG
+                        * what is this?  i think:  "ensure that we have
+                        * allocated backing-store for these pages".  this
+                        * is going to require changes in the page daemon,
+                        * as it will free swap space allocated to pages in
+                        * core.  there's also what to do for
+                        * device/file/anonymous memory..
+                        */
+                       break;
+
+               case MADV_GARBAGE:
+                       /* pages are `empty' and can be garbage collected */
+                       /* XXX */
+                       /*
+                        * (perhaps MADV_FREE? check freebsd's MADV_FREE).
+                        * 
+                        * need to do this:
+                        *      - clear all the referenced and modified bits on
+                        *        the pages,
+                        *      - delete any backing store,
+                        *      - mark the page as `recyclable'.
+                        *
+                        * So, if you start paging, the pages would be thrown out
+                        * and then zero-filled the next time they're used.
+                        * Otherwise you'd just reuse them directly.  Once the
+                        * page has been modified again, it would no longer be
+                        * recyclable.  That way, malloc() can just tell the
+                        * system when pages are `empty'; if memory is needed,
+                        * they'll be tossed; if memory is not needed, there
+                        * will be no additional overhead.
+                        */
+                       break;
+#endif
+
+               default:
+                       vm_map_unlock(map);
+                       UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
+                       return (KERN_INVALID_ARGUMENT);
+               }
+
+               entry->advice = new_advice;
+               
+               entry = entry->next;
+       }
+
+       vm_map_unlock(map);
+       UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
+       return (KERN_SUCCESS);
+}
+
 /*
  * uvm_map_pageable: sets the pageability of a range in a map.
  *
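
Two things are worth noting about uvm_map_advice() above. First, it
follows the usual UVM range-update pattern: lock the map, clip the
first and last entries so they align exactly with [start, end), stamp
every entry in the range, then unlock. Second, UVM_MAP_CLIP_START and
UVM_MAP_CLIP_END exist precisely so an attribute can be changed on part
of an entry without disturbing the remainder. The following
self-contained toy (hypothetical structures for illustration, not the
kernel's vm_map_entry machinery; error checks elided for brevity) shows
the same splitting idea on a simple interval list:

    #include <stdio.h>
    #include <stdlib.h>

    struct entry {                      /* toy stand-in for vm_map_entry */
            unsigned long start, end;   /* half-open range [start, end)  */
            int advice;
            struct entry *next;
    };

    /* split e at addr, like UVM_MAP_CLIP_START/UVM_MAP_CLIP_END */
    static void
    clip(struct entry *e, unsigned long addr)
    {
            struct entry *n;

            if (addr <= e->start || addr >= e->end)
                    return;             /* addr not strictly inside e */
            n = malloc(sizeof(*n));
            n->start = addr;
            n->end = e->end;
            n->advice = e->advice;
            n->next = e->next;
            e->end = addr;
            e->next = n;
    }

    /* set advice on [start, end), splitting boundary entries */
    static void
    set_advice(struct entry *head, unsigned long start,
        unsigned long end, int advice)
    {
            struct entry *e;

            for (e = head; e != NULL; e = e->next) {
                    if (e->end <= start || e->start >= end)
                            continue;   /* no overlap with the range */
                    clip(e, start);
                    if (e->end <= start)
                            continue;   /* e is the untouched left half */
                    clip(e, end);
                    e->advice = advice;
            }
    }

    int
    main(void)
    {
            struct entry *e, *head = malloc(sizeof(*head));

            head->start = 0; head->end = 100;
            head->advice = 0; head->next = NULL;

            set_advice(head, 20, 60, 1);
            for (e = head; e != NULL; e = e->next)
                    printf("[%lu, %lu) advice=%d\n",
                        e->start, e->end, e->advice);
            return 0;
    }

Running it prints [0, 20) advice=0, [20, 60) advice=1 and
[60, 100) advice=0: the requested range is carved out of the original
entry exactly the way the clip macros carve map entries.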
diff -r 48cb32597064 -r 6c123f893b8c sys/uvm/uvm_map.h
--- a/sys/uvm/uvm_map.h Sun May 23 02:46:35 1999 +0000
+++ b/sys/uvm/uvm_map.h Sun May 23 06:27:13 1999 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_map.h,v 1.12 1999/05/20 23:03:23 thorpej Exp $     */
+/*     $NetBSD: uvm_map.h,v 1.13 1999/05/23 06:27:13 mrg Exp $ */
 
 /* 
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -146,6 +146,7 @@
                        boolean_t));
 int            uvm_map_inherit __P((vm_map_t, vaddr_t, vaddr_t,
                        vm_inherit_t));
+int            uvm_map_advice __P((vm_map_t, vaddr_t, vaddr_t, int));
 void           uvm_map_init __P((void));
 boolean_t      uvm_map_lookup_entry __P((vm_map_t, vaddr_t, 
                        vm_map_entry_t *));
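
(The __P() wrapper on the new prototype is the old BSD idiom for
supporting both ANSI and pre-ANSI compilers; the real definition lives
in <sys/cdefs.h>, but it amounts to:

    #if defined(__STDC__) || defined(__cplusplus)
    #define __P(protos)     protos      /* ANSI C: keep the prototype */
    #else
    #define __P(protos)     ()          /* K&R C: drop the argument list */
    #endif

so the same header serves both dialects.)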
diff -r 48cb32597064 -r 6c123f893b8c sys/uvm/uvm_mmap.c
--- a/sys/uvm/uvm_mmap.c        Sun May 23 02:46:35 1999 +0000
+++ b/sys/uvm/uvm_mmap.c        Sun May 23 06:27:13 1999 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_mmap.c,v 1.20 1999/05/03 09:08:28 mrg Exp $        */
+/*     $NetBSD: uvm_mmap.c,v 1.21 1999/05/23 06:27:13 mrg Exp $        */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -120,28 +120,6 @@
 }
 
 /*
- * sys_madvise: give advice about memory usage.
- */
-
-/* ARGSUSED */
-int
-sys_madvise(p, v, retval)
-       struct proc *p;
-       void *v;
-       register_t *retval;
-{
-#if 0
-       struct sys_madvise_args /* {
-               syscallarg(caddr_t) addr;
-               syscallarg(size_t) len;
-               syscallarg(int) behav;
-       } */ *uap = v;
-#endif
-
-       return (ENOSYS);
-}
-
-/*
  * sys_mincore: determine if pages are in core or not.
  */
 
@@ -696,6 +674,51 @@
 }
 
 /*
+ * sys_madvise: give advice about memory usage.
+ */
+
+/* ARGSUSED */
+int
+sys_madvise(p, v, retval)
+       struct proc *p;
+       void *v;
+       register_t *retval;
+{
+       struct sys_madvise_args /* {
+               syscallarg(caddr_t) addr;
+               syscallarg(size_t) len;
+               syscallarg(int) behav;
+       } */ *uap = v;
+       vaddr_t addr;
+       vsize_t size, pageoff;
+       int advice;
+       
+       addr = (vaddr_t)SCARG(uap, addr);
+       size = (vsize_t)SCARG(uap, len);
+       advice = SCARG(uap, behav);
+
+       /*
+        * align the address to a page boundary, and adjust the size accordingly
+        */
+       pageoff = (addr & PAGE_MASK);
+       addr -= pageoff;
+       size += pageoff;
+       size = (vsize_t) round_page(size);
+
+       if ((int)size < 0)
+               return (EINVAL);
+       
+       switch (uvm_map_advice(&p->p_vmspace->vm_map, addr, addr+size,
+                        advice)) {
+       case KERN_SUCCESS:
+               return (0);
+       case KERN_PROTECTION_FAILURE:
+               return (EACCES);
+       }
+       return (EINVAL);
+}
+
+/*
  * sys_mlock: memory lock
  */
 

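The argument handling in sys_madvise() above is the standard page
rounding dance: truncate addr down to a page boundary, grow the length
by the truncated amount, then round the length up to whole pages, so
the advice covers every page the original byte range touched. The
result of uvm_map_advice() is then mapped back to errno values:
KERN_SUCCESS becomes 0, KERN_PROTECTION_FAILURE becomes EACCES, and
anything else (such as the KERN_INVALID_ARGUMENT returned for an
unknown advice code) becomes EINVAL. A quick userland illustration of
the same rounding arithmetic (PAGE_SIZE hardcoded to 4096 purely for
the demo; the kernel uses PAGE_MASK and round_page()):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (PAGE_SIZE - 1)

    int
    main(void)
    {
            unsigned long addr = 0x12345;   /* unaligned start */
            unsigned long size = 100;       /* short length */
            unsigned long pageoff;

            pageoff = addr & PAGE_MASK;     /* bytes past the boundary */
            addr -= pageoff;                /* truncate to page start  */
            size += pageoff;
            size = (size + PAGE_MASK) & ~PAGE_MASK;   /* round_page() */

            /* prints addr=0x12000 size=0x1000 */
            printf("addr=0x%lx size=0x%lx\n", addr, size);
            return 0;
    }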

