Source-Changes-HG archive

[src/trunk]: src/sys/uvm implement VM_PROC_MAP



details:   https://anonhg.NetBSD.org/src/rev/e38c506fd906
branches:  trunk
changeset: 810818:e38c506fd906
user:      christos <christos@NetBSD.org>
date:      Thu Sep 24 14:35:15 2015 +0000

description:
implement VM_PROC_MAP
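
For callers, the new node answers a five-component MIB, { CTL_VM, VM_PROC,
VM_PROC_MAP, pid, elem_size }, where elem_size tells the kernel how large a
struct kinfo_vmentry the caller was compiled against.  Below is a minimal
userland sketch of the usual two-step sysctl pattern (size probe, then
fetch); it assumes VM_PROC, VM_PROC_MAP and struct kinfo_vmentry are exposed
through <sys/sysctl.h> by the companion header change, which is not part of
this diff:

#include <sys/param.h>
#include <sys/sysctl.h>

#include <err.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main(void)
{
        struct kinfo_vmentry *kve;
        size_t i, len = 0;
        int mib[5];

        mib[0] = CTL_VM;
        mib[1] = VM_PROC;
        mib[2] = VM_PROC_MAP;
        mib[3] = (int)getpid();         /* target process */
        mib[4] = (int)sizeof(*kve);     /* element size we were built with */

        /* Size probe: with a NULL buffer the kernel reports the bytes needed. */
        if (sysctl(mib, 5, NULL, &len, NULL, 0) == -1)
                err(1, "sysctl: size probe");
        if ((kve = malloc(len)) == NULL)
                err(1, "malloc");
        /* Fetch one kinfo_vmentry per map entry. */
        if (sysctl(mib, 5, kve, &len, NULL, 0) == -1)
                err(1, "sysctl: fetch");

        for (i = 0; i < len / sizeof(*kve); i++)
                printf("%#" PRIx64 "-%#" PRIx64 "\n",
                    kve[i].kve_start, kve[i].kve_end);
        free(kve);
        return 0;
}

If the supplied buffer turns out to be too small, fill_vmentries() returns
ENOSPC but still writes the required size back through oldlenp, so a caller
can resize and retry.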

diffstat:

 sys/uvm/uvm_map.c |  173 ++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 files changed, 169 insertions(+), 4 deletions(-)

diffs (212 lines):

diff -r 171ea9ef87fc -r e38c506fd906 sys/uvm/uvm_map.c
--- a/sys/uvm/uvm_map.c Thu Sep 24 14:34:22 2015 +0000
+++ b/sys/uvm/uvm_map.c Thu Sep 24 14:35:15 2015 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_map.c,v 1.334 2015/06/22 06:24:17 matt Exp $       */
+/*     $NetBSD: uvm_map.c,v 1.335 2015/09/24 14:35:15 christos Exp $   */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -66,7 +66,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.334 2015/06/22 06:24:17 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.335 2015/09/24 14:35:15 christos Exp $");
 
 #include "opt_ddb.h"
 #include "opt_uvmhist.h"
@@ -81,10 +81,11 @@
 #include <sys/kernel.h>
 #include <sys/mount.h>
 #include <sys/vnode.h>
+#include <sys/filedesc.h>
 #include <sys/lockdebug.h>
 #include <sys/atomic.h>
+#include <sys/sysctl.h>
 #ifndef __USER_VA0_IS_SAFE
-#include <sys/sysctl.h>
 #include <sys/kauth.h>
 #include "opt_user_va0_disable_default.h"
 #endif
@@ -4783,15 +4784,179 @@
        user_va0_disable = !!t;
        return 0;
 }
+#endif
+
+static int
+fill_vmentry(struct lwp *l, struct proc *p, struct kinfo_vmentry *kve,
+    struct vm_map *m, struct vm_map_entry *e)
+{
+#ifndef _RUMPKERNEL
+       int error;
+
+       memset(kve, 0, sizeof(*kve));
+       KASSERT(e != NULL);
+       if (UVM_ET_ISOBJ(e)) {
+               struct uvm_object *uobj = e->object.uvm_obj;
+               KASSERT(uobj != NULL);
+               kve->kve_ref_count = uobj->uo_refs;
+               kve->kve_count = uobj->uo_npages;
+               if (UVM_OBJ_IS_VNODE(uobj)) {
+                       struct vattr va;
+                       struct vnode *vp = (struct vnode *)uobj;
+                       vn_lock(vp, LK_SHARED | LK_RETRY);
+                       error = VOP_GETATTR(vp, &va, l->l_cred);
+                       VOP_UNLOCK(vp);
+                       kve->kve_type = KVME_TYPE_VNODE;
+                       if (error == 0) {
+                               kve->kve_vn_size = vp->v_size;
+                               kve->kve_vn_type = (int)vp->v_type;
+                               kve->kve_vn_mode = va.va_mode;
+                               kve->kve_vn_rdev = va.va_rdev;
+                               kve->kve_vn_fileid = va.va_fileid;
+                               kve->kve_vn_fsid = va.va_fsid;
+                               error = vnode_to_path(kve->kve_path,
+                                   sizeof(kve->kve_path) / 2, vp, l, p);
+#ifdef DIAGNOSTIC
+                               if (error)
+                                       printf("%s: vp %p error %d\n", __func__,
+                                               vp, error);
+#endif
+                       }
+               } else if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
+                       kve->kve_type = KVME_TYPE_KERN;
+               } else if (UVM_OBJ_IS_DEVICE(uobj)) {
+                       kve->kve_type = KVME_TYPE_DEVICE;
+               } else if (UVM_OBJ_IS_AOBJ(uobj)) {
+                       kve->kve_type = KVME_TYPE_ANON;
+               } else {
+                       kve->kve_type = KVME_TYPE_OBJECT;
+               }
+       } else if (UVM_ET_ISSUBMAP(e)) {
+               struct vm_map *map = e->object.sub_map;
+               KASSERT(map != NULL);
+               kve->kve_ref_count = map->ref_count;
+               kve->kve_count = map->nentries;
+               kve->kve_type = KVME_TYPE_SUBMAP;
+       } else
+               kve->kve_type = KVME_TYPE_UNKNOWN;
+
+       kve->kve_start = e->start;
+       kve->kve_end = e->end;
+       kve->kve_offset = e->offset;
+       kve->kve_wired_count = e->wired_count;
+       kve->kve_inheritance = e->inheritance;
+       kve->kve_attributes = e->map_attrib;
+       kve->kve_advice = e->advice;
+#define PROT(p) (((p) & VM_PROT_READ) ? KVME_PROT_READ : 0) | \
+       (((p) & VM_PROT_WRITE) ? KVME_PROT_WRITE : 0) | \
+       (((p) & VM_PROT_EXECUTE) ? KVME_PROT_EXEC : 0)
+       kve->kve_protection = PROT(e->protection);
+       kve->kve_max_protection = PROT(e->max_protection);
+       kve->kve_flags |= (e->etype & UVM_ET_COPYONWRITE)
+           ? KVME_FLAG_COW : 0;
+       kve->kve_flags |= (e->etype & UVM_ET_NEEDSCOPY)
+           ? KVME_FLAG_NEEDS_COPY : 0;
+       kve->kve_flags |= (m->flags & VM_MAP_TOPDOWN)
+           ? KVME_FLAG_GROWS_DOWN : KVME_FLAG_GROWS_UP;
+       kve->kve_flags |= (m->flags & VM_MAP_PAGEABLE)
+           ? KVME_FLAG_PAGEABLE : 0;
+#endif
+       return 0;
+}
+
+static int
+fill_vmentries(struct lwp *l, pid_t pid, u_int elem_size, void *oldp,
+    size_t *oldlenp)
+{
+       int error;
+       struct proc *p;
+       struct kinfo_vmentry vme;
+       struct vmspace *vm;
+       struct vm_map *map;
+       struct vm_map_entry *entry;
+       char *dp;
+       size_t count;
+
+       count = 0;
+
+       if ((error = proc_find_locked(l, &p, pid)) != 0)
+               return error;
+
+       if ((error = proc_vmspace_getref(p, &vm)) != 0)
+               goto out;
+
+       map = &vm->vm_map;
+       vm_map_lock_read(map);
+
+       dp = oldp;
+       for (entry = map->header.next; entry != &map->header;
+           entry = entry->next) {
+               if (oldp && (dp - (char *)oldp) < *oldlenp + elem_size) {
+                       error = fill_vmentry(l, p, &vme, map, entry);
+                       if (error)
+                               break;
+                       error = sysctl_copyout(l, &vme, dp,
+                           min(elem_size, sizeof(vme)));
+                       if (error)
+                               break;
+                       dp += elem_size;
+               }
+               count++;
+       }
+       vm_map_unlock_read(map);
+       uvmspace_free(vm);
+out:
+       if (pid != -1)
+               mutex_exit(p->p_lock);
+       if (error == 0) {
+               count *= elem_size;
+               if (oldp != NULL && *oldlenp < count)
+                       error = ENOSPC;
+               *oldlenp = count;
+       }
+       return error;
+}
+
+static int
+sysctl_vmproc(SYSCTLFN_ARGS)
+{
+       int error;
+
+       if (namelen == 1 && name[0] == CTL_QUERY)
+               return (sysctl_query(SYSCTLFN_CALL(rnode)));
+
+       if (namelen == 0)
+               return EINVAL;
+
+       switch (name[0]) {
+       case VM_PROC_MAP:
+               if (namelen != 3)
+                       return EINVAL;
+               sysctl_unlock();
+               error = fill_vmentries(l, name[1], name[2],
+                   oldp, oldlenp);
+               sysctl_relock();
+               return error;
+       default:
+               return EINVAL;
+       }
+}
 
 SYSCTL_SETUP(sysctl_uvmmap_setup, "sysctl uvmmap setup")
 {
 
+       sysctl_createv(clog, 0, NULL, NULL,
+                      CTLFLAG_PERMANENT,
+                      CTLTYPE_STRUCT, "proc",
+                      SYSCTL_DESCR("Process vm information"),
+                      sysctl_vmproc, 0, NULL, 0,
+                      CTL_VM, VM_PROC, CTL_EOL);
+#ifndef __USER_VA0_IS_SAFE
         sysctl_createv(clog, 0, NULL, NULL,
                        CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
                        CTLTYPE_INT, "user_va0_disable",
                        SYSCTL_DESCR("Disable VA 0"),
                        sysctl_user_va0_disable, 0, &user_va0_disable, 0,
                        CTL_VM, CTL_CREATE, CTL_EOL);
+#endif
 }
-#endif
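
A side note on the PROT macro in fill_vmentry() above: it folds the
kernel-internal VM_PROT_* bits into the exported KVME_PROT_* flags, so
consumers can decode an entry's permissions without reaching into UVM
headers.  A small sketch of such a decoder (again assuming the KVME_PROT_*
constants come from the companion <sys/sysctl.h> change):

#include <sys/sysctl.h>

/* Render kve_protection or kve_max_protection as the familiar "rwx" form. */
static void
kve_prot_str(int prot, char buf[4])
{
        buf[0] = (prot & KVME_PROT_READ)  ? 'r' : '-';
        buf[1] = (prot & KVME_PROT_WRITE) ? 'w' : '-';
        buf[2] = (prot & KVME_PROT_EXEC)  ? 'x' : '-';
        buf[3] = '\0';
}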


