tech-kern archive
Changing __USING_TOPDOWN_VM to a runtime decision
Hey folks,
I would like to change the current (mostly) compile-time decision whether
we will use top-down VA layout for userland processes to a runtime check.
This allows emulations to disable it, and it also allows MD code to recognize
binaries not suitable for top-down VM layout and give those binaries the
old layout.
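For the emulation case that is basically a one-liner in whatever exec hook
the emulation already has; a hypothetical sketch (compat_foo_elf_probe is a
made-up name, the signature is just the usual ELF probe one):

/* hypothetical sketch: an emulation opting out of top-down layout */
static int
compat_foo_elf_probe(struct lwp *l, struct exec_package *epp, void *eh,
    char *itp, vaddr_t *pos)
{

	/* ... the usual compat checks ... */

	/* binaries of this emulation expect the classic bottom-up layout */
	epp->ep_flags &= ~EXEC_TOPDOWN_VM;
	return 0;
}
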
The second point (MD recognition) is what I actually need: on sparc64 we have
compiled most code in the "medlow" code model, which does not allow big
addresses. I am about to commit changes that switch this default and properly
mark new binaries. To still be able to run old binaries, I need something like
the attached patch.
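On sparc64 that check would live in the MD ELF probe, roughly like the
following sketch (sparc64_elf64_probe and elf64_check_medany are placeholders
here; the real marker test comes with the sparc64 commit mentioned above):

/*
 * hypothetical sketch: keep EXEC_TOPDOWN_VM only for binaries that are
 * marked as coping with big (non-medlow) addresses
 */
int
sparc64_elf64_probe(struct lwp *l, struct exec_package *epp, void *eh,
    char *itp, vaddr_t *pos)
{

	if (!elf64_check_medany(epp, eh))
		epp->ep_flags &= ~EXEC_TOPDOWN_VM;
	return 0;
}
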
The patch is mostly straightforward: I define a new flag EXEC_TOPDOWN_VM,
initialized by default according to __USING_TOPDOWN_VM, but overridable
by an MD function. This way the exec_package carries the information
whether top-down VM will be used for the to-be-loaded binary.
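A port that wants a different default just provides its own MD_TOPDOWN_INIT,
for example (hypothetical, not part of the patch; where exactly the macro
lives is up to the port, e.g. its <machine/exec.h>):

/* default to the classic layout, MD/emulation code may still set the flag */
#define	MD_TOPDOWN_INIT(epp)	/* nothing, EXEC_TOPDOWN_VM stays clear */

or, the other way around:

/* always request top-down layout, independent of __USING_TOPDOWN_VM */
#define	MD_TOPDOWN_INIT(epp)	((epp)->ep_flags |= EXEC_TOPDOWN_VM)
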
Most other changes are mechanical, like passing this information through
a few uvm layers.
For architectures already using top-down VM, no change is intended.
Comments?
Martin
Index: kern/exec_elf.c
===================================================================
RCS file: /cvsroot/src/sys/kern/exec_elf.c,v
retrieving revision 1.49
diff -u -p -r1.49 exec_elf.c
--- kern/exec_elf.c 5 Nov 2013 14:26:19 -0000 1.49
+++ kern/exec_elf.c 5 Nov 2013 14:27:22 -0000
@@ -422,14 +422,15 @@ elf_load_file(struct lwp *l, struct exec
p = l->l_proc;
KASSERT(p->p_vmspace);
- if (__predict_true(p->p_vmspace != proc0.p_vmspace))
+ if (__predict_true(p->p_vmspace != proc0.p_vmspace)) {
use_topdown = p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN;
- else
+ } else {
#ifdef __USING_TOPDOWN_VM
- use_topdown = true;
+ use_topdown = !!(epp->ep_flags & EXEC_TOPDOWN_VM);
#else
use_topdown = false;
#endif
+ }
/*
* 1. open file
Index: kern/kern_exec.c
===================================================================
RCS file: /cvsroot/src/sys/kern/kern_exec.c,v
retrieving revision 1.363
diff -u -p -r1.363 kern_exec.c
--- kern/kern_exec.c 12 Sep 2013 19:01:38 -0000 1.363
+++ kern/kern_exec.c 5 Nov 2013 14:27:22 -0000
@@ -112,6 +112,14 @@ __KERNEL_RCSID(0, "$NetBSD: kern_exec.c,
#include <compat/common/compat_util.h>
+#ifndef MD_TOPDOWN_INIT
+#ifdef __USING_TOPDOWN_VM
+#define MD_TOPDOWN_INIT(epp) (epp)->ep_flags |= EXEC_TOPDOWN_VM
+#else
+#define MD_TOPDOWN_INIT(epp)
+#endif
+#endif
+
static int exec_sigcode_map(struct proc *, const struct emul *);
#ifdef DEBUG_EXEC
@@ -653,6 +661,7 @@ execve_loadvm(struct lwp *l, const char
data->ed_pack.ep_vmcmds.evs_used = 0;
data->ed_pack.ep_vap = &data->ed_attr;
data->ed_pack.ep_flags = 0;
+ MD_TOPDOWN_INIT(&data->ed_pack);
data->ed_pack.ep_emul_root = NULL;
data->ed_pack.ep_interp = NULL;
data->ed_pack.ep_esch = NULL;
@@ -933,10 +942,12 @@ execve_runproc(struct lwp *l, struct exe
*/
if (is_spawn)
uvmspace_spawn(l, data->ed_pack.ep_vm_minaddr,
- data->ed_pack.ep_vm_maxaddr);
+ data->ed_pack.ep_vm_maxaddr,
+ !!(data->ed_pack.ep_flags & EXEC_TOPDOWN_VM));
else
uvmspace_exec(l, data->ed_pack.ep_vm_minaddr,
- data->ed_pack.ep_vm_maxaddr);
+ data->ed_pack.ep_vm_maxaddr,
+ !!(data->ed_pack.ep_flags & EXEC_TOPDOWN_VM));
/* record proc's vnode, for use by procfs and others */
if (p->p_textvp)
Index: kern/kern_proc.c
===================================================================
RCS file: /cvsroot/src/sys/kern/kern_proc.c,v
retrieving revision 1.189
diff -u -p -r1.189 kern_proc.c
--- kern/kern_proc.c 25 Oct 2013 15:52:57 -0000 1.189
+++ kern/kern_proc.c 5 Nov 2013 14:27:22 -0000
@@ -483,7 +483,13 @@ proc0_init(void)
* share proc0's vmspace, and thus, the kernel pmap.
*/
uvmspace_init(&vmspace0, pmap_kernel(), round_page(VM_MIN_ADDRESS),
- trunc_page(VM_MAX_ADDRESS));
+ trunc_page(VM_MAX_ADDRESS),
+#ifdef __USING_TOPDOWN_VM
+ true
+#else
+ false
+#endif
+ );
/* Initialize signal state for proc0. XXX IPL_SCHED */
mutex_init(&p->p_sigacts->sa_mutex, MUTEX_DEFAULT, IPL_SCHED);
Index: sys/exec.h
===================================================================
RCS file: /cvsroot/src/sys/sys/exec.h,v
retrieving revision 1.141
diff -u -p -r1.141 exec.h
--- sys/exec.h 30 Oct 2013 23:32:30 -0000 1.141
+++ sys/exec.h 5 Nov 2013 14:27:23 -0000
@@ -226,6 +226,7 @@ struct exec_package {
#define EXEC_DESTR 0x0010 /* destructive ops performed */
#define EXEC_32 0x0020 /* 32-bit binary emulation */
#define EXEC_FORCEAUX 0x0040 /* always use ELF AUX vector */
+#define EXEC_TOPDOWN_VM 0x0080 /* may use top-down VM layout */
struct exec_vmcmd {
int (*ev_proc)(struct lwp *, struct exec_vmcmd *);
Index: uvm/uvm_extern.h
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_extern.h,v
retrieving revision 1.184
diff -u -p -r1.184 uvm_extern.h
--- uvm/uvm_extern.h 1 Sep 2012 00:26:37 -0000 1.184
+++ uvm/uvm_extern.h 5 Nov 2013 14:27:23 -0000
@@ -643,11 +643,11 @@ bool uvm_map_checkprot(struct vm_map *
vaddr_t, vm_prot_t);
int uvm_map_protect(struct vm_map *, vaddr_t,
vaddr_t, vm_prot_t, bool);
-struct vmspace *uvmspace_alloc(vaddr_t, vaddr_t);
+struct vmspace *uvmspace_alloc(vaddr_t, vaddr_t, bool);
void uvmspace_init(struct vmspace *, struct pmap *,
- vaddr_t, vaddr_t);
-void uvmspace_exec(struct lwp *, vaddr_t, vaddr_t);
-void uvmspace_spawn(struct lwp *, vaddr_t, vaddr_t);
+ vaddr_t, vaddr_t, bool);
+void uvmspace_exec(struct lwp *, vaddr_t, vaddr_t, bool);
+void uvmspace_spawn(struct lwp *, vaddr_t, vaddr_t, bool);
struct vmspace *uvmspace_fork(struct vmspace *);
void uvmspace_addref(struct vmspace *);
void uvmspace_free(struct vmspace *);
Index: uvm/uvm_map.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_map.c,v
retrieving revision 1.326
diff -u -p -r1.326 uvm_map.c
--- uvm/uvm_map.c 25 Oct 2013 20:25:25 -0000 1.326
+++ uvm/uvm_map.c 5 Nov 2013 14:27:24 -0000
@@ -3927,13 +3927,13 @@ uvm_map_checkprot(struct vm_map *map, va
* - refcnt set to 1, rest must be init'd by caller
*/
struct vmspace *
-uvmspace_alloc(vaddr_t vmin, vaddr_t vmax)
+uvmspace_alloc(vaddr_t vmin, vaddr_t vmax, bool topdown)
{
struct vmspace *vm;
UVMHIST_FUNC("uvmspace_alloc"); UVMHIST_CALLED(maphist);
vm = pool_cache_get(&uvm_vmspace_cache, PR_WAITOK);
- uvmspace_init(vm, NULL, vmin, vmax);
+ uvmspace_init(vm, NULL, vmin, vmax, topdown);
UVMHIST_LOG(maphist,"<- done (vm=0x%x)", vm,0,0,0);
return (vm);
}
@@ -3945,15 +3945,14 @@ uvmspace_alloc(vaddr_t vmin, vaddr_t vma
* - refcnt set to 1, rest must be init'd by caller
*/
void
-uvmspace_init(struct vmspace *vm, struct pmap *pmap, vaddr_t vmin, vaddr_t vmax)
+uvmspace_init(struct vmspace *vm, struct pmap *pmap, vaddr_t vmin,
+ vaddr_t vmax, bool topdown)
{
UVMHIST_FUNC("uvmspace_init"); UVMHIST_CALLED(maphist);
memset(vm, 0, sizeof(*vm));
uvm_map_setup(&vm->vm_map, vmin, vmax, VM_MAP_PAGEABLE
-#ifdef __USING_TOPDOWN_VM
- | VM_MAP_TOPDOWN
-#endif
+ | (topdown ? VM_MAP_TOPDOWN : 0)
);
if (pmap)
pmap_reference(pmap);
@@ -4016,7 +4015,7 @@ uvmspace_unshare(struct lwp *l)
*/
void
-uvmspace_spawn(struct lwp *l, vaddr_t start, vaddr_t end)
+uvmspace_spawn(struct lwp *l, vaddr_t start, vaddr_t end, bool topdown)
{
struct proc *p = l->l_proc;
struct vmspace *nvm;
@@ -4025,7 +4024,7 @@ uvmspace_spawn(struct lwp *l, vaddr_t st
cpu_vmspace_exec(l, start, end);
#endif
- nvm = uvmspace_alloc(start, end);
+ nvm = uvmspace_alloc(start, end, topdown);
kpreempt_disable();
p->p_vmspace = nvm;
pmap_activate(l);
@@ -4037,7 +4036,7 @@ uvmspace_spawn(struct lwp *l, vaddr_t st
*/
void
-uvmspace_exec(struct lwp *l, vaddr_t start, vaddr_t end)
+uvmspace_exec(struct lwp *l, vaddr_t start, vaddr_t end, bool topdown)
{
struct proc *p = l->l_proc;
struct vmspace *nvm, *ovm = p->p_vmspace;
@@ -4053,11 +4052,14 @@ uvmspace_exec(struct lwp *l, vaddr_t sta
* see if more than one process is using this vmspace...
*/
- if (ovm->vm_refcnt == 1) {
+ if (ovm->vm_refcnt == 1
+ && topdown == !!(ovm->vm_map.flags & VM_MAP_TOPDOWN)) {
/*
* if p is the only process using its vmspace then we can safely
* recycle that vmspace for the program that is being exec'd.
+ * But only if TOPDOWN matches the requested value for the new
+ * vm space!
*/
#ifdef SYSVSHM
@@ -4099,7 +4101,7 @@ uvmspace_exec(struct lwp *l, vaddr_t sta
* for p
*/
- nvm = uvmspace_alloc(start, end);
+ nvm = uvmspace_alloc(start, end, topdown);
/*
* install new vmspace and drop our ref to the old one.
@@ -4203,7 +4205,8 @@ uvmspace_fork(struct vmspace *vm1)
vm_map_lock(old_map);
- vm2 = uvmspace_alloc(vm_map_min(old_map), vm_map_max(old_map));
+ vm2 = uvmspace_alloc(vm_map_min(old_map), vm_map_max(old_map),
+ !!(vm1->vm_map.flags & VM_MAP_TOPDOWN));
memcpy(&vm2->vm_startcopy, &vm1->vm_startcopy,
(char *) (vm1 + 1) - (char *) &vm1->vm_startcopy);
new_map = &vm2->vm_map; /* XXX */