Re: kauth and sched_{get,set}param
Mindaugas R. wrote:
> Memory allocation/freeing and other operations which may block should
> not be performed with the lock held.
Right. I reorganized the code to take care of all the places where it
would allocate/free memory with p_smutex held -- please take a look to
see if I forgot something.
That said, the call to the kauth authorization wrapper is still done
with p_smutex held. For example, in sys__sched_setparam() the logic is:
p = p_find(pid, PFIND_UNLOCK_FAIL);
mutex_enter(&p->p_smutex);
mutex_exit(&proclist_lock);
so I don't see how we can call kauth on p without holding either
proclist_lock or p_smutex.
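To make that concrete, the shape I ended up with looks roughly like the
following (a sketch only; the helper name is made up, the real checks
are inline in each syscall). The kauth call runs with p_smutex still
held, and the lock is dropped on the error path:

#include <sys/param.h>
#include <sys/kauth.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/proc.h>

/* Hypothetical helper, not part of the patch. */
static int
sched_authorize_target(struct lwp *l, pid_t pid, struct proc **pp)
{
        struct proc *p;

        /* On success p_find() returns with proclist_lock held. */
        p = p_find(pid, PFIND_UNLOCK_FAIL);
        if (p == NULL)
                return ESRCH;
        mutex_enter(&p->p_smutex);
        mutex_exit(&proclist_lock);

        /* Authorize while p_smutex is held, so p cannot go away. */
        if (kauth_authorize_process(l->l_cred, KAUTH_PROCESS_SCHEDULER, p,
            KAUTH_ARG(KAUTH_REQ_PROCESS_SCHEDULER_SETPARAM), NULL, NULL)) {
                mutex_exit(&p->p_smutex);
                return EPERM;
        }

        /* Caller proceeds with p_smutex held and drops it when done. */
        *pp = p;
        return 0;
}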
-e.
Index: sys_sched.c
===================================================================
RCS file: /cvsroot/src/sys/kern/sys_sched.c,v
retrieving revision 1.9
diff -u -p -r1.9 sys_sched.c
--- sys_sched.c 31 Jan 2008 01:21:17 -0000 1.9
+++ sys_sched.c 1 Feb 2008 13:57:50 -0000
@@ -117,11 +117,6 @@ sys__sched_setparam(struct lwp *l, const
pri_t pri;
int error;
- /* Available only for super-user */
- if (kauth_authorize_process(l->l_cred, KAUTH_PROCESS_SCHEDULER,
- KAUTH_ARG(KAUTH_REQ_PROCESS_SCHEDULER_SETPARAM), NULL, NULL, NULL))
- return EPERM;
-
/* Get the parameters from the user-space */
sp = kmem_zalloc(sizeof(struct sched_param), KM_SLEEP);
error = copyin(SCARG(uap, params), sp, sizeof(struct sched_param));
@@ -163,6 +158,13 @@ sys__sched_setparam(struct lwp *l, const
mutex_enter(&p->p_smutex);
}
+ /* Available only for super-user */
+ if (kauth_authorize_process(l->l_cred, KAUTH_PROCESS_SCHEDULER, p,
+ KAUTH_ARG(KAUTH_REQ_PROCESS_SCHEDULER_SETPARAM), NULL, NULL)) {
+ mutex_exit(&p->p_smutex);
+ return EPERM;
+ }
+
/* Find the LWP(s) */
lcnt = 0;
lid = SCARG(uap, lid);
@@ -212,12 +214,6 @@ sys__sched_getparam(struct lwp *l, const
lwpid_t lid;
int error;
- if (kauth_authorize_process(l->l_cred, KAUTH_PROCESS_SCHEDULER,
- KAUTH_ARG(KAUTH_REQ_PROCESS_SCHEDULER_GETPARAM), NULL, NULL, NULL))
- return EPERM;
-
- sp = kmem_zalloc(sizeof(struct sched_param), KM_SLEEP);
-
/* If not specified, use the first LWP */
lid = SCARG(uap, lid) == 0 ? 1 : SCARG(uap, lid);
@@ -234,9 +230,16 @@ sys__sched_getparam(struct lwp *l, const
mutex_exit(&p->p_smutex);
}
if (t == NULL) {
- kmem_free(sp, sizeof(struct sched_param));
return ESRCH;
}
+
+ if (kauth_authorize_process(l->l_cred, KAUTH_PROCESS_SCHEDULER,
+ t->l_proc, KAUTH_ARG(KAUTH_REQ_PROCESS_SCHEDULER_GETPARAM), NULL,
+ NULL))
+ return EPERM;
+
+ sp = kmem_zalloc(sizeof(struct sched_param), KM_SLEEP);
+
sp->sched_priority = t->l_priority;
sp->sched_class = t->l_class;
lwp_unlock(t);
@@ -268,7 +271,7 @@ sys__sched_setaffinity(struct lwp *l,
syscallarg(size_t) size;
syscallarg(void *) cpuset;
} */
- cpuset_t *cpuset;
+ cpuset_t *cpuset = NULL;
struct cpu_info *ci = NULL;
struct proc *p;
struct lwp *t;
@@ -277,12 +280,6 @@ sys__sched_setaffinity(struct lwp *l,
u_int lcnt;
int error;
- /* Available only for super-user */
- if (kauth_authorize_process(l->l_cred, KAUTH_PROCESS_SCHEDULER,
- l->l_proc, KAUTH_ARG(KAUTH_REQ_PROCESS_SCHEDULER_SETAFFINITY), NULL,
- NULL))
- return EPERM;
-
if (SCARG(uap, size) <= 0)
return EINVAL;
@@ -290,17 +287,8 @@ sys__sched_setaffinity(struct lwp *l,
cpuset = kmem_zalloc(sizeof(cpuset_t), KM_SLEEP);
error = copyin(SCARG(uap, cpuset), cpuset,
min(SCARG(uap, size), sizeof(cpuset_t)));
- if (error)
+ if (error) {
goto error;
-
- /* Look for a CPU in the set */
- for (CPU_INFO_FOREACH(cii, ci))
- if (CPU_ISSET(cpu_index(ci), cpuset))
- break;
- if (ci == NULL) {
- /* Empty set */
- kmem_free(cpuset, sizeof(cpuset_t));
- cpuset = NULL;
}
if (SCARG(uap, pid) != 0) {
@@ -325,6 +313,25 @@ sys__sched_setaffinity(struct lwp *l,
goto error;
}
+ /* Available only for super-user */
+ if (kauth_authorize_process(l->l_cred, KAUTH_PROCESS_SCHEDULER, p,
+ KAUTH_ARG(KAUTH_REQ_PROCESS_SCHEDULER_SETAFFINITY), NULL,
+ NULL)) {
+ mutex_exit(&p->p_smutex);
+ error = EPERM;
+ goto error;
+ }
+
+ /* Look for a CPU in the set */
+ for (CPU_INFO_FOREACH(cii, ci))
+ if (CPU_ISSET(cpu_index(ci), cpuset))
+ break;
+ if (ci == NULL) {
+ /* Empty set */
+ kmem_free(cpuset, sizeof(cpuset_t));
+ cpuset = NULL;
+ }
+
/* Find the LWP(s) */
lcnt = 0;
lid = SCARG(uap, lid);
@@ -375,13 +382,6 @@ sys__sched_getaffinity(struct lwp *l,
if (SCARG(uap, size) <= 0)
return EINVAL;
- if (kauth_authorize_process(l->l_cred, KAUTH_PROCESS_SCHEDULER,
- l->l_proc, KAUTH_ARG(KAUTH_REQ_PROCESS_SCHEDULER_GETAFFINITY), NULL,
- NULL))
- return EPERM;
-
- cpuset = kmem_zalloc(sizeof(cpuset_t), KM_SLEEP);
-
/* If not specified, use the first LWP */
lid = SCARG(uap, lid) == 0 ? 1 : SCARG(uap, lid);
@@ -398,9 +398,16 @@ sys__sched_getaffinity(struct lwp *l,
mutex_exit(&p->p_smutex);
}
if (t == NULL) {
- kmem_free(cpuset, sizeof(cpuset_t));
return ESRCH;
}
+
+ if (kauth_authorize_process(l->l_cred, KAUTH_PROCESS_SCHEDULER,
+ t->l_proc, KAUTH_ARG(KAUTH_REQ_PROCESS_SCHEDULER_GETAFFINITY), NULL,
+ NULL))
+ return EPERM;
+
+ cpuset = kmem_zalloc(sizeof(cpuset_t), KM_SLEEP);
+
if (t->l_flag & LW_AFFINITY)
memcpy(cpuset, &t->l_affinity, sizeof(cpuset_t));
lwp_unlock(t);