Source-Changes-HG archive

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]

[src/trunk]: src/sys/compat/linux/common PR/50021: Rin Okuyama: Fix linux affinity syscalls



details:   https://anonhg.NetBSD.org/src/rev/1e637a27d91b
branches:  trunk
changeset: 809332:1e637a27d91b
user:      christos <christos%NetBSD.org@localhost>
date:      Fri Jul 03 02:24:28 2015 +0000

description:
PR/50021: Rin Okuyama: Fix linux affinity syscalls
XXX: pullup-7

diffstat:

 sys/compat/linux/common/linux_sched.c |  89 ++++++++++++++++++++--------------
 1 files changed, 51 insertions(+), 38 deletions(-)

diffs (139 lines):

diff -r f260c7fdeab5 -r 1e637a27d91b sys/compat/linux/common/linux_sched.c
--- a/sys/compat/linux/common/linux_sched.c     Fri Jul 03 01:00:59 2015 +0000
+++ b/sys/compat/linux/common/linux_sched.c     Fri Jul 03 02:24:28 2015 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: linux_sched.c,v 1.67 2014/11/09 17:48:08 maxv Exp $    */
+/*     $NetBSD: linux_sched.c,v 1.68 2015/07/03 02:24:28 christos Exp $        */
 
 /*-
  * Copyright (c) 1999 The NetBSD Foundation, Inc.
@@ -35,7 +35,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: linux_sched.c,v 1.67 2014/11/09 17:48:08 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: linux_sched.c,v 1.68 2015/07/03 02:24:28 christos Exp $");
 
 #include <sys/param.h>
 #include <sys/mount.h>
@@ -65,6 +65,9 @@
 static int linux_clone_nptl(struct lwp *, const struct linux_sys_clone_args *,
     register_t *);
 
+/* Unlike Linux, dynamically calculate CPU mask size */
+#define        LINUX_CPU_MASK_SIZE (sizeof(long) * ((ncpu + LONG_BIT - 1) / LONG_BIT))
+
 #if DEBUG_LINUX
 #define DPRINTF(x) uprintf x
 #else
@@ -627,6 +630,10 @@
        return 0;
 }
 
+/*
+ * The affinity syscalls assume that the layout of our cpu kcpuset is
+ * the same as linux's: a linear bitmask.
+ */
 int
 linux_sys_sched_getaffinity(struct lwp *l, const struct linux_sys_sched_getaffinity_args *uap, register_t *retval)
 {
@@ -635,39 +642,45 @@
                syscallarg(unsigned int) len;
                syscallarg(unsigned long *) mask;
        } */
-       proc_t *p;
-       unsigned long *lp, *data;
-       int error, size, nb = ncpu;
+       struct lwp *t;
+       kcpuset_t *kcset;
+       size_t size;
+       cpuid_t i;
+       int error;
 
-       /* Unlike Linux, dynamically calculate cpu mask size */
-       size = sizeof(long) * ((ncpu + LONG_BIT - 1) / LONG_BIT);
+       size = LINUX_CPU_MASK_SIZE;
        if (SCARG(uap, len) < size)
                return EINVAL;
 
-       /* XXX: Pointless check.  TODO: Actually implement this. */
-       mutex_enter(proc_lock);
-       p = proc_find(SCARG(uap, pid));
-       mutex_exit(proc_lock);
-       if (p == NULL) {
+       /* Lock the LWP */
+       t = lwp_find2(SCARG(uap, pid), l->l_lid);
+       if (t == NULL)
                return ESRCH;
+
+       /* Check the permission */
+       if (kauth_authorize_process(l->l_cred,
+           KAUTH_PROCESS_SCHEDULER_GETAFFINITY, t->l_proc, NULL, NULL, NULL)) {
+               mutex_exit(t->l_proc->p_lock);
+               return EPERM;
        }
 
-       /* 
-        * return the actual number of CPU, tag all of them as available 
-        * The result is a mask, the first CPU being in the least significant
-        * bit.
-        */
-       data = kmem_zalloc(size, KM_SLEEP);
-       lp = data;
-       while (nb > LONG_BIT) {
-               *lp++ = ~0UL;
-               nb -= LONG_BIT;
+       kcpuset_create(&kcset, true);
+       lwp_lock(t);
+       if (t->l_affinity != NULL)
+               kcpuset_copy(kcset, t->l_affinity);
+       else {
+               /*
+                * All available CPUs should be masked when affinity has not
+                * been set.
+                */
+               kcpuset_zero(kcset);
+               for (i = 0; i < ncpu; i++)
+                       kcpuset_set(kcset, i);
        }
-       if (nb)
-               *lp = (1 << ncpu) - 1;
-
-       error = copyout(data, SCARG(uap, mask), size);
-       kmem_free(data, size);
+       lwp_unlock(t);
+       mutex_exit(t->l_proc->p_lock);
+       error = kcpuset_copyout(kcset, (cpuset_t *)SCARG(uap, mask), size);
+       kcpuset_unuse(kcset, NULL);
        *retval = size;
        return error;
 }
@@ -680,17 +693,17 @@
                syscallarg(unsigned int) len;
                syscallarg(unsigned long *) mask;
        } */
-       proc_t *p;
+       struct sys__sched_setaffinity_args ssa;
+       size_t size;
+
+       size = LINUX_CPU_MASK_SIZE;
+       if (SCARG(uap, len) < size)
+               return EINVAL;
 
-       /* XXX: Pointless check.  TODO: Actually implement this. */
-       mutex_enter(proc_lock);
-       p = proc_find(SCARG(uap, pid));
-       mutex_exit(proc_lock);
-       if (p == NULL) {
-               return ESRCH;
-       }
+       SCARG(&ssa, pid) = SCARG(uap, pid);
+       SCARG(&ssa, lid) = l->l_lid;
+       SCARG(&ssa, size) = size;
+       SCARG(&ssa, cpuset) = (cpuset_t *)SCARG(uap, mask);
 
-       /* Let's ignore it */
-       DPRINTF(("%s\n", __func__));
-       return 0;
+       return sys__sched_setaffinity(l, &ssa, retval);
 }



Home | Main Index | Thread Index | Old Index