Source-Changes-HG archive
[src/trunk]: src/tests/fs/vfs Don't assume that "first lock that blocks" is a...
details: https://anonhg.NetBSD.org/src/rev/1fb3daad73ce
branches: trunk
changeset: 767582:1fb3daad73ce
user: alnsn <alnsn%NetBSD.org@localhost>
date: Sat Jul 23 09:59:14 2011 +0000
description:
Don't assume that "first lock that blocks" is a lock with the lowest
start offset and change the test to work when F_GETLK returns any
lock that blocks.
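(For reference: as the commit message notes, the "first lock that blocks" reported by F_GETLK need not be the lock with the lowest start offset. A minimal sketch of such a query with plain fcntl(2), outside the rump test harness; the function name query_blocking_lock is illustrative and not part of the commit:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/*
 * Ask which lock, if any, would block a read lock over [0, len).
 * On return, l_type == F_UNLCK means no conflict; otherwise the
 * struct describes *a* blocking lock, not necessarily the one
 * with the smallest l_start.
 */
static int
query_blocking_lock(int fd, off_t len, struct flock *out)
{
	struct flock l;

	memset(&l, 0, sizeof(l));
	l.l_start = 0;
	l.l_len = len;
	l.l_type = F_RDLCK;
	l.l_whence = SEEK_SET;

	if (fcntl(fd, F_GETLK, &l) == -1)
		return -1;

	*out = l;
	return l.l_type != F_UNLCK;
}
)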
diffstat:
tests/fs/vfs/t_vnops.c | 146 ++++++++++++++++++++++++++++++++----------------
1 files changed, 96 insertions(+), 50 deletions(-)
diffs (206 lines):
diff -r 20855b69bd17 -r 1fb3daad73ce tests/fs/vfs/t_vnops.c
--- a/tests/fs/vfs/t_vnops.c Sat Jul 23 09:03:38 2011 +0000
+++ b/tests/fs/vfs/t_vnops.c Sat Jul 23 09:59:14 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: t_vnops.c,v 1.25 2011/07/20 11:52:00 hannken Exp $ */
+/* $NetBSD: t_vnops.c,v 1.26 2011/07/23 09:59:14 alnsn Exp $ */
/*-
* Copyright (c) 2010 The NetBSD Foundation, Inc.
@@ -636,6 +636,57 @@
return a < b ? -1 : (a > b ? 1 : 0);
}
+/*
+ * Find all locks set by the fcntl_getlock_pids test
+ * using F_GETLK for a range [start, start+len], and,
+ * if there is a blocking lock, recursively find
+ * all locks to the left (toward the beginning of
+ * a file) and to the right of the lock.
+ * The function also understands "until end of file"
+ * convention when len==0.
+ */
+static unsigned int
+fcntl_getlocks(int fildes, off_t start, off_t len,
+ struct flock *lock, struct flock *end)
+{
+ unsigned int rv = 0;
+ const struct flock l = { start, len, 0, F_RDLCK, SEEK_SET };
+
+ if (lock == end)
+ return rv;
+
+ RL(rump_sys_fcntl(fildes, F_GETLK, &l));
+
+ if (l.l_type == F_UNLCK)
+ return rv;
+
+ *lock++ = l;
+ rv += 1;
+
+ ATF_REQUIRE(l.l_whence == SEEK_SET);
+
+ if (l.l_start > start) {
+ unsigned int n =
+ fcntl_getlocks(fildes, start, l.l_start - start, lock, end);
+ rv += n;
+ lock += n;
+ if (lock == end)
+ return rv;
+ }
+
+ if (l.l_len == 0) /* does l span until the end? */
+ return rv;
+
+ if (len == 0) /* are we looking for locks until the end? */ {
+ rv += fcntl_getlocks(fildes, l.l_start + l.l_len, len, lock, end);
+ } else if (l.l_start + l.l_len < start + len) {
+ len -= l.l_start + l.l_len - start;
+ rv += fcntl_getlocks(fildes, l.l_start + l.l_len, len, lock, end);
+ }
+
+ return rv;
+}
+
static void
fcntl_getlock_pids(const atf_tc_t *tc, const char *mp)
{
@@ -648,9 +699,13 @@
{ 4, 3, 0, F_WRLCK, SEEK_SET },
};
- int fd[4];
- struct lwp *lwp[4];
- pid_t prevpid = 0;
+ /* Add an extra element to make sure recursion doesn't stop at array end */
+ struct flock result[5];
+
+ /* Add a 5th process */
+ int fd[5];
+ pid_t pid[5];
+ struct lwp *lwp[5];
unsigned int i, j;
const off_t sz = 8192;
@@ -658,7 +713,6 @@
int oflags = O_RDWR | O_CREAT;
memcpy(expect, lock, sizeof(lock));
- qsort(expect, __arraycount(expect), sizeof(expect[0]), &flock_compare);
FSTEST_ENTER();
@@ -667,68 +721,60 @@
* file. Note that the third and fourth processes lock in
* "reverse" order, i.e. the greater pid locks a range before
* the lesser pid.
+ * Then we create a 5th process which doesn't lock anything.
*/
- for(i = 0; i < __arraycount(lwp); i++) {
+ for (i = 0; i < __arraycount(lwp); i++) {
RZ(rump_pub_lwproc_rfork(RUMP_RFCFDG));
lwp[i] = rump_pub_lwproc_curlwp();
- assert(rump_sys_getpid() > prevpid);
- prevpid = rump_sys_getpid();
+ pid[i] = rump_sys_getpid();
RL(fd[i] = rump_sys_open(TESTFILE, oflags, omode));
oflags = O_RDWR;
omode = 0;
RL(rump_sys_ftruncate(fd[i], sz));
- RL(rump_sys_fcntl(fd[i], F_SETLK, &lock[i]));
+
+ if (i < __arraycount(lock)) {
+ RL(rump_sys_fcntl(fd[i], F_SETLK, &lock[i]));
+ expect[i].l_pid = pid[i];
+ }
}
- atf_tc_expect_fail("PR kern/44494");
+ qsort(expect, __arraycount(expect), sizeof(expect[0]), &flock_compare);
+
/*
- * In the context of each pid , do GETLK for a readlock from
- * i = [0,__arraycount(locks)]. If we try to lock from the same
- * start offset as the lock our current process holds, check
- * that we fail on the offset of the next lock ("else if" branch).
- * Otherwise, expect to get a lock for the current offset
- * ("if" branch). The "else" branch is purely for the last
- * process where we expect no blocking locks.
+ * In the context of each process, recursively find all locks
+ * that would block the current process. Processes 1-4 don't
+ * see their own lock, so we insert it to simplify the checks.
+ * Process 5 sees all 4 locks.
*/
- for(i = 0; i < __arraycount(lwp); i++) {
+ for (i = 0; i < __arraycount(lwp); i++) {
+ unsigned int nlocks;
+
rump_pub_lwproc_switch(lwp[i]);
- for(j = 0; j < __arraycount(lwp); j++) {
- struct flock l;
- l = expect[j];
- l.l_len = sz;
- l.l_type = F_RDLCK;
+ memset(result, 0, sizeof(result));
+ nlocks = fcntl_getlocks(fd[i], 0, sz,
+ result, result + __arraycount(result));
- RL(rump_sys_fcntl(fd[i], F_GETLK, &l));
+ if (i < __arraycount(lock)) {
+ ATF_REQUIRE(nlocks < __arraycount(result));
+ result[nlocks] = lock[i];
+ result[nlocks].l_pid = pid[i];
+ nlocks++;
+ }
- if(expect[j].l_start != lock[i].l_start) {
- /*
- * lock set by another process
- */
- ATF_CHECK(l.l_type != F_UNLCK);
- ATF_CHECK_EQ(l.l_start, expect[j].l_start);
- ATF_CHECK_EQ(l.l_len, expect[j].l_len);
- } else if (j != __arraycount(lwp) - 1) {
- /*
- * lock set by the current process
- */
- ATF_CHECK(l.l_type != F_UNLCK);
- ATF_CHECK_EQ(l.l_start, expect[j+1].l_start);
- ATF_CHECK_EQ(l.l_len, expect[j+1].l_len);
- } else {
- /*
- * there are no other locks after the
- * current process lock
- */
- ATF_CHECK_EQ(l.l_type, F_UNLCK);
- ATF_CHECK_EQ(l.l_start, expect[j].l_start);
- ATF_CHECK_EQ(l.l_len, sz);
- ATF_CHECK_EQ(l.l_pid, expect[j].l_pid);
- ATF_CHECK_EQ(l.l_whence, expect[j].l_whence);
- }
+ ATF_CHECK_EQ(nlocks, __arraycount(expect));
+
+ qsort(result, nlocks, sizeof(result[0]), &flock_compare);
+
+ for (j = 0; j < nlocks; j++) {
+ ATF_CHECK_EQ(result[j].l_start, expect[j].l_start );
+ ATF_CHECK_EQ(result[j].l_len, expect[j].l_len );
+ ATF_CHECK_EQ(result[j].l_pid, expect[j].l_pid );
+ ATF_CHECK_EQ(result[j].l_type, expect[j].l_type );
+ ATF_CHECK_EQ(result[j].l_whence, expect[j].l_whence);
}
}
@@ -736,7 +782,7 @@
* Release processes. This also releases the fds and locks
* making fs unmount possible
*/
- for(i = 0; i < __arraycount(lwp); i++) {
+ for (i = 0; i < __arraycount(lwp); i++) {
rump_pub_lwproc_switch(lwp[i]);
rump_pub_lwproc_releaselwp();
}
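(For illustration only: the recursive enumeration strategy of fcntl_getlocks() above, sketched against plain fcntl(2) instead of the rump_sys_* wrappers. The name enumerate_locks and the exact error handling are assumptions, not part of the commit:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/*
 * Query [start, start+len) (len == 0 means "to end of file") for a
 * blocking lock; if one is found, record it and recurse into the
 * ranges to its left and right, stopping when the output array is
 * full.  Returns the number of locks written to out.
 */
static unsigned int
enumerate_locks(int fd, off_t start, off_t len,
    struct flock *out, struct flock *end)
{
	struct flock l;
	unsigned int n;

	if (out == end)
		return 0;

	memset(&l, 0, sizeof(l));
	l.l_start = start;
	l.l_len = len;
	l.l_type = F_RDLCK;
	l.l_whence = SEEK_SET;

	if (fcntl(fd, F_GETLK, &l) == -1 || l.l_type == F_UNLCK)
		return 0;

	*out = l;		/* whichever blocking lock the kernel picked */
	n = 1;

	/* Locks between our start and the blocker's start. */
	if (l.l_start > start)
		n += enumerate_locks(fd, start, l.l_start - start,
		    out + n, end);

	/* Locks after the blocker; l_len == 0 means it runs to EOF. */
	if (l.l_len != 0) {
		off_t next = l.l_start + l.l_len;

		if (len == 0)
			n += enumerate_locks(fd, next, 0, out + n, end);
		else if (next < start + len)
			n += enumerate_locks(fd, next, start + len - next,
			    out + n, end);
	}

	return n;
}
)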