Source-Changes-HG archive
[src/trunk]: src/sys/uvm centralize calls from UVM to radixtree into a few functions
details: https://anonhg.NetBSD.org/src/rev/d529e89fcfcb
branches: trunk
changeset: 937275:d529e89fcfcb
user: chs <chs%NetBSD.org@localhost>
date: Fri Aug 14 09:06:14 2020 +0000
description:
centralize calls from UVM to radixtree into a few functions.
in those functions, assert that the object lock is held in
the correct mode.
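The pattern is easiest to see outside the kernel. Below is a minimal userland
sketch of the same idea, with invented names (obj_clean_p, obj_set_dirty, and
the lockmode field) standing in for the new uvm_obj_* functions and the
object's vmobjlock: queries assert the lock is held in any mode, while
updates assert it is held exclusively. This is an illustration of the
technique, not NetBSD kernel code.

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    enum lockmode { UNLOCKED, READ_LOCKED, WRITE_LOCKED };

    struct obj {
        enum lockmode lock;   /* stand-in for uobj->vmobjlock */
        bool dirty_tag;       /* stand-in for UVM_PAGE_DIRTY_TAG */
    };

    /* Query: holding the lock in either mode suffices (cf. rw_lock_held()). */
    static bool
    obj_clean_p(const struct obj *o)
    {
        assert(o->lock != UNLOCKED);
        return !o->dirty_tag;
    }

    /* Update: the write lock is required (cf. rw_write_held()). */
    static void
    obj_set_dirty(struct obj *o)
    {
        assert(o->lock == WRITE_LOCKED);
        o->dirty_tag = true;
    }

    int
    main(void)
    {
        struct obj o = { .lock = WRITE_LOCKED, .dirty_tag = false };

        obj_set_dirty(&o);        /* OK: write-locked */
        o.lock = READ_LOCKED;     /* downgrade: queries still allowed */
        printf("clean: %s\n", obj_clean_p(&o) ? "yes" : "no");
        return 0;
    }

Centralizing the calls this way means the lock-mode invariant is checked once,
inside the wrapper, instead of being implicit at every call site.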
diffstat:
sys/miscfs/genfs/genfs_io.c | 30 ++++--------
sys/uvm/uvm_extern.h | 11 +++-
sys/uvm/uvm_object.c | 101 +++++++++++++++++++++++++++++++++++++++++++-
sys/uvm/uvm_object.h | 6 +-
sys/uvm/uvm_page.c | 25 ++++++---
sys/uvm/uvm_page_status.c | 21 +++-----
sys/uvm/uvm_pager.c | 10 +--
sys/uvm/uvm_vnode.c | 19 ++------
8 files changed, 155 insertions(+), 68 deletions(-)
diffs (truncated from 503 to 300 lines):
diff -r eb08055db7f5 -r d529e89fcfcb sys/miscfs/genfs/genfs_io.c
--- a/sys/miscfs/genfs/genfs_io.c Fri Aug 14 08:46:54 2020 +0000
+++ b/sys/miscfs/genfs/genfs_io.c Fri Aug 14 09:06:14 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: genfs_io.c,v 1.99 2020/08/10 11:09:15 rin Exp $ */
+/* $NetBSD: genfs_io.c,v 1.100 2020/08/14 09:06:14 chs Exp $ */
/*
* Copyright (c) 1982, 1986, 1989, 1993
@@ -31,7 +31,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.99 2020/08/10 11:09:15 rin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.100 2020/08/14 09:06:14 chs Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -913,8 +913,7 @@
* shortcut if we have no pages to process.
*/
- nodirty = radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
- UVM_PAGE_DIRTY_TAG);
+ nodirty = uvm_obj_clean_p(uobj);
#ifdef DIAGNOSTIC
mutex_enter(vp->v_interlock);
KASSERT((vp->v_iflag & VI_ONWORKLST) != 0 || nodirty);
@@ -922,9 +921,8 @@
#endif
if (uobj->uo_npages == 0 || (dirtyonly && nodirty)) {
mutex_enter(vp->v_interlock);
- if (vp->v_iflag & VI_ONWORKLST) {
- if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
- vn_syncer_remove_from_worklist(vp);
+ if (vp->v_iflag & VI_ONWORKLST && LIST_EMPTY(&vp->v_dirtyblkhd)) {
+ vn_syncer_remove_from_worklist(vp);
}
mutex_exit(vp->v_interlock);
if (trans_mp) {
@@ -978,8 +976,7 @@
}
error = 0;
- wasclean = radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
- UVM_PAGE_WRITEBACK_TAG);
+ wasclean = uvm_obj_nowriteback_p(uobj);
nextoff = startoff;
if (endoff == 0 || flags & PGO_ALLPAGES) {
endoff = trunc_page(LLONG_MAX);
@@ -1030,8 +1027,7 @@
KASSERT(pg->offset >= nextoff);
KASSERT(!dirtyonly ||
uvm_pagegetdirty(pg) != UVM_PAGE_STATUS_CLEAN ||
- radix_tree_get_tag(&uobj->uo_pages,
- pg->offset >> PAGE_SHIFT, UVM_PAGE_WRITEBACK_TAG));
+ uvm_obj_page_writeback_p(pg));
if (pg->offset >= endoff) {
break;
@@ -1245,9 +1241,7 @@
* mark pages as WRITEBACK so that concurrent
* fsync can find and wait for our activities.
*/
- radix_tree_set_tag(&uobj->uo_pages,
- pgs[i]->offset >> PAGE_SHIFT,
- UVM_PAGE_WRITEBACK_TAG);
+ uvm_obj_page_set_writeback(pgs[i]);
}
if (tpg->offset < startoff || tpg->offset >= endoff)
continue;
@@ -1332,11 +1326,9 @@
* syncer list.
*/
- if ((vp->v_iflag & VI_ONWORKLST) != 0 &&
- radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
- UVM_PAGE_DIRTY_TAG)) {
- if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
- vn_syncer_remove_from_worklist(vp);
+ if ((vp->v_iflag & VI_ONWORKLST) != 0 && uvm_obj_clean_p(uobj) &&
+ LIST_EMPTY(&vp->v_dirtyblkhd)) {
+ vn_syncer_remove_from_worklist(vp);
}
#if !defined(DEBUG)
diff -r eb08055db7f5 -r d529e89fcfcb sys/uvm/uvm_extern.h
--- a/sys/uvm/uvm_extern.h Fri Aug 14 08:46:54 2020 +0000
+++ b/sys/uvm/uvm_extern.h Fri Aug 14 09:06:14 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_extern.h,v 1.230 2020/06/14 22:25:15 ad Exp $ */
+/* $NetBSD: uvm_extern.h,v 1.231 2020/08/14 09:06:15 chs Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -776,6 +776,14 @@
int uvm_obj_wirepages(struct uvm_object *, off_t, off_t,
struct pglist *);
void uvm_obj_unwirepages(struct uvm_object *, off_t, off_t);
+bool uvm_obj_clean_p(struct uvm_object *);
+bool uvm_obj_nowriteback_p(struct uvm_object *);
+bool uvm_obj_page_dirty_p(struct vm_page *);
+void uvm_obj_page_set_dirty(struct vm_page *);
+void uvm_obj_page_clear_dirty(struct vm_page *);
+bool uvm_obj_page_writeback_p(struct vm_page *);
+void uvm_obj_page_set_writeback(struct vm_page *);
+void uvm_obj_page_clear_writeback(struct vm_page *);
/* uvm_page.c */
int uvm_availmem(bool);
@@ -826,7 +834,6 @@
unsigned int *, struct vm_page **,
struct uvm_page_array *, unsigned int);
bool uvn_text_p(struct uvm_object *);
-bool uvn_clean_p(struct uvm_object *);
bool uvn_needs_writefault_p(struct uvm_object *);
/* kern_malloc.c */
diff -r eb08055db7f5 -r d529e89fcfcb sys/uvm/uvm_object.c
--- a/sys/uvm/uvm_object.c Fri Aug 14 08:46:54 2020 +0000
+++ b/sys/uvm/uvm_object.c Fri Aug 14 09:06:14 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_object.c,v 1.23 2020/05/25 21:15:10 ad Exp $ */
+/* $NetBSD: uvm_object.c,v 1.24 2020/08/14 09:06:15 chs Exp $ */
/*
* Copyright (c) 2006, 2010, 2019 The NetBSD Foundation, Inc.
@@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_object.c,v 1.23 2020/05/25 21:15:10 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_object.c,v 1.24 2020/08/14 09:06:15 chs Exp $");
#ifdef _KERNEL_OPT
#include "opt_ddb.h"
@@ -233,6 +233,103 @@
rw_exit(uobj->vmobjlock);
}
+static inline bool
+uvm_obj_notag_p(struct uvm_object *uobj, int tag)
+{
+
+ KASSERT(rw_lock_held(uobj->vmobjlock));
+ return radix_tree_empty_tagged_tree_p(&uobj->uo_pages, tag);
+}
+
+bool
+uvm_obj_clean_p(struct uvm_object *uobj)
+{
+
+ return uvm_obj_notag_p(uobj, UVM_PAGE_DIRTY_TAG);
+}
+
+bool
+uvm_obj_nowriteback_p(struct uvm_object *uobj)
+{
+
+ return uvm_obj_notag_p(uobj, UVM_PAGE_WRITEBACK_TAG);
+}
+
+static inline bool
+uvm_obj_page_tag_p(struct vm_page *pg, int tag)
+{
+ struct uvm_object *uobj = pg->uobject;
+ int pgidx = pg->offset >> PAGE_SHIFT;
+
+ KASSERT(uobj != NULL);
+ KASSERT(rw_lock_held(uobj->vmobjlock));
+ return radix_tree_get_tag(&uobj->uo_pages, pgidx, tag) != 0;
+}
+
+static inline void
+uvm_obj_page_set_tag(struct vm_page *pg, int tag)
+{
+ struct uvm_object *uobj = pg->uobject;
+ int pgidx = pg->offset >> PAGE_SHIFT;
+
+ KASSERT(uobj != NULL);
+ KASSERT(rw_write_held(uobj->vmobjlock));
+ radix_tree_set_tag(&uobj->uo_pages, pgidx, tag);
+}
+
+static inline void
+uvm_obj_page_clear_tag(struct vm_page *pg, int tag)
+{
+ struct uvm_object *uobj = pg->uobject;
+ int pgidx = pg->offset >> PAGE_SHIFT;
+
+ KASSERT(uobj != NULL);
+ KASSERT(rw_write_held(uobj->vmobjlock));
+ radix_tree_clear_tag(&uobj->uo_pages, pgidx, tag);
+}
+
+bool
+uvm_obj_page_dirty_p(struct vm_page *pg)
+{
+
+ return uvm_obj_page_tag_p(pg, UVM_PAGE_DIRTY_TAG);
+}
+
+void
+uvm_obj_page_set_dirty(struct vm_page *pg)
+{
+
+ uvm_obj_page_set_tag(pg, UVM_PAGE_DIRTY_TAG);
+}
+
+void
+uvm_obj_page_clear_dirty(struct vm_page *pg)
+{
+
+ uvm_obj_page_clear_tag(pg, UVM_PAGE_DIRTY_TAG);
+}
+
+bool
+uvm_obj_page_writeback_p(struct vm_page *pg)
+{
+
+ return uvm_obj_page_tag_p(pg, UVM_PAGE_WRITEBACK_TAG);
+}
+
+void
+uvm_obj_page_set_writeback(struct vm_page *pg)
+{
+
+ uvm_obj_page_set_tag(pg, UVM_PAGE_WRITEBACK_TAG);
+}
+
+void
+uvm_obj_page_clear_writeback(struct vm_page *pg)
+{
+
+ uvm_obj_page_clear_tag(pg, UVM_PAGE_WRITEBACK_TAG);
+}
+
#if defined(DDB) || defined(DEBUGPRINT)
/*
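With the wrappers above in place, call sites no longer touch the radix tree
directly. As an illustrative fragment in the style of the converted sites in
this diff (pg and uobj are placeholders, not a literal call site), dirtying a
page now looks like:

    /* Write mode is required to change tags; the wrapper asserts it. */
    rw_enter(uobj->vmobjlock, RW_WRITER);
    uvm_obj_page_set_dirty(pg);   /* was: radix_tree_set_tag(&uobj->uo_pages,
                                     pg->offset >> PAGE_SHIFT,
                                     UVM_PAGE_DIRTY_TAG) */
    rw_exit(uobj->vmobjlock);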
diff -r eb08055db7f5 -r d529e89fcfcb sys/uvm/uvm_object.h
--- a/sys/uvm/uvm_object.h Fri Aug 14 08:46:54 2020 +0000
+++ b/sys/uvm/uvm_object.h Fri Aug 14 09:06:14 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_object.h,v 1.38 2020/03/14 20:45:23 ad Exp $ */
+/* $NetBSD: uvm_object.h,v 1.39 2020/08/14 09:06:15 chs Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -104,7 +104,7 @@
(UVM_OBJ_IS_VNODE(uobj) && uvn_text_p(uobj))
#define UVM_OBJ_IS_CLEAN(uobj) \
- (UVM_OBJ_IS_VNODE(uobj) && uvn_clean_p(uobj))
+ (UVM_OBJ_IS_VNODE(uobj) && uvm_obj_clean_p(uobj))
/*
* UVM_OBJ_NEEDS_WRITEFAULT: true if the uobj needs to detect modification.
@@ -114,7 +114,7 @@
*/
#define UVM_OBJ_NEEDS_WRITEFAULT(uobj) \
- (UVM_OBJ_IS_VNODE(uobj) && uvn_clean_p(uobj))
+ (UVM_OBJ_IS_VNODE(uobj) && uvm_obj_clean_p(uobj))
#define UVM_OBJ_IS_AOBJ(uobj) \
((uobj)->pgops == &aobj_pager)
diff -r eb08055db7f5 -r d529e89fcfcb sys/uvm/uvm_page.c
--- a/sys/uvm/uvm_page.c Fri Aug 14 08:46:54 2020 +0000
+++ b/sys/uvm/uvm_page.c Fri Aug 14 09:06:14 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_page.c,v 1.244 2020/07/09 05:57:15 skrll Exp $ */
+/* $NetBSD: uvm_page.c,v 1.245 2020/08/14 09:06:15 chs Exp $ */
/*-
* Copyright (c) 2019, 2020 The NetBSD Foundation, Inc.
@@ -95,7 +95,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.244 2020/07/09 05:57:15 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.245 2020/08/14 09:06:15 chs Exp $");
#include "opt_ddb.h"
#include "opt_uvm.h"
@@ -240,15 +240,17 @@
const uint64_t idx = pg->offset >> PAGE_SHIFT;
int error;
+ KASSERT(rw_write_held(uobj->vmobjlock));
+
error = radix_tree_insert_node(&uobj->uo_pages, idx, pg);
if (error != 0) {
return error;
}
if ((pg->flags & PG_CLEAN) == 0) {
- radix_tree_set_tag(&uobj->uo_pages, idx, UVM_PAGE_DIRTY_TAG);
+ uvm_obj_page_set_dirty(pg);
}
KASSERT(((pg->flags & PG_CLEAN) == 0) ==