Source-Changes-HG archive
[src/trunk]: src/sys/arch per-cpu shadow directory pages should be updated locally via cross-calls
details: https://anonhg.NetBSD.org/src/rev/6afcd7092fe9
branches: trunk
changeset: 772348:6afcd7092fe9
user: cherry <cherry@NetBSD.org>
date: Fri Dec 30 16:55:21 2011 +0000
description:
per-cpu shadow directory pages should be updated locally via cross-calls. Do this.
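
For readers unfamiliar with the mechanism: the change relies on NetBSD's
xcall(9) interface, which queues a handler on remote CPUs and lets the
caller wait for completion, so each CPU updates its own shadow page
directory instead of one CPU poking at all of them through the update
queue. A minimal sketch of the broadcast pattern, assuming a made-up
handler (the example_* names are illustrative, not part of this commit):

    #include <sys/types.h>
    #include <sys/systm.h>
    #include <sys/cpu.h>
    #include <sys/xcall.h>

    /* Handler with the xcfunc_t signature; runs on each chosen CPU. */
    static void
    example_sync(void *arg1, void *arg2)
    {
            int index = *(int *)arg2;

            /* Touch only the executing CPU's private state here. */
            printf("cpu%u: syncing slot %d\n",
                cpu_index(curcpu()), index);
    }

    static void
    example_sync_all(int index)
    {
            uint64_t where;

            /* Queue the handler on every CPU at high priority... */
            where = xc_broadcast(XC_HIGHPRI, example_sync, NULL, &index);
            /* ...and block until all CPUs have run it. */
            xc_wait(where);
    }
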
diffstat:
sys/arch/x86/x86/pmap.c | 52 ++--------------
sys/arch/xen/include/xenpmap.h | 6 +-
sys/arch/xen/x86/cpu.c | 5 +-
sys/arch/xen/x86/xen_pmap.c | 124 ++++++++++++++++++++++++++++++++++++++++-
4 files changed, 137 insertions(+), 50 deletions(-)
diffs (280 lines):
diff -r 1a04c0f3de6d -r 6afcd7092fe9 sys/arch/x86/x86/pmap.c
--- a/sys/arch/x86/x86/pmap.c Fri Dec 30 14:32:31 2011 +0000
+++ b/sys/arch/x86/x86/pmap.c Fri Dec 30 16:55:21 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.147 2011/12/09 17:32:51 chs Exp $ */
+/* $NetBSD: pmap.c,v 1.148 2011/12/30 16:55:21 cherry Exp $ */
/*-
* Copyright (c) 2008, 2010 The NetBSD Foundation, Inc.
@@ -171,7 +171,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.147 2011/12/09 17:32:51 chs Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.148 2011/12/30 16:55:21 cherry Exp $");
#include "opt_user_ldt.h"
#include "opt_lockdebug.h"
@@ -1915,16 +1915,7 @@
* Update the per-cpu PD on all cpus the current
* pmap is active on
*/
- CPU_INFO_ITERATOR cii;
- struct cpu_info *ci;
- for (CPU_INFO_FOREACH(cii, ci)) {
- if (ci == NULL) {
- continue;
- }
- if (ci->ci_cpumask & pmap->pm_cpus) {
- pmap_pte_set(&ci->ci_kpm_pdir[index], 0);
- }
- }
+ xen_kpm_sync(pmap, index);
}
# endif /*__x86_64__ */
invaladdr = level == 1 ? (vaddr_t)ptes :
@@ -2029,17 +2020,7 @@
* Update the per-cpu PD on all cpus the current
* pmap is active on
*/
- CPU_INFO_ITERATOR cii;
- struct cpu_info *ci;
- for (CPU_INFO_FOREACH(cii, ci)) {
- if (ci == NULL) {
- continue;
- }
- if (ci->ci_cpumask & pmap->pm_cpus) {
- pmap_pte_set(&ci->ci_kpm_pdir[index],
- (pd_entry_t) (pmap_pa2pte(pa) | PG_u | PG_RW | PG_V));
- }
- }
+ xen_kpm_sync(pmap, index);
}
#endif /* XEN && __x86_64__ */
pmap_pte_flush();
@@ -4247,33 +4228,14 @@
pmap_get_physpage(va, level - 1, &pa);
pte = pmap_pa2pte(pa) | PG_k | PG_V | PG_RW;
#ifdef XEN
- switch (level) {
- case PTP_LEVELS:
+ xpq_queue_pte_update(xpmap_ptetomach(&pdep[i]), pte);
+ if (level == PTP_LEVELS) {
#if defined(PAE) || defined(__x86_64__)
if (i >= PDIR_SLOT_KERN) {
/* update per-cpu PMDs on all cpus */
- CPU_INFO_ITERATOR cii;
- struct cpu_info *ci;
- for (CPU_INFO_FOREACH(cii, ci)) {
- if (ci == NULL) {
- continue;
- }
-#ifdef PAE
- xpq_queue_pte_update(
- xpmap_ptetomach(&ci->ci_kpm_pdir[l2tol2(i)]), pte);
-#elif defined(__x86_64__)
- xpq_queue_pte_update(
- xpmap_ptetomach(&ci->ci_kpm_pdir[i]), pte);
-#endif /* PAE */
- }
+ xen_kpm_sync(pmap_kernel(), i);
}
#endif /* PAE || __x86_64__ */
- /* FALLTHROUGH */
-
- default: /* All other levels */
- xpq_queue_pte_update(
- xpmap_ptetomach(&pdep[i]),
- pte);
}
#else /* XEN */
pdep[i] = pte;
diff -r 1a04c0f3de6d -r 6afcd7092fe9 sys/arch/xen/include/xenpmap.h
--- a/sys/arch/xen/include/xenpmap.h Fri Dec 30 14:32:31 2011 +0000
+++ b/sys/arch/xen/include/xenpmap.h Fri Dec 30 16:55:21 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: xenpmap.h,v 1.32 2011/11/23 00:56:56 jym Exp $ */
+/* $NetBSD: xenpmap.h,v 1.33 2011/12/30 16:55:21 cherry Exp $ */
/*
*
@@ -61,6 +61,10 @@
void pmap_unmap_recursive_entries(void);
#endif /* PAE */
+#if defined(PAE) || defined(__x86_64__)
+void xen_kpm_sync(struct pmap *, int);
+#endif /* PAE || __x86_64__ */
+
#define xpq_queue_pin_l1_table(pa) \
xpq_queue_pin_table(pa, MMUEXT_PIN_L1_TABLE)
#define xpq_queue_pin_l2_table(pa) \
diff -r 1a04c0f3de6d -r 6afcd7092fe9 sys/arch/xen/x86/cpu.c
--- a/sys/arch/xen/x86/cpu.c Fri Dec 30 14:32:31 2011 +0000
+++ b/sys/arch/xen/x86/cpu.c Fri Dec 30 16:55:21 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: cpu.c,v 1.71 2011/12/07 15:47:43 cegger Exp $ */
+/* $NetBSD: cpu.c,v 1.72 2011/12/30 16:55:21 cherry Exp $ */
/* NetBSD: cpu.c,v 1.18 2004/02/20 17:35:01 yamt Exp */
/*-
@@ -66,7 +66,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.71 2011/12/07 15:47:43 cegger Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.72 2011/12/30 16:55:21 cherry Exp $");
#include "opt_ddb.h"
#include "opt_multiprocessor.h"
@@ -1102,6 +1102,7 @@
{
return &cpu_info_primary;
}
+/* XXX: rename to something more generic. users other than xpq exist */
struct cpu_info * (*xpq_cpu)(void) = cpu_primary;
void
diff -r 1a04c0f3de6d -r 6afcd7092fe9 sys/arch/xen/x86/xen_pmap.c
--- a/sys/arch/xen/x86/xen_pmap.c Fri Dec 30 14:32:31 2011 +0000
+++ b/sys/arch/xen/x86/xen_pmap.c Fri Dec 30 16:55:21 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: xen_pmap.c,v 1.11 2011/12/07 15:47:43 cegger Exp $ */
+/* $NetBSD: xen_pmap.c,v 1.12 2011/12/30 16:55:21 cherry Exp $ */
/*
* Copyright (c) 2007 Manuel Bouyer.
@@ -102,7 +102,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xen_pmap.c,v 1.11 2011/12/07 15:47:43 cegger Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xen_pmap.c,v 1.12 2011/12/30 16:55:21 cherry Exp $");
#include "opt_user_ldt.h"
#include "opt_lockdebug.h"
@@ -550,3 +550,123 @@
}
#endif /* PAE */
+
+#if defined(PAE) || defined(__x86_64__)
+
+extern struct cpu_info * (*xpq_cpu)(void);
+static __inline void
+pmap_kpm_setpte(struct cpu_info *ci, int index)
+{
+#ifdef PAE
+ xpq_queue_pte_update(
+ xpmap_ptetomach(&ci->ci_kpm_pdir[l2tol2(index)]),
+ pmap_kernel()->pm_pdir[index]);
+#elif defined(__x86_64__)
+ xpq_queue_pte_update(
+ xpmap_ptetomach(&ci->ci_kpm_pdir[index]),
+ pmap_kernel()->pm_pdir[index]);
+#endif /* PAE */
+}
+
+static void
+pmap_kpm_sync_xcall(void *arg1, void *arg2)
+{
+ KASSERT(arg1 != NULL);
+ KASSERT(arg2 != NULL);
+
+ struct pmap *pmap = arg1;
+ int index = *(int *)arg2;
+ struct cpu_info *ci = xpq_cpu();
+
+ if (pmap == pmap_kernel()) {
+ KASSERT(index >= PDIR_SLOT_KERN);
+ pmap_kpm_setpte(ci, index);
+ pmap_pte_flush();
+ return;
+ }
+
+#ifdef PAE
+ KASSERTMSG(false, "%s not allowed for PAE user pmaps", __func__);
+ return;
+#else /* __x86_64__ */
+
+ if (ci->ci_pmap != pmap) {
+ /* pmap changed. Nothing to do. */
+ return;
+ }
+
+ pmap_pte_set(&ci->ci_kpm_pdir[index],
+ pmap_kernel()->pm_pdir[index]);
+ pmap_pte_flush();
+#endif /* PAE || __x86_64__ */
+}
+
+/*
+ * Synchronise shadow pdir with the pmap on all cpus on which it is
+ * loaded.
+ */
+void
+xen_kpm_sync(struct pmap *pmap, int index)
+{
+ uint64_t where;
+
+ KASSERT(pmap != NULL);
+
+ pmap_pte_flush();
+
+ if (__predict_false(xpq_cpu != &x86_curcpu)) { /* Too early to xcall */
+ CPU_INFO_ITERATOR cii;
+ struct cpu_info *ci;
+ for (CPU_INFO_FOREACH(cii, ci)) {
+ if (ci == NULL) {
+ continue;
+ }
+ if (pmap == pmap_kernel() ||
+ ci->ci_cpumask & pmap->pm_cpus) {
+ pmap_kpm_setpte(ci, index);
+ }
+ }
+ pmap_pte_flush();
+ return;
+ }
+
+ if (pmap == pmap_kernel()) {
+ where = xc_broadcast(XC_HIGHPRI,
+ pmap_kpm_sync_xcall, pmap, &index);
+ xc_wait(where);
+ } else {
+ KASSERT(mutex_owned(pmap->pm_lock));
+ KASSERT(kpreempt_disabled());
+
+ CPU_INFO_ITERATOR cii;
+ struct cpu_info *ci;
+ for (CPU_INFO_FOREACH(cii, ci)) {
+ if (ci == NULL) {
+ continue;
+ }
+ while (ci->ci_cpumask & pmap->pm_cpus) {
+#ifdef MULTIPROCESSOR
+#define CPU_IS_CURCPU(ci) __predict_false((ci) == curcpu())
+#else /* MULTIPROCESSOR */
+#define CPU_IS_CURCPU(ci) __predict_true((ci) == curcpu())
+#endif /* MULTIPROCESSOR */
+ if (ci->ci_want_pmapload &&
+ !CPU_IS_CURCPU(ci)) {
+ /*
+ * XXX: make this more cpu
+ * cycle friendly/co-operate
+ * with pmap_load()
+ */
+ continue;
+ }
+
+ where = xc_unicast(XC_HIGHPRI, pmap_kpm_sync_xcall,
+ pmap, &index, ci);
+ xc_wait(where);
+ break;
+ }
+ }
+ }
+}
+
+#endif /* PAE || __x86_64__ */
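
A note on the design, as far as it can be read from the diff above: for
the kernel pmap, xen_kpm_sync() broadcasts to all CPUs, while for user
pmaps it cross-calls only the CPUs in pm_cpus individually, since only
CPUs that currently have the pmap loaded carry a shadow entry worth
patching; the handler re-checks ci_pmap because the pmap may have been
switched out between the mask test and the cross-call's arrival. The
first branch (xpq_cpu != &x86_curcpu) falls back to updating the shadow
directories directly because, per its comment, it is too early in boot
to issue cross-calls. For reference, the unicast form of the same
interface (continuing the illustrative example_sync from above):

    /* Run the handler on one specific CPU and wait for it. */
    where = xc_unicast(XC_HIGHPRI, example_sync, NULL, &index, ci);
    xc_wait(where);
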