Source-Changes-HG archive


[src/trunk]: src/sys/arch/xen/x86 Reduce the size of the blocks. No functional change.



details:   https://anonhg.NetBSD.org/src/rev/0e4fb8964a2a
branches:  trunk
changeset: 991710:0e4fb8964a2a
user:      maxv <maxv%NetBSD.org@localhost>
date:      Fri Jul 27 09:37:31 2018 +0000

description:
Reduce the size of the blocks. No functional change.

diffstat:

 sys/arch/xen/x86/cpu.c |  81 ++++++++++++++++++++++---------------------------
 1 files changed, 36 insertions(+), 45 deletions(-)

diffs (134 lines):

diff -r 32d47c22c022 -r 0e4fb8964a2a sys/arch/xen/x86/cpu.c
--- a/sys/arch/xen/x86/cpu.c    Fri Jul 27 09:22:40 2018 +0000
+++ b/sys/arch/xen/x86/cpu.c    Fri Jul 27 09:37:31 2018 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: cpu.c,v 1.124 2018/07/26 17:20:09 maxv Exp $   */
+/*     $NetBSD: cpu.c,v 1.125 2018/07/27 09:37:31 maxv Exp $   */
 
 /*-
  * Copyright (c) 2000 The NetBSD Foundation, Inc.
@@ -65,7 +65,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.124 2018/07/26 17:20:09 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.125 2018/07/27 09:37:31 maxv Exp $");
 
 #include "opt_ddb.h"
 #include "opt_multiprocessor.h"
@@ -1114,6 +1114,7 @@
 {
        struct cpu_info *ci = curcpu();
        cpuid_t cid = cpu_index(ci);
+       int i;
 
        KASSERT(pmap != pmap_kernel());
 
@@ -1121,50 +1122,40 @@
        /* make new pmap visible to xen_kpm_sync() */
        kcpuset_atomic_set(pmap->pm_xen_ptp_cpus, cid);
 
-#ifdef i386
-       {
-               int i;
-               paddr_t l3_pd = xpmap_ptom_masked(ci->ci_pae_l3_pdirpa);
-               /* don't update the kernel L3 slot */
-               for (i = 0 ; i < PDP_SIZE - 1; i++) {
-                       xpq_queue_pte_update(l3_pd + i * sizeof(pd_entry_t),
-                           xpmap_ptom(pmap->pm_pdirpa[i]) | PG_V);
+#ifdef __x86_64__
+       pd_entry_t *new_pgd;
+       paddr_t l4_pd_ma;
+
+       l4_pd_ma = xpmap_ptom_masked(ci->ci_kpm_pdirpa);
+
+       /*
+        * Map user space address in kernel space and load
+        * user cr3
+        */
+       new_pgd = pmap->pm_pdir;
+       KASSERT(pmap == ci->ci_pmap);
+
+       /* Copy user pmap L4 PDEs (in user addr. range) to per-cpu L4 */
+       for (i = 0; i < PDIR_SLOT_PTE; i++) {
+               KASSERT(pmap != pmap_kernel() || new_pgd[i] == 0);
+               if (ci->ci_kpm_pdir[i] != new_pgd[i]) {
+                       xpq_queue_pte_update(l4_pd_ma + i * sizeof(pd_entry_t),
+                           new_pgd[i]);
                }
-               tlbflush();
+       }
+
+       xen_set_user_pgd(pmap_pdirpa(pmap, 0));
+       ci->ci_xen_current_user_pgd = pmap_pdirpa(pmap, 0);
+#else
+       paddr_t l3_pd = xpmap_ptom_masked(ci->ci_pae_l3_pdirpa);
+       /* don't update the kernel L3 slot */
+       for (i = 0; i < PDP_SIZE - 1; i++) {
+               xpq_queue_pte_update(l3_pd + i * sizeof(pd_entry_t),
+                   xpmap_ptom(pmap->pm_pdirpa[i]) | PG_V);
        }
 #endif
 
-#ifdef __x86_64__
-       {
-               int i;
-               pd_entry_t *new_pgd;
-               paddr_t l4_pd_ma;
-
-               l4_pd_ma = xpmap_ptom_masked(ci->ci_kpm_pdirpa);
-
-               /*
-                * Map user space address in kernel space and load
-                * user cr3
-                */
-               new_pgd = pmap->pm_pdir;
-               KASSERT(pmap == ci->ci_pmap);
-
-               /* Copy user pmap L4 PDEs (in user addr. range) to per-cpu L4 */
-               for (i = 0; i < PDIR_SLOT_PTE; i++) {
-                       KASSERT(pmap != pmap_kernel() || new_pgd[i] == 0);
-                       if (ci->ci_kpm_pdir[i] != new_pgd[i]) {
-                               xpq_queue_pte_update(
-                                   l4_pd_ma + i * sizeof(pd_entry_t),
-                                   new_pgd[i]);
-                       }
-               }
-
-               xen_set_user_pgd(pmap_pdirpa(pmap, 0));
-               ci->ci_xen_current_user_pgd = pmap_pdirpa(pmap, 0);
-
-               tlbflush();
-       }
-#endif
+       tlbflush();
 
        /* old pmap no longer visible to xen_kpm_sync() */
        if (oldpmap != pmap_kernel()) {
@@ -1195,6 +1186,8 @@
 void
 pmap_cpu_init_late(struct cpu_info *ci)
 {
+       int i;
+
        /*
         * The BP has already its own PD page allocated during early
         * MD startup.
@@ -1202,7 +1195,6 @@
 
 #ifdef __x86_64__
        /* Setup per-cpu normal_pdes */
-       int i;
        extern pd_entry_t * const normal_pdes[];
        for (i = 0;i < PTP_LEVELS - 1;i++) {
                ci->ci_normal_pdes[i] = normal_pdes[i];
@@ -1219,8 +1211,7 @@
        KASSERT(ci->ci_pae_l3_pdirpa != 0);
 
        /* Initialise L2 entries 0 - 2: Point them to pmap_kernel() */
-       int i;
-       for (i = 0 ; i < PDP_SIZE - 1; i++) {
+       for (i = 0; i < PDP_SIZE - 1; i++) {
                ci->ci_pae_l3_pdir[i] =
                    xpmap_ptom_masked(pmap_kernel()->pm_pdirpa[i]) | PG_V;
        }
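
The refactor shown in the diff above follows a simple mechanical pattern: the loop counter is hoisted to function scope, the two independent "#ifdef i386" and "#ifdef __x86_64__" compound blocks are folded into a single "#ifdef __x86_64__" / "#else" pair, and the trailing tlbflush() call is shared by both branches, which removes one level of indentation and the duplication. Below is a minimal before/after sketch of that pattern only; the helper names work_i386()/work_amd64() and the stub bodies are illustrative placeholders, not the real cpu.c code.

/*
 * Sketch only: stand-ins for the real per-architecture work done in
 * sys/arch/xen/x86/cpu.c.  Names and bodies are hypothetical.
 */
static void work_i386(int i)  { (void)i; }
static void work_amd64(int i) { (void)i; }
static void tlbflush(void)    { }

/*
 * Before: one brace block per architecture, each declaring its own 'i'
 * and ending with its own tlbflush().
 */
static void
pattern_before(void)
{
#ifdef i386
	{
		int i;
		for (i = 0; i < 3; i++)
			work_i386(i);
		tlbflush();
	}
#endif
#ifdef __x86_64__
	{
		int i;
		for (i = 0; i < 4; i++)
			work_amd64(i);
		tlbflush();
	}
#endif
}

/*
 * After: 'i' declared once at function scope, a single #ifdef/#else
 * pair, and one shared tlbflush(); the bodies lose a level of
 * indentation and the duplicated tail disappears.
 */
static void
pattern_after(void)
{
	int i;

#ifdef __x86_64__
	for (i = 0; i < 4; i++)
		work_amd64(i);
#else
	for (i = 0; i < 3; i++)
		work_i386(i);
#endif

	tlbflush();
}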


