Source-Changes-HG archive


[src/trunk]: src/sys/arch/x86_64/x86_64 MTRR support for x86_64. Will be shared with i386 later.



details:   https://anonhg.NetBSD.org/src/rev/238f56a70097
branches:  trunk
changeset: 532958:238f56a70097
user:      fvdl <fvdl@NetBSD.org>
date:      Tue Jun 18 08:30:33 2002 +0000

description:
MTRR support for x86_64. Will be shared with i386 later.

diffstat:

 sys/arch/x86_64/x86_64/mtrr.c |  702 ++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 702 insertions(+), 0 deletions(-)

diffs (truncated from 706 to 300 lines):

diff -r ebe7b71e9d3b -r 238f56a70097 sys/arch/x86_64/x86_64/mtrr.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/sys/arch/x86_64/x86_64/mtrr.c     Tue Jun 18 08:30:33 2002 +0000
@@ -0,0 +1,702 @@
+/*     $NetBSD: mtrr.c,v 1.1 2002/06/18 08:30:33 fvdl Exp $ */
+
+/*-
+ * Copyright (c) 2000 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Bill Sommerfeld.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *        This product includes software developed by the NetBSD
+ *        Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__KERNEL_RCSID(0, "$NetBSD: mtrr.c,v 1.1 2002/06/18 08:30:33 fvdl Exp $");
+
+#include "opt_multiprocessor.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/lock.h>
+#include <sys/user.h>
+#include <sys/malloc.h>
+
+#include <uvm/uvm_extern.h>
+
+#include <machine/specialreg.h>
+#include <machine/cpufunc.h>
+#include <machine/mtrr.h>
+
+extern paddr_t avail_end;
+
+static void i686_mtrr_reload(int);
+static void i686_mtrr_init_cpu(struct cpu_info *);
+static void i686_mtrr_reload_cpu(struct cpu_info *);
+static void i686_mtrr_clean(struct proc *p);
+static int i686_mtrr_set(struct mtrr *, int *n, struct proc *p, int flags);
+static int i686_mtrr_get(struct mtrr *, int *n, struct proc *p, int flags);
+static void i686_mtrr_dump(const char *tag);
+
+static int i686_mtrr_validate(struct mtrr *, struct proc *p);
+static void i686_soft2raw(void);
+static void i686_raw2soft(void);
+static void i686_mtrr_commit(void);
+static int i686_mtrr_setone(struct mtrr *, struct proc *p);
+
+
+static struct mtrr_state
+mtrr_raw[] = {
+       { MSR_MTRRphysBase0 },
+       { MSR_MTRRphysMask0 },
+       { MSR_MTRRphysBase1 },
+       { MSR_MTRRphysMask1 },
+       { MSR_MTRRphysBase2 },
+       { MSR_MTRRphysMask2 },
+       { MSR_MTRRphysBase3 },
+       { MSR_MTRRphysMask3 },
+       { MSR_MTRRphysBase4 },
+       { MSR_MTRRphysMask4 },
+       { MSR_MTRRphysBase5 },
+       { MSR_MTRRphysMask5 },
+       { MSR_MTRRphysBase6 },
+       { MSR_MTRRphysMask6 },
+       { MSR_MTRRphysBase7 },
+       { MSR_MTRRphysMask7 },
+       { MSR_MTRRfix64K_00000 },
+       { MSR_MTRRfix16K_80000 },
+       { MSR_MTRRfix16K_A0000 },
+       { MSR_MTRRfix4K_C0000 },
+       { MSR_MTRRfix4K_C8000 },
+       { MSR_MTRRfix4K_D0000 },
+       { MSR_MTRRfix4K_D8000 },
+       { MSR_MTRRfix4K_E0000 },
+       { MSR_MTRRfix4K_E8000 },
+       { MSR_MTRRfix4K_F0000 },
+       { MSR_MTRRfix4K_F8000 },
+       { MSR_MTRRdefType }
+};
+
+static const int nmtrr_raw = sizeof(mtrr_raw)/sizeof(mtrr_raw[0]);
+
+static struct mtrr_state *mtrr_var_raw;
+static struct mtrr_state *mtrr_fixed_raw;
+
+static struct mtrr *mtrr_fixed;
+static struct mtrr *mtrr_var;
+
+struct mtrr_funcs i686_mtrr_funcs = {
+       i686_mtrr_init_cpu,
+       i686_mtrr_reload_cpu,
+       i686_mtrr_clean,
+       i686_mtrr_set,
+       i686_mtrr_get,
+       i686_mtrr_commit,
+       i686_mtrr_dump
+};
+
+#ifdef MULTIPROCESSOR
+static volatile uint32_t mtrr_waiting;
+#endif
+
+static uint64_t i686_mtrr_cap;
+
+static void
+i686_mtrr_dump(const char *tag)
+{
+       int i;
+
+       for (i = 0; i < nmtrr_raw; i++)
+               printf("%s: %x: %016lx\n",
+                   tag, mtrr_raw[i].msraddr, rdmsr(mtrr_raw[i].msraddr));
+}
+
+/*
+ * The Intel Architecture Software Developer's Manual volume 3 (systems
+ * programming) section 9.12.8 describes a simple 15-step process for
+ * updating the MTRR's on all processors on a multiprocessor system.
+ * If synch is nonzero, assume we're being called from an IPI handler,
+ * and synchronize with all running processors.
+ */
+
+/*
+ * 1. Broadcast to all processors to execute the following code sequence.
+ */
+
+static void
+i686_mtrr_reload(int synch)
+{
+       int i;
+       uint32_t cr0, cr3, cr4;
+       uint32_t origcr0, origcr4;
+#ifdef MULTIPROCESSOR
+       uint32_t mymask = 1 << cpu_number();
+#endif
+       
+       /*
+        * 2. Disable interrupts
+        */
+
+       disable_intr();
+       
+#ifdef MULTIPROCESSOR
+       if (synch) {
+               /*
+                * 3. Wait for all processors to reach this point.
+                */
+
+               i386_atomic_setbits_l(&mtrr_waiting, mymask);
+
+               while (mtrr_waiting != cpus_running)
+                       DELAY(10);
+       }
+#endif
+       
+       /*
+        * 4. Enter the no-fill cache mode (set the CD flag in CR0 to 1 and
+        * the NW flag to 0)
+        */
+
+       origcr0 = cr0 = rcr0();
+       cr0 |= CR0_CD;
+       cr0 &= ~CR0_NW;
+       lcr0(cr0);
+       
+       /*
+        * 5. Flush all caches using the WBINVD instruction.
+        */
+
+       wbinvd();
+       
+       /*
+        * 6. Clear the PGE flag in control register CR4 (if set).
+        */
+
+       origcr4 = cr4 = rcr4();
+       cr4 &= ~CR4_PGE;
+       lcr4(cr4);
+       
+       /*
+        * 7. Flush all TLBs (execute a MOV from control register CR3
+        * to another register and then a move from that register back
+        * to CR3)
+        */
+
+       cr3 = rcr3();
+       lcr3(cr3);
+       
+       /*
+        * 8. Disable all range registers (by clearing the E flag in
+        * register MTRRdefType).  If only variable ranges are being
+        * modified, software may clear the valid bits for the
+        * affected register pairs instead.
+        */
+       /* disable MTRRs (E = 0) */
+       wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) & ~MTRR_I686_ENABLE_MASK);
+       
+       /*
+        * 9. Update the MTRR's
+        */
+
+       for (i = 0; i < nmtrr_raw; i++) {
+               uint64_t val = mtrr_raw[i].msrval;
+               uint32_t addr = mtrr_raw[i].msraddr;
+               if (addr == MSR_MTRRdefType)
+                       val &= ~MTRR_I686_ENABLE_MASK;
+               wrmsr(addr, val);
+       }
+       
+       /*
+        * 10. Enable all range registers (by setting the E flag in
+        * register MTRRdefType).  If only variable-range registers
+        * were modified and their individual valid bits were cleared,
+        * then set the valid bits for the affected ranges instead.
+        */
+
+       wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) | MTRR_I686_ENABLE_MASK);
+       
+       /*
+        * 11. Flush all caches and all TLB's a second time. (repeat
+        * steps 5, 7)
+        */
+
+       wbinvd();
+       lcr3(cr3);
+
+       /*
+        * 12. Enter the normal cache mode to reenable caching (set the CD and
+        * NW flags in CR0 to 0)
+        */
+
+       lcr0(origcr0);
+
+       /*
+        * 13. Set the PGE flag in control register CR4, if previously
+        * cleared.
+        */
+
+       lcr4(origcr4);
+
+#ifdef MULTIPROCESSOR
+       if (synch) {
+               /*
+                * 14. Wait for all processors to reach this point.
+                */
+               i386_atomic_clearbits_l(&mtrr_waiting, mymask);
+
+               while (mtrr_waiting != 0)
+                       DELAY(10);
+       }
+#endif
+
+       /*
+        * 15. Enable interrupts.
+        */
+       enable_intr();
+}
+
+static void
+i686_mtrr_reload_cpu(struct cpu_info *ci)
+{
+       i686_mtrr_reload(1);
+}
+
+void
+i686_mtrr_init_first(void)
+{
+       int i;
+
+       for (i = 0; i < nmtrr_raw; i++)
+               mtrr_raw[i].msrval = rdmsr(mtrr_raw[i].msraddr);


