Source-Changes-HG archive


[src/trunk]: src/sys Add __cpu_simple_lock_t. Use it, x86_read_psl(), and x86...



details:   https://anonhg.NetBSD.org/src/rev/0356b93c7086
branches:  trunk
changeset: 762898:0356b93c7086
user:      jruoho <jruoho%NetBSD.org@localhost>
date:      Sat Mar 05 09:47:19 2011 +0000

description:
Add __cpu_simple_lock_t. Use it, x86_read_psl(), and x86_disable_intr() to
disable interrupts locally and protect access to the APERF and MPERF counters.
Also rationalize the MD initialization sequence.
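
For reference, the new locking pattern condenses to the sketch below (adapted
from the hunks that follow; the header names and the trimmed-down softc are
approximations rather than the full driver):

    #include <sys/param.h>
    #include <sys/lock.h>           /* __cpu_simple_lock_t and friends */
    #include <machine/cpufunc.h>    /* x86_read_psl(), x86_disable_intr() */

    struct acpicpu_softc {
            __cpu_simple_lock_t     sc_lock;
            /* ... other fields omitted ... */
    };

    /*
     * Save the current PSL, mask interrupts on this CPU, and
     * take the spin lock; the saved flags go back to the caller.
     */
    static u_long
    acpicpu_md_lock(struct acpicpu_softc *sc)
    {
            const u_long flags = x86_read_psl();

            x86_disable_intr();
            __cpu_simple_lock(&sc->sc_lock);

            return flags;
    }

    /* Release the lock, then restore the saved interrupt state. */
    static void
    acpicpu_md_unlock(struct acpicpu_softc *sc, u_long flags)
    {
            __cpu_simple_unlock(&sc->sc_lock);
            x86_write_psl(flags);
    }

Restoring the saved PSL, rather than calling x86_enable_intr() unconditionally
as the old code in acpicpu_md_pstate_percent() did, keeps the pair safe to use
from contexts where interrupts were already disabled.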

diffstat:

 sys/arch/x86/acpi/acpi_cpu_md.c |  206 +++++++++++++++++++++------------------
 sys/dev/acpi/acpi_cpu.c         |    6 +-
 sys/dev/acpi/acpi_cpu.h         |    7 +-
 sys/dev/acpi/acpi_cpu_pstate.c  |   16 +-
 4 files changed, 127 insertions(+), 108 deletions(-)

diffs (truncated from 427 to 300 lines):

diff -r e64f2d0fcfcd -r 0356b93c7086 sys/arch/x86/acpi/acpi_cpu_md.c
--- a/sys/arch/x86/acpi/acpi_cpu_md.c   Sat Mar 05 07:47:54 2011 +0000
+++ b/sys/arch/x86/acpi/acpi_cpu_md.c   Sat Mar 05 09:47:19 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: acpi_cpu_md.c,v 1.54 2011/03/05 06:39:55 jruoho Exp $ */
+/* $NetBSD: acpi_cpu_md.c,v 1.55 2011/03/05 09:47:19 jruoho Exp $ */
 
 /*-
  * Copyright (c) 2010, 2011 Jukka Ruohonen <jruohonen%iki.fi@localhost>
@@ -27,7 +27,7 @@
  * SUCH DAMAGE.
  */
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_md.c,v 1.54 2011/03/05 06:39:55 jruoho Exp $");
+__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_md.c,v 1.55 2011/03/05 09:47:19 jruoho Exp $");
 
 #include <sys/param.h>
 #include <sys/bus.h>
@@ -52,6 +52,12 @@
 #include <machine/acpi_machdep.h>
 
 /*
+ * Intel IA32_MISC_ENABLE.
+ */
+#define MSR_MISC_ENABLE_EST    __BIT(16)
+#define MSR_MISC_ENABLE_TURBO  __BIT(38)
+
+/*
  * AMD C1E.
  */
 #define MSR_CMPHALT            0xc0010055
@@ -99,8 +105,10 @@
 static char      native_idle_text[16];
 void           (*native_idle)(void) = NULL;
 
+static u_long   acpicpu_md_lock(struct acpicpu_softc *);
+static void     acpicpu_md_unlock(struct acpicpu_softc *, u_long);
 static int      acpicpu_md_quirk_piix4(struct pci_attach_args *);
-static void     acpicpu_md_pstate_percent_reset(struct acpicpu_softc *);
+static void     acpicpu_md_pstate_percent_reset(struct cpu_info *);
 static int      acpicpu_md_pstate_fidvid_get(struct acpicpu_softc *,
                                               uint32_t *);
 static int      acpicpu_md_pstate_fidvid_set(struct acpicpu_pstate *);
@@ -134,6 +142,24 @@
        return cfaa->ci;
 }
 
+static u_long
+acpicpu_md_lock(struct acpicpu_softc *sc)
+{
+       const u_long flags = x86_read_psl();
+
+       x86_disable_intr();
+       __cpu_simple_lock(&sc->sc_lock);
+
+       return flags;
+}
+
+static void
+acpicpu_md_unlock(struct acpicpu_softc *sc, u_long flags)
+{
+       __cpu_simple_unlock(&sc->sc_lock);
+       x86_write_psl(flags);
+}
+
 uint32_t
 acpicpu_md_cap(void)
 {
@@ -469,11 +495,27 @@
 int
 acpicpu_md_pstate_start(struct acpicpu_softc *sc)
 {
-       const uint64_t est = __BIT(16);
+       return acpicpu_md_pstate_sysctl_init();
+}
+
+int
+acpicpu_md_pstate_stop(void)
+{
+       if (acpicpu_log != NULL)
+               sysctl_teardown(&acpicpu_log);
+
+       return 0;
+}
+
+int
+acpicpu_md_pstate_init(struct acpicpu_softc *sc)
+{
+       struct acpicpu_pstate *ps, msr;
+       struct cpu_info *ci = curcpu();
+       uint32_t family, i = 0;
        uint64_t val;
 
-       if ((sc->sc_flags & ACPICPU_FLAG_P) == 0)
-               return ENODEV;
+       (void)memset(&msr, 0, sizeof(struct acpicpu_pstate));
 
        switch (cpu_vendor) {
 
@@ -483,52 +525,20 @@
                /*
                 * Make sure EST is enabled.
                 */
-               val = rdmsr(MSR_MISC_ENABLE);
-
-               if ((val & est) == 0) {
+               if ((sc->sc_flags & ACPICPU_FLAG_P_FFH) != 0) {
 
-                       val |= est;
-
-                       wrmsr(MSR_MISC_ENABLE, val);
                        val = rdmsr(MSR_MISC_ENABLE);
 
-                       if ((val & est) == 0)
-                               return ENOTTY;
-               }
-       }
-
-       /*
-        * Reset the APERF and MPERF counters.
-        */
-       if ((sc->sc_flags & ACPICPU_FLAG_P_HWF) != 0)
-               acpicpu_md_pstate_percent_reset(sc);
-
-       return acpicpu_md_pstate_sysctl_init();
-}
-
-int
-acpicpu_md_pstate_stop(void)
-{
+                       if ((val & MSR_MISC_ENABLE_EST) == 0) {
 
-       if (acpicpu_log != NULL)
-               sysctl_teardown(&acpicpu_log);
-
-       return 0;
-}
+                               val |= MSR_MISC_ENABLE_EST;
+                               wrmsr(MSR_MISC_ENABLE, val);
+                               val = rdmsr(MSR_MISC_ENABLE);
 
-int
-acpicpu_md_pstate_pss(struct acpicpu_softc *sc)
-{
-       struct acpicpu_pstate *ps, msr;
-       struct cpu_info *ci = curcpu();
-       uint32_t family, i = 0;
-
-       (void)memset(&msr, 0, sizeof(struct acpicpu_pstate));
-
-       switch (cpu_vendor) {
-
-       case CPUVENDOR_IDT:
-       case CPUVENDOR_INTEL:
+                               if ((val & MSR_MISC_ENABLE_EST) == 0)
+                                       return ENOTTY;
+                       }
+               }
 
                /*
                 * If the so-called Turbo Boost is present,
@@ -541,7 +551,7 @@
                 *      in Intel Core(tm) Microarchitectures (Nehalem)
                 *      Based Processors. White Paper, November 2008.
                 */
-               if (sc->sc_pstate_count > 2 &&
+               if (sc->sc_pstate_count >= 2 &&
                   (sc->sc_flags & ACPICPU_FLAG_P_TURBO) != 0) {
 
                        ps = &sc->sc_pstate[0];
@@ -586,6 +596,9 @@
 
                default:
 
+                       /*
+                        * If we have an unknown AMD CPU, rely on XPSS.
+                        */
                        if ((sc->sc_flags & ACPICPU_FLAG_P_XPSS) == 0)
                                return EOPNOTSUPP;
                }
@@ -624,87 +637,94 @@
                i++;
        }
 
+       /*
+        * Reset the APERF and MPERF counters.
+        *
+        * XXX: Should be with xc_unicast(9).
+        */
+       if ((sc->sc_flags & ACPICPU_FLAG_P_HWF) != 0)
+               acpicpu_md_pstate_percent_reset(sc->sc_ci);
+
        return 0;
 }
 
+/*
+ * Read the IA32_APERF and IA32_MPERF counters. The latter
+ * increments at the rate of the fixed maximum frequency
+ * configured during boot, whereas APERF counts at the
+ * rate of the actual frequency. Note that the MSRs must be
+ * read without delay, and that only the ratio between
+ * IA32_APERF and IA32_MPERF is architecturally defined.
+ *
+ * The function thus returns the percentage of the actual
+ * frequency in terms of the maximum frequency of the calling
+ * CPU since the last call. A value zero implies an error.
+ *
+ * For further details, refer to:
+ *
+ *     Intel Corporation: Intel 64 and IA-32 Architectures
+ *     Software Developer's Manual. Section 13.2, Volume 3A:
+ *     System Programming Guide, Part 1. July, 2008.
+ *
+ *     Advanced Micro Devices: BIOS and Kernel Developer's
+ *     Guide (BKDG) for AMD Family 10h Processors. Section
+ *     2.4.5, Revision 3.48, April 2010.
+ */
 uint8_t
-acpicpu_md_pstate_percent(struct acpicpu_softc *sc)
+acpicpu_md_pstate_percent(struct cpu_info *ci)
 {
+       struct acpicpu_softc *sc;
        uint64_t aperf, mperf;
-       uint64_t rv = 0;
+       uint8_t rv = 0;
+       u_long flags;
 
-       /*
-        * Read the IA32_APERF and IA32_MPERF counters. The first
-        * increments at the rate of the fixed maximum frequency
-        * configured during the boot, whereas APERF counts at the
-        * rate of the actual frequency. Note that the MSRs must be
-        * read without delay, and that only the ratio between
-        * IA32_APERF and IA32_MPERF is architecturally defined.
-        *
-        * The function thus returns the percentage of the actual
-        * frequency in terms of the maximum frequency of the calling
-        * CPU since the last call. A value zero implies an error.
-        *
-        * For further details, refer to:
-        *
-        *      Intel Corporation: Intel 64 and IA-32 Architectures
-        *      Software Developer's Manual. Section 13.2, Volume 3A:
-        *      System Programming Guide, Part 1. July, 2008.
-        *
-        *      Advanced Micro Devices: BIOS and Kernel Developer's
-        *      Guide (BKDG) for AMD Family 10h Processors. Section
-        *      2.4.5, Revision 3.48, April 2010.
-        */
-       if (__predict_false((sc->sc_flags & ACPICPU_FLAG_P) == 0))
+       sc = acpicpu_sc[ci->ci_acpiid];
+
+       if (__predict_false(sc == NULL))
                return 0;
 
        if (__predict_false((sc->sc_flags & ACPICPU_FLAG_P_HWF) == 0))
                return 0;
 
+       flags = acpicpu_md_lock(sc);
+
        aperf = sc->sc_pstate_aperf;
        mperf = sc->sc_pstate_mperf;
 
-       x86_disable_intr();
-
        sc->sc_pstate_aperf = rdmsr(MSR_APERF);
        sc->sc_pstate_mperf = rdmsr(MSR_MPERF);
 
-       x86_enable_intr();
-
        aperf = sc->sc_pstate_aperf - aperf;
        mperf = sc->sc_pstate_mperf - mperf;
 
        if (__predict_true(mperf != 0))
                rv = (aperf * 100) / mperf;
 
+       acpicpu_md_unlock(sc, flags);
+
        return rv;
 }
 
 static void
-acpicpu_md_pstate_percent_reset(struct acpicpu_softc *sc)
+acpicpu_md_pstate_percent_reset(struct cpu_info *ci)
 {
-       struct msr_rw_info msr;
-       uint64_t xc;
+       struct acpicpu_softc *sc;
+       u_long flags;
 
-       KASSERT((sc->sc_flags & ACPICPU_FLAG_P) != 0);
-       KASSERT((sc->sc_flags & ACPICPU_FLAG_P_HWF) != 0);
+       sc = acpicpu_sc[ci->ci_acpiid];
 
-       msr.msr_value = 0;
-       msr.msr_read = false;
-       msr.msr_type = MSR_APERF;
-
-       xc = xc_broadcast(0, (xcfunc_t)x86_msr_xcall, &msr, NULL);
-       xc_wait(xc);



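To make the APERF/MPERF arithmetic in acpicpu_md_pstate_percent() concrete,
here is a minimal userland-style sketch with made-up counter values; the
helper below is hypothetical and stands in for the per-softc bookkeeping the
kernel code does:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Percentage of the maximum frequency since the previous sample.
     * Only the ratio of the two deltas is architecturally defined,
     * so both counters must be sampled back to back.
     */
    static uint8_t
    pstate_percent(uint64_t aperf_prev, uint64_t aperf_now,
        uint64_t mperf_prev, uint64_t mperf_now)
    {
            const uint64_t aperf = aperf_now - aperf_prev;
            const uint64_t mperf = mperf_now - mperf_prev;

            return (mperf != 0) ? (uint8_t)((aperf * 100) / mperf) : 0;
    }

    int
    main(void)
    {
            /* Made-up deltas: the CPU averaged half of its maximum. */
            printf("%u%%\n", pstate_percent(0, 500000, 0, 1000000));
            return 0;
    }

With these numbers the APERF delta is half of the MPERF delta, so the sketch
prints 50%, matching the rv = (aperf * 100) / mperf computation in the patch.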