Port-xen archive
Re: XenServer and viridian
On Thu, 27 Mar 2025, Manuel Bouyer wrote:
OK, so in this case the interface to use is probably through the
"Xen Platform Device" PCI device, as the native Xen interface is not available.
More code needed ...
Luckily, very little code is needed and no different interface. The problem
has been explained in our source tree all along, at
sys/external/mit/xen-include-public/dist/xen/include/public/arch-x86/cpuid.h
[snip]
/*
* For compatibility with other hypervisor interfaces, the Xen cpuid leaves
* can be found at the first otherwise unused 0x100 aligned boundary starting
* from 0x40000000.
*
* e.g. If viridian extensions are enabled for an HVM domain, the Xen cpuid
* leaves will start at 0x40000100
*/
[snip]
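
To make the layout concrete, here is a rough userland sketch (my own
illustration, not part of the patch and not captured output) that walks the
0x100-aligned bases and prints the 12-byte signature and max leaf each one
advertises. On a viridian-enabled HVM guest you would expect to see
"Microsoft Hv" at 0x40000000 and "XenVMMXenVMM" at 0x40000100:

#include <stdio.h>
#include <string.h>

/* Raw CPUID via inline asm (GCC/Clang), bypassing any max-leaf checks. */
static void
cpuid(unsigned int leaf, unsigned int r[4])
{
    __asm__ __volatile__("cpuid"
        : "=a"(r[0]), "=b"(r[1]), "=c"(r[2]), "=d"(r[3])
        : "a"(leaf), "c"(0U));
}

int
main(void)
{
    unsigned int base, r[4];
    char sig[13];

    for (base = 0x40000000; base < 0x40010000; base += 0x100) {
        cpuid(base, r);
        /* Signature is in EBX:ECX:EDX, max leaf for this base in EAX. */
        memcpy(&sig[0], &r[1], 4);
        memcpy(&sig[4], &r[2], 4);
        memcpy(&sig[8], &r[3], 4);
        sig[12] = '\0';
        if (r[0] < base || sig[0] < 32 || sig[0] > 126)
            break;  /* no (sane) hypervisor leaf at this base */
        printf("0x%08x: \"%s\", max leaf 0x%08x\n", base, sig, r[0]);
    }
    return 0;
}
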
The existing code in hypervisor.c tried to use the Hyper-V data in the CPU
leaf at 0x40000000, and the KASSERT that should have caught this didn't
trigger (descs[0] was 6002, not 1).
Therefore, inspired by OpenBSD's pvbus, I've altered identcpu.c so that it
goes through all the leaves, not just the first, identifying each as it
goes along. The last one found is the one that gets used, and its base is
stored for later use in hypervisor.c.
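
Condensed, the new loop in identify_hypervisor() has roughly this shape
(a sketch only: printable() and match_vendor() are shorthand for the inline
checks in the actual diff, which also prints what it finds):

    unsigned int base, regs[4];
    char hv_vendor[13];

    for (base = CPUID_HV_SIGNATURE_START; base < CPUID_HV_SIGNATURE_END;
        base += CPUID_HV_SIGNATURE_STEP) {
        x86_cpuid(base, regs);
        if (regs[0] < base)
            continue;               /* nothing advertised at this base */
        /* 12-byte signature from EBX:ECX:EDX */
        memcpy(&hv_vendor[0], &regs[1], 4);
        memcpy(&hv_vendor[4], &regs[2], 4);
        memcpy(&hv_vendor[8], &regs[3], 4);
        hv_vendor[12] = '\0';
        if (!printable(hv_vendor))  /* minimal sanity check */
            break;
        vm_cpuleaf = base;          /* last valid base wins */
        vm_guest = match_vendor(hv_vendor); /* VMware/Hyper-V/Xen/KVM/... */
    }
    /* hypervisor.c then uses vm_cpuleaf + 2 rather than XEN_CPUID_LEAF(2). */
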
See the attached patch which can also be found at:
https://projects.precedence.co.uk/downloads/xenhv.patch
It's probably not the best way of doing it, so I would appreciate your
view.
Userland cpuctl also needs adjusting, as it currently only looks at the
first leaf and so still reports Hyper-V.
--
Stephen
? sys/arch/x86/x86/identcpu.c.xen
Index: sys/arch/xen/xen/hypervisor.c
===================================================================
RCS file: /cvsroot/src/sys/arch/xen/xen/hypervisor.c,v
retrieving revision 1.96.4.1
diff -u -r1.96.4.1 hypervisor.c
--- sys/arch/xen/xen/hypervisor.c 29 Mar 2025 10:32:43 -0000 1.96.4.1
+++ sys/arch/xen/xen/hypervisor.c 1 Apr 2025 09:23:14 -0000
@@ -218,7 +218,7 @@
extern vaddr_t hypercall_page;
u_int descs[4];
- x86_cpuid(XEN_CPUID_LEAF(2), descs);
+ x86_cpuid(vm_cpuleaf + 2, descs);
/*
* Given 32 bytes per hypercall stub, and an optimistic number
@@ -228,7 +228,10 @@
* anyway. Make sure the allocation matches the registration.
*/
- KASSERT(descs[0] == 1);
+ if (descs[0] != 1) {
+ aprint_normal("Unrecognised Xen interface\n");
+ return;
+ }
/* XXX: vtophys(&hypercall_page) */
wrmsr(descs[1], (uintptr_t)&hypercall_page - KERNBASE);
Index: sys/arch/x86/include/cpu.h
===================================================================
RCS file: /cvsroot/src/sys/arch/x86/include/cpu.h,v
retrieving revision 1.133.4.2
diff -u -r1.133.4.2 cpu.h
--- sys/arch/x86/include/cpu.h 29 Mar 2025 10:32:43 -0000 1.133.4.2
+++ sys/arch/x86/include/cpu.h 1 Apr 2025 09:23:14 -0000
@@ -518,6 +518,7 @@
VM_LAST
} vm_guest_t;
extern vm_guest_t vm_guest;
+extern uint32_t vm_cpuleaf;
static __inline bool __unused
vm_guest_is_xenpv(void)
Index: sys/arch/x86/x86/identcpu.c
===================================================================
RCS file: /cvsroot/src/sys/arch/x86/x86/identcpu.c,v
retrieving revision 1.123.4.3
diff -u -r1.123.4.3 identcpu.c
--- sys/arch/x86/x86/identcpu.c 29 Mar 2025 10:32:43 -0000 1.123.4.3
+++ sys/arch/x86/x86/identcpu.c 1 Apr 2025 09:23:14 -0000
@@ -1072,6 +1072,7 @@
* Hypervisor
*/
vm_guest_t vm_guest = VM_GUEST_NO;
+uint32_t vm_cpuleaf = 0x40000000;
struct vm_name_guest {
const char *name;
@@ -1097,13 +1098,20 @@
{ "KVM", VM_GUEST_VM }, /* KVM */
};
+#define CPUID_HV_SIGNATURE_START 0x40000000
+#define CPUID_HV_SIGNATURE_END 0x40010000
+#define CPUID_HV_SIGNATURE_STEP 0x100
+#define CPUID_HV_SIGNATURE_STRLEN 12
+
+
void
identify_hypervisor(void)
{
u_int regs[6];
- char hv_vendor[12];
+ char hv_vendor[13];
const char *p;
int i;
+ uint32_t base;
switch (vm_guest) {
case VM_GUEST_XENPV:
@@ -1117,34 +1125,82 @@
/*
* [RFC] CPUID usage for interaction between Hypervisors and Linux.
- * http://lkml.org/lkml/2008/10/1/246
+ * https://lore.kernel.org/lkml/48E3D5F2.4090708@goop.org/t/
*
* KB1009458: Mechanisms to determine if software is running in
* a VMware virtual machine
* http://kb.vmware.com/kb/1009458
+ *
+ * Microsoft Hypervisor Discovery
+ * https://learn.microsoft.com/en-us/virtualization/hyper-v-on-windows/tlfs/feature-discovery
+ *
+ * For compatibility with other hypervisor interfaces, the Xen cpuid leaves
+ * can be found at the first otherwise unused 0x100 aligned boundary starting
+ * from 0x40000000.
+ *
+ * e.g. If viridian extensions are enabled for an HVM domain, the Xen cpuid
+ * leaves will start at 0x40000100
+ * https://xenbits.xen.org/docs/unstable/hypercall/x86_64/include,public,arch-x86,cpuid.h.html
*/
+
if (ISSET(cpu_feature[1], CPUID2_RAZ)) {
vm_guest = VM_GUEST_VM;
- x86_cpuid(0x40000000, regs);
- if (regs[0] >= 0x40000000) {
- memcpy(&hv_vendor[0], &regs[1], sizeof(*regs));
- memcpy(&hv_vendor[4], &regs[2], sizeof(*regs));
- memcpy(&hv_vendor[8], &regs[3], sizeof(*regs));
- if (memcmp(hv_vendor, "VMwareVMware", 12) == 0)
- vm_guest = VM_GUEST_VMWARE;
- else if (memcmp(hv_vendor, "Microsoft Hv", 12) == 0) {
- vm_guest = VM_GUEST_HV;
+ aprint_normal("Hypervisor: ");
+ for (base = CPUID_HV_SIGNATURE_START;
+ base < CPUID_HV_SIGNATURE_END;
+ base += CPUID_HV_SIGNATURE_STEP) {
+ x86_cpuid(base, regs);
+ if (regs[0] >= base) {
+ memcpy(&hv_vendor[0], &regs[1], sizeof(*regs));
+ memcpy(&hv_vendor[4], &regs[2], sizeof(*regs));
+ memcpy(&hv_vendor[8], &regs[3], sizeof(*regs));
+ hv_vendor[12] = '\0';
+ for (i = 0; i < 4; i++) {
+ /*
+ * Check if first 4 chars are printable ASCII as
+ * minimal validity check
+ */
+ if (hv_vendor[i] < 32 || hv_vendor[i] > 126)
+ goto out;
+ }
+
+ vm_cpuleaf = base;
+ if (base > CPUID_HV_SIGNATURE_START)
+ aprint_normal(", ");
+ if (memcmp(hv_vendor, "VMwareVMware", 12) == 0) {
+ vm_guest = VM_GUEST_VMWARE;
+ aprint_normal("VMware");
+ }
+ else if (memcmp(hv_vendor, "Microsoft Hv", 12) == 0) {
+ vm_guest = VM_GUEST_HV;
+ aprint_normal("Hyper-V");
+ /* XXX Nothing sets NHYPERV */
#if NHYPERV > 0
- hyperv_early_init();
+ hyperv_early_init();
#endif
- } else if (memcmp(hv_vendor, "KVMKVMKVM\0\0\0", 12) == 0)
- vm_guest = VM_GUEST_KVM;
- else if (memcmp(hv_vendor, "XenVMMXenVMM", 12) == 0)
- vm_guest = VM_GUEST_XENHVM;
- /* FreeBSD bhyve: "bhyve bhyve " */
- /* OpenBSD vmm: "OpenBSDVMM58" */
- /* NetBSD nvmm: "___ NVMM ___" */
+ } else if (memcmp(hv_vendor, "KVMKVMKVM\0\0\0", 12) == 0) {
+ vm_guest = VM_GUEST_KVM;
+ aprint_normal("KVM");
+ } else if (memcmp(hv_vendor, "XenVMMXenVMM", 12) == 0) {
+ vm_guest = VM_GUEST_XENHVM;
+ aprint_normal("Xen");
+ } else if (memcmp(hv_vendor, "bhyve bhyve ", 12) == 0) {
+ vm_guest = VM_GUEST_VM;
+ aprint_normal("bhyve");
+ } else if (memcmp(hv_vendor, "OpenBSDVMM58", 12) == 0) {
+ vm_guest = VM_GUEST_VM;
+ aprint_normal("OpenBSD VMM");
+ } else if (memcmp(hv_vendor, "___ NVMM ___", 12) == 0) {
+ vm_guest = VM_GUEST_VM;
+ aprint_normal("NetBSD nvmm");
+ } else {
+ aprint_normal("Unknown (%s)", hv_vendor);
+ }
+ }
}
+out:
+ aprint_normal("\n");
+
// VirtualBox returns KVM, so keep going.
if (vm_guest != VM_GUEST_KVM)
return;