Make kernel self-relocatable [PATCH]



Hello

As discussed here
http://mail-index.netbsd.org/tech-kern/2023/04/06/msg028829.html
we can encounter a situation where there is not enough room
to load the GENERIC kernel at KERNTEXTOFF_LO, because UEFI
allocated a memory region in the way. Overwriting that region
with the kernel text section causes a crash.
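
For illustration, the conflict is that the kernel image wants a fixed
physical range while the UEFI memory map may show that range partly owned
by the firmware. Below is only a rough sketch of a check for that
condition, written against the generic UEFI types; it is not efiboot's
actual code, and range_is_free() is a name I made up for this example:

#include <efi.h>

/*
 * Sketch: return TRUE if no firmware-owned descriptor overlaps
 * [start, start + size).  (It does not check for gaps in the map.)
 */
static BOOLEAN
range_is_free(EFI_MEMORY_DESCRIPTOR *map, UINTN mapsize, UINTN descsize,
    EFI_PHYSICAL_ADDRESS start, UINT64 size)
{
	UINT8 *p = (UINT8 *)map;
	UINT8 *end = p + mapsize;

	for (; p < end; p += descsize) {
		EFI_MEMORY_DESCRIPTOR *d = (EFI_MEMORY_DESCRIPTOR *)p;
		EFI_PHYSICAL_ADDRESS dstart = d->PhysicalStart;
		EFI_PHYSICAL_ADDRESS dend =
		    dstart + d->NumberOfPages * EFI_PAGE_SIZE;

		/* A non-free descriptor overlapping the range is fatal. */
		if (dstart < start + size && dend > start &&
		    d->Type != EfiConventionalMemory)
			return FALSE;
	}
	return TRUE;
}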

For now I have two unsatisfying workarounds:
- trim down the kernel configuration so that it fits in the available memory
- patch the kernel to move KERNTEXTOFF higher, where there is enough room
  (patch included in the above link)

Both workarounds require a kernel rebuild to fit a specific UEFI
implementation. We are quite limited here, because the code in amd64's
locore.S assumes it was loaded at KERNTEXTOFF_LO.

It would be nice if that limitation was removed: bootx64.efi could load
the kernel wherever room is available, then the kernel start routine would
detect the undesirable load address and perform self-relocation.
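
In pseudo-C, the intended flow is roughly the following. This is only an
outline of what the 32-bit assembly in the patch does before paging is set
up; discover_load_address() and jump_to_relocated_start() are placeholders
for the call/pop trick and the final indirect jump, and memcpy() stands in
for the rep movsl copy:

	void
	start(void)
	{
		/* physical address we were actually loaded at */
		uintptr_t loadaddr = discover_load_address();

		if (loadaddr != KERNTEXTOFF_LO - KERNBASE_LO) {
			/* copy the whole image to its linked physical address... */
			memcpy((void *)(KERNTEXTOFF_LO - KERNBASE_LO),
			    (void *)loadaddr, __kernel_end - kernel_text);
			/* ...and restart execution from the relocated copy */
			jump_to_relocated_start();
		}
		/* otherwise continue the normal boot path */
	}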

The patch below implements that behavior. If the kernel is loaded at the
right place, it only adds a test at startup, hence my hope that it
cannot break anything.

Opinions?

I am not sure we need to include the startprog64 license again. The
code was trimmed down a lot, and does not have much in common
with the original.

Index: sys/arch/amd64/amd64/locore.S
===================================================================
RCS file: /cvsroot/src/sys/arch/amd64/amd64/locore.S,v
retrieving revision 1.218
diff -U4 -r1.218 locore.S
--- sys/arch/amd64/amd64/locore.S	3 Mar 2023 14:32:48 -0000	1.218
+++ sys/arch/amd64/amd64/locore.S	11 Apr 2023 07:59:20 -0000
@@ -455,8 +455,18 @@
 ENTRY(start)
 #ifndef XENPV
 	.code32
 
+	/* Discover load address */
+	call	next
+next:	pop	%edi
+	sub	$(next - kernel_text), %edi
+
+	/* If not KERNBASE, reloc ourselves to KERNBASE */
+	cmpl	$(KERNTEXTOFF_LO - KERNBASE_LO), %edi
+	jne	selfreloc_start
+
+
 	/* Warm boot */
 	movw	$0x1234,0x472
 
 	/*
@@ -1756,4 +1766,136 @@
 
 LABEL(nomds_leave)
 	NOMDS_LEAVE
 LABEL(nomds_leave_end)
+
+/* This is adapted from sys/arch/i386/stand/efiboot/bootx64/startprog64.S */
+
+/*
+ * Ported to boot 386BSD by Julian Elischer (julian%tfs.com@localhost) Sept 1992
+ *
+ * Mach Operating System
+ * Copyright (c) 1992, 1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ *  Software Distribution Coordinator  or  Software.Distribution%CS.CMU.EDU@localhost
+ *  School of Computer Science
+ *  Carnegie Mellon University
+ *  Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/*
+  Copyright 1988, 1989, 1990, 1991, 1992
+   by Intel Corporation, Santa Clara, California.
+
+                All Rights Reserved
+
+Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Intel
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+INTEL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL INTEL BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+#define CODE_SEGMENT    0x08
+#define DATA_SEGMENT    0x10
+
+	.text
+	.p2align 4,,15
+
+	.code32
+
+/*
+ * selfreloc(loadaddr %edi)
+ */
+ENTRY(selfreloc_start)
+	movl	%edi, %eax
+	movl	%edi, %esi				/* src */
+	movl	$_RELOC(kernel_text), %edi		/* dest */
+	movl	$(__kernel_end - kernel_text), %ecx	/* size */
+
+	shrl	$2, %ecx		/* count for copy by words */
+	rep
+	movsl
+
+	/* load current selfreloc_start address in %edi */
+	movl	%eax, %edi
+	addl	$(selfreloc_start - kernel_text), %edi
+
+
+	/* Prepare jump address */
+	lea	(selfreloc_start32a - selfreloc_start)(%edi), %eax
+	movl	%eax, (selfreloc_start32r - selfreloc_start)(%edi)
+
+	/* Setup GDT */
+	lea	(gdt - selfreloc_start)(%edi), %eax
+	mov	%eax, (gdtrr - selfreloc_start)(%edi)
+	lgdt	(gdtr - selfreloc_start)(%edi)
+
+	/* Jump to set %cs */
+	ljmp	*(selfreloc_start32r - selfreloc_start)(%edi)
+
+	.align	4
+selfreloc_start32a:
+	movl	$DATA_SEGMENT, %eax
+	movw	%ax, %ds
+	movw	%ax, %es
+	movw	%ax, %fs
+	movw	%ax, %gs
+	movw	%ax, %ss
+
+	/* Disable Paging in CR0 */
+	movl	%cr0, %eax
+	andl	$(~CR0_PG), %eax
+	movl	%eax, %cr0
+
+	/* Disable PAE in CR4 */
+	movl	%cr4, %eax
+	andl	$(~CR4_PAE), %eax
+	movl	%eax, %cr4
+
+	jmp	selfreloc_start32b
+
+	.align	4
+selfreloc_start32b:
+	xor	%eax, %eax
+	movl	$_RELOC(start), %esi
+	jmp	*%esi
+
+	.align	16
+selfreloc_start32r:
+	.long	0
+	.long	CODE_SEGMENT
+	.align	16
+gdt:
+	.long	0, 0
+	.byte	0xff, 0xff, 0x00, 0x00, 0x00, 0x9f, 0xcf, 0x00
+	.byte	0xff, 0xff, 0x00, 0x00, 0x00, 0x93, 0xcf, 0x00
+gdtr:
+	.word	gdtr - gdt
+gdtrr:
+	.quad	0

-- 
Emmanuel Dreyfus
manu%netbsd.org@localhost

