Source-Changes-HG archive

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]

[src/bouyer-socketcan]: src/sys/arch/i386/stand/efiboot 1673251



details:   https://anonhg.NetBSD.org/src/rev/3dc5d5ed3ad4
branches:  bouyer-socketcan
changeset: 820836:3dc5d5ed3ad4
user:      nonaka <nonaka%NetBSD.org@localhost>
date:      Sat Feb 11 10:23:40 2017 +0000

description:
1673251

diffstat:

 sys/arch/i386/stand/efiboot/bootx64/efibootx64.c  |   78 ++++++
 sys/arch/i386/stand/efiboot/bootx64/startprog64.S |  264 ++++++++++++++++++++++
 sys/arch/i386/stand/efiboot/efiboot.c             |  152 ++++++++++++
 sys/arch/i386/stand/efiboot/efiboot.h             |   71 +++++
 4 files changed, 565 insertions(+), 0 deletions(-)

diffs (truncated from 581 to 300 lines):

diff -r 584d270335b3 -r 3dc5d5ed3ad4 sys/arch/i386/stand/efiboot/bootx64/efibootx64.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/sys/arch/i386/stand/efiboot/bootx64/efibootx64.c  Sat Feb 11 10:23:40 2017 +0000
@@ -0,0 +1,78 @@
+/*     $NetBSD: efibootx64.c,v 1.2.6.2 2017/02/11 10:23:40 nonaka Exp $        */
+
+/*-
+ * Copyright (c) 2016 Kimihiro Nonaka <nonaka%netbsd.org@localhost>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "efiboot.h"
+
+#include <sys/bootblock.h>
+
+struct x86_boot_params boot_params;
+
+void startprog64_start(physaddr_t, physaddr_t, physaddr_t, u_long,
+    void *, physaddr_t);
+extern void (*startprog64)(physaddr_t, physaddr_t, physaddr_t, u_long,
+    void *, physaddr_t);
+extern u_int startprog64_size;
+
+void
+efi_md_init(void)
+{
+       EFI_STATUS status;
+       EFI_PHYSICAL_ADDRESS addr = EFI_ALLOCATE_MAX_ADDRESS;
+       u_int sz = EFI_SIZE_TO_PAGES(startprog64_size);
+
+       status = uefi_call_wrapper(BS->AllocatePages, 4, AllocateMaxAddress,
+           EfiLoaderData, sz, &addr);
+       if (EFI_ERROR(status))
+               Panic(L"%a: AllocatePages() failed: %d page(s): %r",
+                   __func__, sz, status);
+       startprog64 = (void *)addr;
+       CopyMem(startprog64, startprog64_start, startprog64_size);
+}
+
+/* ARGSUSED */
+void
+startprog(physaddr_t entry, uint32_t argc, uint32_t *argv, physaddr_t sp)
+{
+       uint32_t *newsp = (void *)((char *)startprog64 + startprog64_size);
+
+       /* Copy argv to new stack pointer */
+       if (argc > 0) {
+               newsp -= argc;
+               memcpy(newsp, argv, sizeof(*argv) * argc);
+       }
+
+       (*startprog64)(efi_kernel_start, efi_kernel_start + efi_loadaddr,
+           (physaddr_t)newsp, efi_kernel_size, startprog64, entry);
+}
+
+/* ARGSUSED */
+void
+multiboot(physaddr_t entry, physaddr_t header, physaddr_t sp)
+{
+       Panic(L"%a: not implemented", __func__);
+}
diff -r 584d270335b3 -r 3dc5d5ed3ad4 sys/arch/i386/stand/efiboot/bootx64/startprog64.S
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/sys/arch/i386/stand/efiboot/bootx64/startprog64.S Sat Feb 11 10:23:40 2017 +0000
@@ -0,0 +1,264 @@
+/*     $NetBSD: startprog64.S,v 1.3.6.2 2017/02/11 10:23:40 nonaka Exp $       */
+/*     NetBSD: startprog.S,v 1.3 2003/02/01 14:48:18 dsl Exp   */
+
+/* starts program in protected mode / flat space
+ with given stackframe
+ needs global variables flatcodeseg and flatdataseg
+ (gdt offsets)
+  derived from: NetBSD:sys/arch/i386/boot/asm.S
+ */
+
+/*
+ * Ported to boot 386BSD by Julian Elischer (julian%tfs.com@localhost) Sept 1992
+ *
+ * Mach Operating System
+ * Copyright (c) 1992, 1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ *  Software Distribution Coordinator  or  Software.Distribution%CS.CMU.EDU@localhost
+ *  School of Computer Science
+ *  Carnegie Mellon University
+ *  Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/*
+  Copyright 1988, 1989, 1990, 1991, 1992
+   by Intel Corporation, Santa Clara, California.
+
+                All Rights Reserved
+
+Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Intel
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+INTEL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL INTEL BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+#include <machine/asm.h>
+#include <machine/specialreg.h>
+
+#define        CODE_SEGMENT    0x08
+#define        DATA_SEGMENT    0x10
+
+       .align  16
+       .globl _C_LABEL(startprog64)
+_C_LABEL(startprog64):
+       .quad 0
+
+       .globl _C_LABEL(startprog64_size)
+_C_LABEL(startprog64_size):
+       .long startprog64_end - _C_LABEL(startprog64_start)
+
+       .text
+       .p2align 4,,15
+
+/*
+ * startprog64(loadaddr,entry,stack,kern_load,kern_start,kern_size)
+ */
+ENTRY(startprog64_start)
+start:
+       /*
+        * This function is to call the loaded kernel's start() with
+        * 32bit segment mode from x64 mode.
+        * %rdi: kernel start address
+        * %rsi: loaded kernel address
+        * %rdx: stack address
+        * %rcx: loaded kernel size
+        * %r8 : loaded start address
+        * %r9 : kernel entry address
+        */
+
+       cld             /* LynxOS depends on it */
+
+       cli
+
+       /* Copy kernel */
+       mov     %rcx, %r12              /* original kernel size */
+       movq    %rdi, %r11              /* for misaligned check */
+
+#if !defined(NO_OVERLAP)
+       movq    %rdi, %r13
+       subq    %rsi, %r13
+#endif
+
+       shrq    $3, %rcx                /* count for copy by words */
+       jz      8f                      /* j if less than 8 bytes */
+
+       lea     -8(%rdi, %r12), %r14    /* target address of last 8 */
+       mov     -8(%rsi, %r12), %r15    /* get last word */
+#if !defined(NO_OVERLAP)
+       cmpq    %r12, %r13              /* overlapping? */
+       jb      10f
+#endif
+
+/*
+ * Non-overlapping, copy forwards.
+ * Newer Intel cpus (Nehalem) will do 16byte read/write transfers
+ * if %ecx is more than 76.
+ * AMD might do something similar some day.
+ */
+       and     $7, %r11                /* destination misaligned ? */
+       jnz     2f
+       rep
+       movsq
+       mov     %r15, (%r14)            /* write last word */
+       jmp     .Lcopy_done
+
+/*
+ * Destination misaligned
+ * AMD say it is better to align the destination (not the source).
+ * This will also re-align copies if the source and dest are both
+ * misaligned by the same amount.
+ * (I think Nehalem will use its accelerated copy if the source
+ * and destination have the same alignment.)
+ */
+2:
+       lea     -9(%r11, %r12), %rcx    /* post re-alignment count */
+       neg     %r11                    /* now -1 .. -7 */
+       mov     (%rsi), %r12            /* get first word */
+       mov     %rdi, %r13              /* target for first word */
+       lea     8(%rsi, %r11), %rsi
+       lea     8(%rdi, %r11), %rdi
+       shr     $3, %rcx
+       rep
+       movsq
+       mov     %r12, (%r13)            /* write first word */
+       mov     %r15, (%r14)            /* write last word */
+       jmp     .Lcopy_done
+
+#if !defined(NO_OVERLAP)
+/* Must copy backwards.
+ * Reverse copy is probably easy to code faster than 'rep movds'
+ * since that requires (IIRC) an extra clock every 3 iterations (AMD).
+ * However I don't suppose anything cares that much!
+ * The big cost is the std/cld pair - reputedly 50+ cycles on Netburst P4.
+ * The copy is aligned with the buffer start (more likely to
+ * be a multiple of 8 than the end).
+ */
+10:
+       lea     -8(%rsi, %rcx, 8), %rsi
+       lea     -8(%rdi, %rcx, 8), %rdi
+       std
+       rep
+       movsq
+       cld
+       mov     %r15, (%r14)    /* write last bytes */
+       jmp     .Lcopy_done
+#endif
+
+/* Less than 8 bytes to copy, copy by bytes */
+/* Intel Nehalem optimise 'rep movsb' for <= 7 bytes (9-15 clocks).
+ * For longer transfers it is 50+ !
+ */
+8:     mov     %r12, %rcx
+
+#if !defined(NO_OVERLAP)
+       cmpq    %r12, %r13      /* overlapping? */
+       jb      81f
+#endif
+
+       /* nope, copy forwards. */
+       rep
+       movsb
+       jmp     .Lcopy_done
+
+#if !defined(NO_OVERLAP)
+/* Must copy backwards */
+81:
+       lea     -1(%rsi, %rcx), %rsi
+       lea     -1(%rdi, %rcx), %rdi
+       std
+       rep
+       movsb
+       cld
+#endif
+       /* End of copy kernel */
+.Lcopy_done:
+
+       mov     %r8, %rdi       /* %rdi: loaded start address */
+       mov     %r9, %rsi       /* %rsi: kernel entry address */
+
+       /* Prepare jump address */
+       lea     (start32a - start)(%rdi), %rax
+       movl    %eax, (start32r - start)(%rdi)
+
+       /* Setup GDT */
+       lea     (gdt - start)(%rdi), %rax
+       mov     %rax, (gdtrr - start)(%rdi)
+       lgdt    (gdtr - start)(%rdi)
+



Home | Main Index | Thread Index | Old Index