Source-Changes-HG archive


[src/trunk]: src/sys/arch/amd64/include Move all the XEN differences to a sin...



details:   https://anonhg.NetBSD.org/src/rev/6da2d934ec9f
branches:  trunk
changeset: 779173:6da2d934ec9f
user:      dsl <dsl%NetBSD.org@localhost>
date:      Mon May 07 20:51:20 2012 +0000

description:
Move all the XEN differences to a single conditional.
Merge the XEN/non-XEN versions of INTRFASTEXIT and
  INTR_RECURSE_HWFRAME by using extra defines.
Split INTRENTRY so that code can insert extra instructions
  inside the user/kernel conditional.
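
As a minimal sketch of the pattern (illustrative only, not text from the
commit): the new helper defines expand to their argument in exactly one of
the two configurations, so a single macro body can carry both the Xen and
the native variant.  XEN_ONLY2() takes two parameters because the assembler
operand it wraps itself contains a comma, which the preprocessor would
otherwise treat as an argument separator.

        #ifdef XEN
        #define XEN_ONLY2(x,y)  x,y     /* keep the instruction on Xen   */
        #define NOT_XEN(x)              /* drop the instruction on Xen   */
        #else
        #define XEN_ONLY2(x,y)          /* drop the instruction natively */
        #define NOT_XEN(x)      x       /* keep the instruction natively */
        #endif

        /* Shared bodies can then say, as in the merged macros below:    */
        /*   NOT_XEN(cli;)                 native-only cli (INTRFASTEXIT) */
        /*   XEN_ONLY2(andb $0xfc,(%rsp))  Xen-only CS fixup              */
        /*                                 (INTR_RECURSE_HWFRAME)         */

The CLI()/STI() macros move into the same #ifdef XEN / #else block: on a
native kernel they are plain cli/sti, while on Xen CLI sets and STI clears
the vcpu's EVTCHN_UPCALL_MASK byte through the scratch register named by
the temp_reg argument (pasted as %r ## temp_reg).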

diffstat:

 sys/arch/amd64/include/frameasm.h |  84 ++++++++++++++------------------------
 1 files changed, 32 insertions(+), 52 deletions(-)

diffs (133 lines):

diff -r 7d3c5dbb317e -r 6da2d934ec9f sys/arch/amd64/include/frameasm.h
--- a/sys/arch/amd64/include/frameasm.h Mon May 07 18:36:16 2012 +0000
+++ b/sys/arch/amd64/include/frameasm.h Mon May 07 20:51:20 2012 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: frameasm.h,v 1.16 2011/08/10 06:33:13 cherry Exp $     */
+/*     $NetBSD: frameasm.h,v 1.17 2012/05/07 20:51:20 dsl Exp $        */
 
 #ifndef _AMD64_MACHINE_FRAMEASM_H
 #define _AMD64_MACHINE_FRAMEASM_H
@@ -17,7 +17,23 @@
 /* Xen do not need swapgs, done by hypervisor */
 #define swapgs
 #define iretq  pushq $0 ; jmp HYPERVISOR_iret
-#endif
+#define        XEN_ONLY2(x,y)  x,y
+#define        NOT_XEN(x)
+
+#define CLI(temp_reg) \
+       movq CPUVAR(VCPU),%r ## temp_reg ;                      \
+       movb $1,EVTCHN_UPCALL_MASK(%r ## temp_reg);
+
+#define STI(temp_reg) \
+       movq CPUVAR(VCPU),%r ## temp_reg ;                      \
+       movb $0,EVTCHN_UPCALL_MASK(%r ## temp_reg);
+
+#else /* XEN */
+#define        XEN_ONLY2(x,y)
+#define        NOT_XEN(x)      x
+#define CLI(temp_reg) cli
+#define STI(temp_reg) sti
+#endif /* XEN */
 
 /*
  * These are used on interrupt or trap entry or exit.
@@ -57,23 +73,27 @@
        movq    TF_RBX(%rsp),%rbx       ; \
        movq    TF_RAX(%rsp),%rax
 
-#define        INTRENTRY \
+#define        INTRENTRY_L(kernel_trap) \
        subq    $TF_REGSIZE,%rsp        ; \
-       testq   $SEL_UPL,TF_CS(%rsp)    ; \
-       je      98f                     ; \
+       INTR_SAVE_GPRS                  ; \
+       testb   $SEL_UPL,TF_CS(%rsp)    ; \
+       je      kernel_trap             ; \
        swapgs                          ; \
        movw    %gs,TF_GS(%rsp)         ; \
        movw    %fs,TF_FS(%rsp)         ; \
        movw    %es,TF_ES(%rsp)         ; \
-       movw    %ds,TF_DS(%rsp)         ; \
-98:    INTR_SAVE_GPRS
+       movw    %ds,TF_DS(%rsp) 
 
-#ifndef XEN
+#define        INTRENTRY \
+       INTRENTRY_L(98f)                ; \
+98:
+
 #define INTRFASTEXIT \
        INTR_RESTORE_GPRS               ; \
        testq   $SEL_UPL,TF_CS(%rsp)    /* Interrupted %cs */ ; \
        je      99f                     ; \
-       cli                             ; \
+/* XEN: Disabling events before going to user mode sounds like a BAD idea */ \
+       NOT_XEN(cli;)                     \
        movw    TF_ES(%rsp),%es         ; \
        movw    TF_DS(%rsp),%ds         ; \
        swapgs                          ; \
@@ -88,41 +108,15 @@
        pushfq                          ; \
        movl    %cs,%r11d               ; \
        pushq   %r11                    ; \
+/* XEN: We must fixup CS, as even kernel mode runs at CPL 3 */ \
+       XEN_ONLY2(andb  $0xfc,(%rsp))     \
        pushq   %r13                    ;
 
-#else  /* !XEN */
-/*
- * Disabling events before going to user mode sounds like a BAD idea
- * do no restore gs either, HYPERVISOR_iret will do a swapgs
- */
-#define INTRFASTEXIT \
-       INTR_RESTORE_GPRS               ; \
-       testq   $SEL_UPL,TF_CS(%rsp)    ; \
-       je      99f                     ; \
-       movw    TF_ES(%rsp),%es         ; \
-       movw    TF_DS(%rsp),%ds         ; \
-99:    addq    $TF_REGSIZE+16,%rsp     /* + T_xxx and error code */ ; \
-       iretq
-  
-/* We must fixup CS, as even kernel mode runs at CPL 3 */
-#define INTR_RECURSE_HWFRAME \
-       movq    %rsp,%r10               ; \
-       movl    %ss,%r11d               ; \
-       pushq   %r11                    ; \
-       pushq   %r10                    ; \
-       pushfq                          ; \
-       movl    %cs,%r11d               ; \
-       pushq   %r11                    ; \
-       andb    $0xfc,(%rsp)            ; \
-       pushq   %r13                    ;
- 
-#endif /* !XEN */
- 
 #define        DO_DEFERRED_SWITCH \
        cmpl    $0, CPUVAR(WANT_PMAPLOAD)               ; \
        jz      1f                                      ; \
        call    _C_LABEL(do_pmap_load)                  ; \
-       1:
+1:
 
 #define        CHECK_DEFERRED_SWITCH \
        cmpl    $0, CPUVAR(WANT_PMAPLOAD)
@@ -130,18 +124,4 @@
 #define CHECK_ASTPENDING(reg)  cmpl    $0, L_MD_ASTPENDING(reg)
 #define CLEAR_ASTPENDING(reg)  movl    $0, L_MD_ASTPENDING(reg)
 
-#ifdef XEN
-#define CLI(temp_reg) \
-       movq CPUVAR(VCPU),%r ## temp_reg ;                      \
-       movb $1,EVTCHN_UPCALL_MASK(%r ## temp_reg);
-
-#define STI(temp_reg) \
-       movq CPUVAR(VCPU),%r ## temp_reg ;                      \
-       movb $0,EVTCHN_UPCALL_MASK(%r ## temp_reg);
-
-#else /* XEN */
-#define CLI(temp_reg) cli
-#define STI(temp_reg) sti
-#endif /* XEN */
-
 #endif /* _AMD64_MACHINE_FRAMEASM_H */
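
The point of the INTRENTRY split can be seen from a hypothetical caller
(the stub name, label number and placeholder comments below are invented
for illustration, not part of the commit): INTRENTRY_L branches to the
supplied label when the trap came from kernel mode, so anything placed
between the macro and that label runs only on entry from user mode.
INTRENTRY itself is now just INTRENTRY_L(98f) followed by a 98: label, so
existing callers behave as before.

        some_trap_stub:
                INTRENTRY_L(3f)
                /* user-mode-entry-only work would go here */
        3:
                /* common path for user and kernel entry continues here */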


