Current-Users archive

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]

Re: dtracing unlink



On Tue, 31 Oct 2023, bch wrote:

This should be better:

Index: cddl/osnet/dev/dtrace/amd64/dtrace_asm.S
==================================================================
--- cddl/osnet/dev/dtrace/amd64/dtrace_asm.S
+++ cddl/osnet/dev/dtrace/amd64/dtrace_asm.S
@@ -215,12 +215,14 @@
       pushq   %rbp
       movq    %rsp, %rbp

       xchgq   %rdi, %rsi              /* make %rsi source, %rdi dest */
       movq    %rdx, %rcx              /* load count */
+       callq smap_disable
       repz                            /* repeat for count ... */
       smovb                           /*   move from %ds:rsi to %ed:rdi */
+       callq smap_enable
       leave
       ret
       END(dtrace_copy)

/*
@@ -230,10 +232,11 @@
*/
       ENTRY(dtrace_copystr)
       pushq   %rbp
       movq    %rsp, %rbp

+       callq smap_disable
0:
       movb    (%rdi), %al             /* load from source */
       movb    %al, (%rsi)             /* store to destination */
       addq    $1, %rdi                /* increment source pointer */
       addq    $1, %rsi                /* increment destination pointer */
@@ -246,10 +249,11 @@
       jnz     2f
1:
       cmpq    $0, %rdx
       jne     0b
2:
+       callq smap_enable
       leave
       ret

       END(dtrace_copystr)

@@ -256,50 +260,60 @@
/*
uintptr_t
dtrace_fulword(void *addr)
*/
       ENTRY(dtrace_fulword)
+       callq smap_disable
       movq    (%rdi), %rax
+       callq smap_enable
       ret
       END(dtrace_fulword)

/*
uint8_t
dtrace_fuword8_nocheck(void *addr)
*/
       ENTRY(dtrace_fuword8_nocheck)
+       callq smap_disable
       xorq    %rax, %rax
       movb    (%rdi), %al
+       callq smap_enable
       ret
       END(dtrace_fuword8_nocheck)

/*
uint16_t
dtrace_fuword16_nocheck(void *addr)
*/
       ENTRY(dtrace_fuword16_nocheck)
+       callq smap_disable
       xorq    %rax, %rax
       movw    (%rdi), %ax
+       callq smap_enable
       ret
       END(dtrace_fuword16_nocheck)

/*
uint32_t
dtrace_fuword32_nocheck(void *addr)
*/
       ENTRY(dtrace_fuword32_nocheck)
+       callq smap_disable
       xorq    %rax, %rax
       movl    (%rdi), %eax
+       callq smap_enable
       ret
       END(dtrace_fuword32_nocheck)

/*
uint64_t
dtrace_fuword64_nocheck(void *addr)
*/
       ENTRY(dtrace_fuword64_nocheck)
+       callq smap_disable
       movq    (%rdi), %rax
+       callq smap_enable
       ret
       END(dtrace_fuword64_nocheck)

/*
void


I think we can just use SMAP_DISABLE/SMAP_ENABLE like in the standard
copyinstr() in sys/arch/amd64/amd64/copy.S. The kernel will hotpatch
the correct instructions after checking CPU features very early.

See:

sys/arch/x86/x86/patch.c

-RVP

PS. bch: you should skip the HTML in your emails: none of your messages
have ended up on the mailing list :)


Home | Main Index | Thread Index | Old Index