// gvisor/pkg/safecopy/memclr_amd64.s
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "textflag.h"
// handleMemclrFault returns (the value stored in AX, the value stored in DI).
// Control is transferred to it when memclr below receives SIGSEGV or SIGBUS,
// with the faulting address stored in AX and the signal number stored in DI.
//
// It must have the same frame configuration as memclr so that it can undo any
// potential call frame set up by the assembler.
TEXT handleMemclrFault(SB), NOSPLIT, $0-28
// The frame layout ($0-28) deliberately matches memclr's, so these
// stores land in memclr's result slots: fault at +16, sig at +24.
MOVQ AX, addr+16(FP) // Publish the faulting address as memclr's fault result.
MOVL DI, sig+24(FP) // Publish the signal number (SIGSEGV/SIGBUS) as memclr's sig result.
RET
// memclr sets the n bytes following ptr to zeroes. If a SIGSEGV or SIGBUS
// signal is received during the write, it returns the address that caused the
// fault and the number of the signal that was received. Otherwise, it returns
// an unspecified address and a signal number of 0.
//
// Data is written in order, such that if a fault happens at address p, it is
// safe to assume that all data before p-maxRegisterSize has already been
// successfully written.
//
// The code is derived from runtime.memclrNoHeapPointers.
//
// func memclr(ptr unsafe.Pointer, n uintptr) (fault unsafe.Pointer, sig int32)
TEXT ·memclr(SB), NOSPLIT, $0-28
// Register roles for the whole function:
//   DI = current destination pointer (advances by 256 in the main loop)
//   BX = bytes remaining to clear
//   AX = scalar zero (for the 1..16 byte cases)
//   X0 = 16 bytes of zero (for the SSE cases; initialized lazily below)
//
// Store 0 as the returned signal number. If we run to completion,
// this is the value the caller will see; if a signal is received,
// handleMemclrFault will store a different value in this address.
MOVL $0, sig+24(FP)
MOVQ ptr+0(FP), DI
MOVQ n+8(FP), BX
XORQ AX, AX
// MOVOU seems always faster than REP STOSQ.
tail:
// Dispatch on the remaining length. Each fixed-size case below clears
// exactly BX bytes using a head store plus a store anchored at the end
// (-k(DI)(BX*1)); the two may overlap, which is harmless because both
// write the same value (zero).
TESTQ BX, BX
JEQ _0
CMPQ BX, $2
JBE _1or2
CMPQ BX, $4
JBE _3or4
CMPQ BX, $8
JB _5through7
JE _8
CMPQ BX, $16
JBE _9through16
// From here on every case uses 16-byte SSE stores; zero X0 once.
PXOR X0, X0
CMPQ BX, $32
JBE _17through32
CMPQ BX, $64
JBE _33through64
CMPQ BX, $128
JBE _65through128
CMPQ BX, $256
JBE _129through256
// TODO: use branch table and BSR to make this just a single dispatch
// TODO: for really big clears, use MOVNTDQ, even without AVX2.
loop:
// Main loop: clear 256 bytes per iteration with 16 ascending-offset
// stores, then advance. Offsets increase monotonically (0..240), which
// preserves the in-order-write guarantee documented in the header: on a
// fault at address p, everything before p-maxRegisterSize is written.
MOVOU X0, 0(DI)
MOVOU X0, 16(DI)
MOVOU X0, 32(DI)
MOVOU X0, 48(DI)
MOVOU X0, 64(DI)
MOVOU X0, 80(DI)
MOVOU X0, 96(DI)
MOVOU X0, 112(DI)
MOVOU X0, 128(DI)
MOVOU X0, 144(DI)
MOVOU X0, 160(DI)
MOVOU X0, 176(DI)
MOVOU X0, 192(DI)
MOVOU X0, 208(DI)
MOVOU X0, 224(DI)
MOVOU X0, 240(DI)
SUBQ $256, BX
ADDQ $256, DI
CMPQ BX, $256
JAE loop
// Fewer than 256 bytes left: re-dispatch to the fixed-size cases.
JMP tail
_1or2:
// 1 or 2 bytes: the two byte stores coincide when BX == 1.
MOVB AX, (DI)
MOVB AX, -1(DI)(BX*1)
RET
_0:
RET
_3or4:
// 3 or 4 bytes: overlapping 2-byte stores.
MOVW AX, (DI)
MOVW AX, -2(DI)(BX*1)
RET
_5through7:
// 5..7 bytes: overlapping 4-byte stores.
MOVL AX, (DI)
MOVL AX, -4(DI)(BX*1)
RET
_8:
// We need a separate case for 8 to make sure we clear pointers atomically.
MOVQ AX, (DI)
RET
_9through16:
// 9..16 bytes: overlapping 8-byte stores.
MOVQ AX, (DI)
MOVQ AX, -8(DI)(BX*1)
RET
_17through32:
// 17..32 bytes: overlapping 16-byte stores.
MOVOU X0, (DI)
MOVOU X0, -16(DI)(BX*1)
RET
_33through64:
// 33..64 bytes: two stores from the front, two anchored at the end.
MOVOU X0, (DI)
MOVOU X0, 16(DI)
MOVOU X0, -32(DI)(BX*1)
MOVOU X0, -16(DI)(BX*1)
RET
_65through128:
// 65..128 bytes: four from the front, four anchored at the end.
MOVOU X0, (DI)
MOVOU X0, 16(DI)
MOVOU X0, 32(DI)
MOVOU X0, 48(DI)
MOVOU X0, -64(DI)(BX*1)
MOVOU X0, -48(DI)(BX*1)
MOVOU X0, -32(DI)(BX*1)
MOVOU X0, -16(DI)(BX*1)
RET
_129through256:
// 129..256 bytes: eight from the front, eight anchored at the end.
MOVOU X0, (DI)
MOVOU X0, 16(DI)
MOVOU X0, 32(DI)
MOVOU X0, 48(DI)
MOVOU X0, 64(DI)
MOVOU X0, 80(DI)
MOVOU X0, 96(DI)
MOVOU X0, 112(DI)
MOVOU X0, -128(DI)(BX*1)
MOVOU X0, -112(DI)(BX*1)
MOVOU X0, -96(DI)(BX*1)
MOVOU X0, -80(DI)(BX*1)
MOVOU X0, -64(DI)(BX*1)
MOVOU X0, -48(DI)(BX*1)
MOVOU X0, -32(DI)(BX*1)
MOVOU X0, -16(DI)(BX*1)
RET
// Why addrOfMemclr exists: as of Go 1.17's register-based calling
// convention ("ABIInternal"), assembly functions remain ABI0, and when Go
// code takes the address of an ABI0 assembly function it actually gets
// the address of a compiler-generated ABI-conversion wrapper, not the
// target function. That breaks callers that need the real text address
// (e.g. the safecopy signal handler noting a function's start/end, or
// platforms copying function text to a new mapping). References made
// from assembly, however, resolve directly to the target symbol. So
// instead of using reflect on a Go reference, we expose this assembly
// stub that returns the true address of memclr. This works identically
// on older Go versions, so no build tags are needed.
// See https://go.googlesource.com/go/+/refs/heads/master/src/cmd/compile/abi-internal.md
// func addrOfMemclr() uintptr
//
// addrOfMemclr returns the start address of the actual memclr text
// symbol. Because this reference is made from assembly, it resolves
// directly to ·memclr rather than to any ABI-conversion wrapper the Go
// compiler may generate when Go code takes the function's address.
//
// NOSPLIT added for consistency with the other TEXT directives in this
// file: this is a zero-frame leaf function that makes no calls, so it
// needs no stack-split check.
TEXT ·addrOfMemclr(SB), NOSPLIT, $0-8
MOVQ $·memclr(SB), AX // AX = address of the real memclr text symbol.
MOVQ AX, ret+0(FP) // Return it as a uintptr.
RET