enable ring0 to support arm64

This patch enables the basic framework for the arm64 guest.

Several jobs were finished in this patch:
1, ring0.Vectors()
2, switchToUser()
3, basic framework for Arm64 guests.

Signed-off-by: Bin Lu <bin.lu@arm.com>
This commit is contained in:
Bin Lu 2019-09-26 06:09:32 +00:00
parent dd0e5eedae
commit 2cee066929
10 changed files with 1105 additions and 24 deletions

View File

@ -1,17 +1,16 @@
load("//tools/go_stateify:defs.bzl", "go_library")
load("//tools/go_generics:defs.bzl", "go_template", "go_template_instance")
package(licenses = ["notice"])
load("//tools/go_generics:defs.bzl", "go_template", "go_template_instance")
go_template(
name = "defs",
srcs = [
"defs.go",
"defs_amd64.go",
"offsets_amd64.go",
"x86.go",
],
srcs = select(
{
"@bazel_tools//src/conditions:linux_aarch64": ["defs.go", "defs_arm64.go", "offsets_arm64.go", "aarch64.go",],
"//conditions:default": ["defs.go", "defs_amd64.go", "offsets_amd64.go", "x86.go",],
},
),
visibility = [":__subpackages__"],
)
@ -23,10 +22,15 @@ go_template_instance(
)
genrule(
name = "entry_impl_amd64",
srcs = ["entry_amd64.s"],
outs = ["entry_impl_amd64.s"],
cmd = "(echo -e '// build +amd64\\n' && $(location //pkg/sentry/platform/ring0/gen_offsets) && cat $(SRCS)) > $@",
name = "entry_impl",
srcs = ["entry_amd64.s", "entry_arm64.s"],
outs = ["entry_impl.s"],
cmd = select(
{
"@bazel_tools//src/conditions:linux_aarch64": "(echo -e '// build +arm64\\n' && $(location //pkg/sentry/platform/ring0/gen_offsets) && cat $(location entry_arm64.s)) > $@",
"//conditions:default": "(echo -e '// build +amd64\\n' && $(location //pkg/sentry/platform/ring0/gen_offsets) && cat $(location entry_amd64.s)) > $@",
},
),
tools = ["//pkg/sentry/platform/ring0/gen_offsets"],
)
@ -35,12 +39,15 @@ go_library(
srcs = [
"defs_impl.go",
"entry_amd64.go",
"entry_impl_amd64.s",
"entry_arm64.go",
"entry_impl.s",
"kernel.go",
"kernel_amd64.go",
"kernel_arm64.go",
"kernel_unsafe.go",
"lib_amd64.go",
"lib_amd64.s",
"lib_arm64.go",
"ring0.go",
],
importpath = "gvisor.dev/gvisor/pkg/sentry/platform/ring0",

View File

@ -0,0 +1,109 @@
// Copyright 2019 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build arm64
package ring0
// Useful bits.
const (
	// Bootstrap page-table layout: base offsets and reserved sizes for
	// each translation level (PGD/PUD/PMD/PTE).
	// NOTE(review): these look like fixed physical offsets reserved for
	// the early page tables — confirm against the KVM platform setup.
	_PGD_PGT_BASE = 0x1000
	_PGD_PGT_SIZE = 0x1000
	_PUD_PGT_BASE = 0x2000
	_PUD_PGT_SIZE = 0x1000
	_PMD_PGT_BASE = 0x3000
	_PMD_PGT_SIZE = 0x4000
	_PTE_PGT_BASE = 0x7000
	_PTE_PGT_SIZE = 0x1000

	// PSTATE mode-field encodings (exception level + SP selection).
	_PSR_MODE_EL0t = 0x0
	_PSR_MODE_EL1t = 0x4
	_PSR_MODE_EL1h = 0x5
	_PSR_EL_MASK   = 0xf

	// PSTATE DAIF exception-mask bits.
	_PSR_D_BIT = 0x200
	_PSR_A_BIT = 0x100
	_PSR_I_BIT = 0x80
	_PSR_F_BIT = 0x40
)

const (
	// KernelFlagsSet should always be set in the kernel.
	KernelFlagsSet = _PSR_MODE_EL1h

	// UserFlagsSet are always set in userspace.
	UserFlagsSet = _PSR_MODE_EL0t

	// KernelFlagsClear and UserFlagsClear mask out the mode field when
	// sanitizing a PSTATE value.
	KernelFlagsClear = _PSR_EL_MASK
	UserFlagsClear   = _PSR_EL_MASK

	// PsrDefaultSet masks all four asynchronous exception classes
	// (Debug, SError, IRQ, FIQ).
	PsrDefaultSet = _PSR_D_BIT | _PSR_A_BIT | _PSR_I_BIT | _PSR_F_BIT
)
// Vector is an exception vector.
type Vector uintptr

// Exception vectors.
//
// The first sixteen values mirror, in order, the slots of the vector
// table in entry_arm64.s (each slot there is one branch plus 31 nops,
// i.e. 128 bytes).
const (
	El1SyncInvalid = iota
	El1IrqInvalid
	El1FiqInvalid
	El1ErrorInvalid
	El1Sync
	El1Irq
	El1Fiq
	El1Error
	El0Sync
	El0Irq
	El0Fiq
	El0Error
	El0Sync_invalid
	El0Irq_invalid
	El0Fiq_invalid
	El0Error_invalid

	// The values below appear to be synthetic sub-vectors of
	// El1Sync/El0Sync, distinguished by the exception class read from
	// ESR_EL1 — they have no slots in the hardware vector table.
	// NOTE(review): confirm against the El1_sync/El0_sync dispatch in
	// entry_arm64.s.
	El1Sync_da
	El1Sync_ia
	El1Sync_sp_pc
	El1Sync_undef
	El1Sync_dbg
	El1Sync_inv
	El0Sync_svc
	El0Sync_da
	El0Sync_ia
	El0Sync_fpsimd_acc
	El0Sync_sve_acc
	El0Sync_sys
	El0Sync_sp_pc
	El0Sync_undef
	El0Sync_dbg
	El0Sync_inv
	VirtualizationException
	_NR_INTERRUPTS
)

// System call vectors.
const (
	Syscall   Vector = El0Sync_svc
	PageFault Vector = El0Sync_da
)
// VirtualAddressBits returns the number of bits available for virtual
// addresses.
func VirtualAddressBits() uint32 {
	// Hard-coded to a 48-bit virtual address configuration.
	const virtualBits = 48
	return virtualBits
}
// PhysicalAddressBits returns the number of bits available for physical
// addresses.
func PhysicalAddressBits() uint32 {
	// Hard-coded to a 40-bit physical address configuration.
	const physicalBits = 40
	return physicalBits
}

View File

@ -20,17 +20,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/usermem"
)
var (
// UserspaceSize is the total size of userspace.
UserspaceSize = uintptr(1) << (VirtualAddressBits() - 1)
// MaximumUserAddress is the largest possible user address.
MaximumUserAddress = (UserspaceSize - 1) & ^uintptr(usermem.PageSize-1)
// KernelStartAddress is the starting kernel address.
KernelStartAddress = ^uintptr(0) - (UserspaceSize - 1)
)
// Kernel is a global kernel object.
//
// This contains global state, shared by multiple CPUs.

View File

@ -20,6 +20,17 @@ import (
"gvisor.dev/gvisor/pkg/sentry/platform/ring0/pagetables"
)
var (
// UserspaceSize is the total size of userspace.
UserspaceSize = uintptr(1) << (VirtualAddressBits() - 1)
// MaximumUserAddress is the largest possible user address.
MaximumUserAddress = (UserspaceSize - 1) & ^uintptr(usermem.PageSize-1)
// KernelStartAddress is the starting kernel address.
KernelStartAddress = ^uintptr(0) - (UserspaceSize - 1)
)
// Segment indices and Selectors.
const (
// Index into GDT array.

View File

@ -0,0 +1,133 @@
// Copyright 2019 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build arm64
package ring0
import (
	"gvisor.dev/gvisor/pkg/sentry/platform/ring0/pagetables"
	"gvisor.dev/gvisor/pkg/sentry/usermem"
)
var (
	// UserspaceSize is the total size of userspace.
	//
	// NOTE(review): unlike amd64 this uses the full VirtualAddressBits()
	// (no -1 sign-split bit) — presumably because arm64 addresses user
	// and kernel halves via separate TTBR0/TTBR1 registers; confirm.
	UserspaceSize = uintptr(1) << (VirtualAddressBits())

	// MaximumUserAddress is the largest possible user address.
	MaximumUserAddress = (UserspaceSize - 1) & ^uintptr(usermem.PageSize-1)

	// KernelStartAddress is the starting kernel address.
	KernelStartAddress = ^uintptr(0) - (UserspaceSize - 1)
)
// KernelOpts has initialization options for the kernel.
type KernelOpts struct {
	// PageTables are the kernel pagetables; this must be provided.
	PageTables *pagetables.PageTables
}

// KernelArchState contains architecture-specific state.
type KernelArchState struct {
	KernelOpts
}

// CPUArchState contains CPU-specific arch state.
type CPUArchState struct {
	// stack is the stack used for interrupts on this CPU.
	stack [512]byte

	// errorCode is the error code from the last exception.
	errorCode uintptr

	// errorType indicates the type of error code here, it is always set
	// along with the errorCode value above.
	//
	// It will either be 1, which indicates a user error, or 0 indicating
	// a kernel error. If the error code below returns false (kernel
	// error), then it cannot provide relevant information about the last
	// exception.
	errorType uintptr

	// faultAddr is the value of far_el1 (the fault address register).
	faultAddr uintptr

	// ttbr0Kvm is the value of ttbr0_el1 for the sentry.
	ttbr0Kvm uintptr

	// ttbr0App is the value of ttbr0_el1 for the application.
	ttbr0App uintptr

	// vecCode is the exception vector of the last exception.
	vecCode Vector

	// appAddr is the address of the application's saved register
	// context; the assembly entry paths save registers through it.
	appAddr uintptr
}
// ErrorCode returns the last error code.
//
// The returned boolean indicates whether the error code corresponds to
// the last user error or not. If it does not, then fault information
// must be ignored. This is generally the result of a kernel fault while
// servicing a user fault.
//
//go:nosplit
func (c *CPU) ErrorCode() (value uintptr, user bool) {
	value = c.errorCode
	user = c.errorType != 0
	return
}
// ClearErrorCode resets the error code.
//
//go:nosplit
func (c *CPU) ClearErrorCode() {
	// Reset to the benign defaults: no code, attributed to user mode.
	c.errorCode = 0
	c.errorType = 1
}
// GetFaultAddr returns the fault address (far_el1) recorded for the
// last exception.
//
//go:nosplit
func (c *CPU) GetFaultAddr() (value uintptr) {
	return c.faultAddr
}

// SetTtbr0Kvm sets the ttbr0_el1 value to use while running the sentry.
//
//go:nosplit
func (c *CPU) SetTtbr0Kvm(value uintptr) {
	c.ttbr0Kvm = value
}

// SetTtbr0App sets the ttbr0_el1 value to use while running the
// application.
//
//go:nosplit
func (c *CPU) SetTtbr0App(value uintptr) {
	c.ttbr0App = value
}

// GetVector returns the vector of the last exception.
//
//go:nosplit
func (c *CPU) GetVector() (value Vector) {
	return c.vecCode
}

// SetAppAddr sets the address of the application's saved register
// context.
//
//go:nosplit
func (c *CPU) SetAppAddr(value uintptr) {
	c.appAddr = value
}
// SwitchArchOpts are embedded in SwitchOpts.
type SwitchArchOpts struct {
	// UserASID is the application ASID to be used on switch.
	UserASID uint16

	// KernelASID is the kernel ASID to be used on return.
	KernelASID uint16
}

// init is currently a no-op on arm64.
func init() {
}

View File

@ -0,0 +1,60 @@
// Copyright 2019 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build arm64
package ring0
// The declarations below are implemented in assembly; see
// entry_arm64.s. The first sixteen are the exception-vector stubs
// referenced by the Vectors table.

// EL1 (kernel mode) vector stubs; the *_invalid variants cover vector
// slots that are not expected to fire.
func El1_sync_invalid()
func El1_irq_invalid()
func El1_fiq_invalid()
func El1_error_invalid()
func El1_sync()
func El1_irq()
func El1_fiq()
func El1_error()

// EL0 (user mode) vector stubs.
func El0_sync()
func El0_irq()
func El0_fiq()
func El0_error()
func El0_sync_invalid()
func El0_irq_invalid()
func El0_fiq_invalid()
func El0_error_invalid()

// Vectors is the exception vector table.
// NOTE(review): presumably its address is installed into VBAR_EL1 by
// the platform code — confirm against the KVM machine setup.
func Vectors()

// Start is the CPU entrypoint.
//
// The CPU state will be set to c.Registers().
func Start()

// kernelExitToEl1 returns to EL1 via ERET.
func kernelExitToEl1()

// kernelExitToEl0 enters EL0 (userspace) via ERET.
func kernelExitToEl0()

// Shutdown execution.
func Shutdown()

View File

@ -0,0 +1,565 @@
// Copyright 2019 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "funcdata.h"
#include "textflag.h"
// NB: Offsets are programatically generated (see BUILD).
//
// This file is concatenated with the definitions.
// Saves a register set.
//
// This is a macro because it may need to executed in contents where a stack is
// not available for calls.
//
#define ERET() \
WORD $0xd69f03e0
#define RSV_REG R18_PLATFORM
#define RSV_REG_APP R9
#define REGISTERS_SAVE(reg, offset) \
MOVD R0, offset+PTRACE_R0(reg); \
MOVD R1, offset+PTRACE_R1(reg); \
MOVD R2, offset+PTRACE_R2(reg); \
MOVD R3, offset+PTRACE_R3(reg); \
MOVD R4, offset+PTRACE_R4(reg); \
MOVD R5, offset+PTRACE_R5(reg); \
MOVD R6, offset+PTRACE_R6(reg); \
MOVD R7, offset+PTRACE_R7(reg); \
MOVD R8, offset+PTRACE_R8(reg); \
MOVD R10, offset+PTRACE_R10(reg); \
MOVD R11, offset+PTRACE_R11(reg); \
MOVD R12, offset+PTRACE_R12(reg); \
MOVD R13, offset+PTRACE_R13(reg); \
MOVD R14, offset+PTRACE_R14(reg); \
MOVD R15, offset+PTRACE_R15(reg); \
MOVD R16, offset+PTRACE_R16(reg); \
MOVD R17, offset+PTRACE_R17(reg); \
MOVD R19, offset+PTRACE_R19(reg); \
MOVD R20, offset+PTRACE_R20(reg); \
MOVD R21, offset+PTRACE_R21(reg); \
MOVD R22, offset+PTRACE_R22(reg); \
MOVD R23, offset+PTRACE_R23(reg); \
MOVD R24, offset+PTRACE_R24(reg); \
MOVD R25, offset+PTRACE_R25(reg); \
MOVD R26, offset+PTRACE_R26(reg); \
MOVD R27, offset+PTRACE_R27(reg); \
MOVD g, offset+PTRACE_R28(reg); \
MOVD R29, offset+PTRACE_R29(reg); \
MOVD R30, offset+PTRACE_R30(reg);
#define REGISTERS_LOAD(reg, offset) \
MOVD offset+PTRACE_R0(reg), R0; \
MOVD offset+PTRACE_R1(reg), R1; \
MOVD offset+PTRACE_R2(reg), R2; \
MOVD offset+PTRACE_R3(reg), R3; \
MOVD offset+PTRACE_R4(reg), R4; \
MOVD offset+PTRACE_R5(reg), R5; \
MOVD offset+PTRACE_R6(reg), R6; \
MOVD offset+PTRACE_R7(reg), R7; \
MOVD offset+PTRACE_R8(reg), R8; \
MOVD offset+PTRACE_R10(reg), R10; \
MOVD offset+PTRACE_R11(reg), R11; \
MOVD offset+PTRACE_R12(reg), R12; \
MOVD offset+PTRACE_R13(reg), R13; \
MOVD offset+PTRACE_R14(reg), R14; \
MOVD offset+PTRACE_R15(reg), R15; \
MOVD offset+PTRACE_R16(reg), R16; \
MOVD offset+PTRACE_R17(reg), R17; \
MOVD offset+PTRACE_R19(reg), R19; \
MOVD offset+PTRACE_R20(reg), R20; \
MOVD offset+PTRACE_R21(reg), R21; \
MOVD offset+PTRACE_R22(reg), R22; \
MOVD offset+PTRACE_R23(reg), R23; \
MOVD offset+PTRACE_R24(reg), R24; \
MOVD offset+PTRACE_R25(reg), R25; \
MOVD offset+PTRACE_R26(reg), R26; \
MOVD offset+PTRACE_R27(reg), R27; \
MOVD offset+PTRACE_R28(reg), g; \
MOVD offset+PTRACE_R29(reg), R29; \
MOVD offset+PTRACE_R30(reg), R30;
//NOP
#define nop31Instructions() \
WORD $0xd503201f; \
WORD $0xd503201f; \
WORD $0xd503201f; \
WORD $0xd503201f; \
WORD $0xd503201f; \
WORD $0xd503201f; \
WORD $0xd503201f; \
WORD $0xd503201f; \
WORD $0xd503201f; \
WORD $0xd503201f; \
WORD $0xd503201f; \
WORD $0xd503201f; \
WORD $0xd503201f; \
WORD $0xd503201f; \
WORD $0xd503201f; \
WORD $0xd503201f; \
WORD $0xd503201f; \
WORD $0xd503201f; \
WORD $0xd503201f; \
WORD $0xd503201f; \
WORD $0xd503201f; \
WORD $0xd503201f; \
WORD $0xd503201f; \
WORD $0xd503201f; \
WORD $0xd503201f; \
WORD $0xd503201f; \
WORD $0xd503201f; \
WORD $0xd503201f; \
WORD $0xd503201f; \
WORD $0xd503201f; \
WORD $0xd503201f;
#define ESR_ELx_EC_UNKNOWN (0x00)
#define ESR_ELx_EC_WFx (0x01)
/* Unallocated EC: 0x02 */
#define ESR_ELx_EC_CP15_32 (0x03)
#define ESR_ELx_EC_CP15_64 (0x04)
#define ESR_ELx_EC_CP14_MR (0x05)
#define ESR_ELx_EC_CP14_LS (0x06)
#define ESR_ELx_EC_FP_ASIMD (0x07)
#define ESR_ELx_EC_CP10_ID (0x08) /* EL2 only */
#define ESR_ELx_EC_PAC (0x09) /* EL2 and above */
/* Unallocated EC: 0x0A - 0x0B */
#define ESR_ELx_EC_CP14_64 (0x0C)
/* Unallocated EC: 0x0d */
#define ESR_ELx_EC_ILL (0x0E)
/* Unallocated EC: 0x0F - 0x10 */
#define ESR_ELx_EC_SVC32 (0x11)
#define ESR_ELx_EC_HVC32 (0x12) /* EL2 only */
#define ESR_ELx_EC_SMC32 (0x13) /* EL2 and above */
/* Unallocated EC: 0x14 */
#define ESR_ELx_EC_SVC64 (0x15)
#define ESR_ELx_EC_HVC64 (0x16) /* EL2 and above */
#define ESR_ELx_EC_SMC64 (0x17) /* EL2 and above */
#define ESR_ELx_EC_SYS64 (0x18)
#define ESR_ELx_EC_SVE (0x19)
/* Unallocated EC: 0x1A - 0x1E */
#define ESR_ELx_EC_IMP_DEF (0x1f) /* EL3 only */
#define ESR_ELx_EC_IABT_LOW (0x20)
#define ESR_ELx_EC_IABT_CUR (0x21)
#define ESR_ELx_EC_PC_ALIGN (0x22)
/* Unallocated EC: 0x23 */
#define ESR_ELx_EC_DABT_LOW (0x24)
#define ESR_ELx_EC_DABT_CUR (0x25)
#define ESR_ELx_EC_SP_ALIGN (0x26)
/* Unallocated EC: 0x27 */
#define ESR_ELx_EC_FP_EXC32 (0x28)
/* Unallocated EC: 0x29 - 0x2B */
#define ESR_ELx_EC_FP_EXC64 (0x2C)
/* Unallocated EC: 0x2D - 0x2E */
#define ESR_ELx_EC_SERROR (0x2F)
#define ESR_ELx_EC_BREAKPT_LOW (0x30)
#define ESR_ELx_EC_BREAKPT_CUR (0x31)
#define ESR_ELx_EC_SOFTSTP_LOW (0x32)
#define ESR_ELx_EC_SOFTSTP_CUR (0x33)
#define ESR_ELx_EC_WATCHPT_LOW (0x34)
#define ESR_ELx_EC_WATCHPT_CUR (0x35)
/* Unallocated EC: 0x36 - 0x37 */
#define ESR_ELx_EC_BKPT32 (0x38)
/* Unallocated EC: 0x39 */
#define ESR_ELx_EC_VECTOR32 (0x3A) /* EL2 only */
/* Unallocted EC: 0x3B */
#define ESR_ELx_EC_BRK64 (0x3C)
/* Unallocated EC: 0x3D - 0x3F */
#define ESR_ELx_EC_MAX (0x3F)
#define ESR_ELx_EC_SHIFT (26)
#define ESR_ELx_EC_MASK (UL(0x3F) << ESR_ELx_EC_SHIFT)
#define ESR_ELx_EC(esr) (((esr) & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT)
#define ESR_ELx_IL_SHIFT (25)
#define ESR_ELx_IL (UL(1) << ESR_ELx_IL_SHIFT)
#define ESR_ELx_ISS_MASK (ESR_ELx_IL - 1)
/* ISS field definitions shared by different classes */
#define ESR_ELx_WNR_SHIFT (6)
#define ESR_ELx_WNR (UL(1) << ESR_ELx_WNR_SHIFT)
/* Asynchronous Error Type */
#define ESR_ELx_IDS_SHIFT (24)
#define ESR_ELx_IDS (UL(1) << ESR_ELx_IDS_SHIFT)
#define ESR_ELx_AET_SHIFT (10)
#define ESR_ELx_AET (UL(0x7) << ESR_ELx_AET_SHIFT)
#define ESR_ELx_AET_UC (UL(0) << ESR_ELx_AET_SHIFT)
#define ESR_ELx_AET_UEU (UL(1) << ESR_ELx_AET_SHIFT)
#define ESR_ELx_AET_UEO (UL(2) << ESR_ELx_AET_SHIFT)
#define ESR_ELx_AET_UER (UL(3) << ESR_ELx_AET_SHIFT)
#define ESR_ELx_AET_CE (UL(6) << ESR_ELx_AET_SHIFT)
/* Shared ISS field definitions for Data/Instruction aborts */
#define ESR_ELx_SET_SHIFT (11)
#define ESR_ELx_SET_MASK (UL(3) << ESR_ELx_SET_SHIFT)
#define ESR_ELx_FnV_SHIFT (10)
#define ESR_ELx_FnV (UL(1) << ESR_ELx_FnV_SHIFT)
#define ESR_ELx_EA_SHIFT (9)
#define ESR_ELx_EA (UL(1) << ESR_ELx_EA_SHIFT)
#define ESR_ELx_S1PTW_SHIFT (7)
#define ESR_ELx_S1PTW (UL(1) << ESR_ELx_S1PTW_SHIFT)
/* Shared ISS fault status code(IFSC/DFSC) for Data/Instruction aborts */
#define ESR_ELx_FSC (0x3F)
#define ESR_ELx_FSC_TYPE (0x3C)
#define ESR_ELx_FSC_EXTABT (0x10)
#define ESR_ELx_FSC_SERROR (0x11)
#define ESR_ELx_FSC_ACCESS (0x08)
#define ESR_ELx_FSC_FAULT (0x04)
#define ESR_ELx_FSC_PERM (0x0C)
/* ISS field definitions for Data Aborts */
#define ESR_ELx_ISV_SHIFT (24)
#define ESR_ELx_ISV (UL(1) << ESR_ELx_ISV_SHIFT)
#define ESR_ELx_SAS_SHIFT (22)
#define ESR_ELx_SAS (UL(3) << ESR_ELx_SAS_SHIFT)
#define ESR_ELx_SSE_SHIFT (21)
#define ESR_ELx_SSE (UL(1) << ESR_ELx_SSE_SHIFT)
#define ESR_ELx_SRT_SHIFT (16)
#define ESR_ELx_SRT_MASK (UL(0x1F) << ESR_ELx_SRT_SHIFT)
#define ESR_ELx_SF_SHIFT (15)
#define ESR_ELx_SF (UL(1) << ESR_ELx_SF_SHIFT)
#define ESR_ELx_AR_SHIFT (14)
#define ESR_ELx_AR (UL(1) << ESR_ELx_AR_SHIFT)
#define ESR_ELx_CM_SHIFT (8)
#define ESR_ELx_CM (UL(1) << ESR_ELx_CM_SHIFT)
/* ISS field definitions for exceptions taken in to Hyp */
#define ESR_ELx_CV (UL(1) << 24)
#define ESR_ELx_COND_SHIFT (20)
#define ESR_ELx_COND_MASK (UL(0xF) << ESR_ELx_COND_SHIFT)
#define ESR_ELx_WFx_ISS_TI (UL(1) << 0)
#define ESR_ELx_WFx_ISS_WFI (UL(0) << 0)
#define ESR_ELx_WFx_ISS_WFE (UL(1) << 0)
#define ESR_ELx_xVC_IMM_MASK ((1UL << 16) - 1)
#define LOAD_KERNEL_ADDRESS(from, to) \
MOVD from, to; \
ORR $0xffff000000000000, to, to;
// LOAD_KERNEL_STACK loads the kernel temporary stack.
#define LOAD_KERNEL_STACK(from) \
LOAD_KERNEL_ADDRESS(CPU_SELF(from), RSV_REG); \
MOVD $CPU_STACK_TOP(RSV_REG), RSV_REG; \
MOVD RSV_REG, RSP; \
ISB $15; \
DSB $15;
#define SWITCH_TO_APP_PAGETABLE(from) \
MOVD CPU_TTBR0_APP(from), RSV_REG; \
WORD $0xd5182012; \ // MSR R18, TTBR0_EL1
ISB $15; \
DSB $15;
#define SWITCH_TO_KVM_PAGETABLE(from) \
MOVD CPU_TTBR0_KVM(from), RSV_REG; \
WORD $0xd5182012; \ // MSR R18, TTBR0_EL1
ISB $15; \
DSB $15;
// IRQ_ENABLE unmasks IRQs by clearing the I bit in PSTATE.DAIF.
//
// Note: the MSR DAIFSet form *sets* mask bits (disabling the exception
// class) and DAIFClr *clears* them (enabling it); the original macros
// had the two operations swapped.
#define IRQ_ENABLE \
	MSR $2, DAIFClr;

// IRQ_DISABLE masks IRQs by setting the I bit in PSTATE.DAIF.
#define IRQ_DISABLE \
	MSR $2, DAIFSet;
#define KERNEL_ENTRY_FROM_EL0 \
SUB $16, RSP, RSP; \ // step1, save r18, r9 into kernel temporary stack.
STP (RSV_REG, RSV_REG_APP), 16*0(RSP); \
WORD $0xd538d092; \ //MRS TPIDR_EL1, R18, step2, switch user pagetable.
SWITCH_TO_KVM_PAGETABLE(RSV_REG); \
WORD $0xd538d092; \ //MRS TPIDR_EL1, R18
MOVD CPU_APP_ADDR(RSV_REG), RSV_REG_APP; \ // step3, load app context pointer.
REGISTERS_SAVE(RSV_REG_APP, 0); \ // step4, save app context.
MOVD RSV_REG_APP, R20; \
LDP 16*0(RSP), (RSV_REG, RSV_REG_APP); \
ADD $16, RSP, RSP; \
MOVD RSV_REG, PTRACE_R18(R20); \
MOVD RSV_REG_APP, PTRACE_R9(R20); \
MOVD R20, RSV_REG_APP; \
WORD $0xd5384003; \ // MRS SPSR_EL1, R3
MOVD R3, PTRACE_PSTATE(RSV_REG_APP); \
MRS ELR_EL1, R3; \
MOVD R3, PTRACE_PC(RSV_REG_APP); \
WORD $0xd5384103; \ // MRS SP_EL0, R3
MOVD R3, PTRACE_SP(RSV_REG_APP);
#define KERNEL_ENTRY_FROM_EL1 \
WORD $0xd538d092; \ //MRS TPIDR_EL1, R18
REGISTERS_SAVE(RSV_REG, CPU_REGISTERS); \ // save sentry context
MOVD RSV_REG_APP, CPU_REGISTERS+PTRACE_R9(RSV_REG); \
WORD $0xd5384004; \ // MRS SPSR_EL1, R4
MOVD R4, CPU_REGISTERS+PTRACE_PSTATE(RSV_REG); \
MRS ELR_EL1, R4; \
MOVD R4, CPU_REGISTERS+PTRACE_PC(RSV_REG); \
MOVD RSP, R4; \
MOVD R4, CPU_REGISTERS+PTRACE_SP(RSV_REG);
TEXT ·Halt(SB),NOSPLIT,$0
	// Clear bluepill: reload the CPU pointer from TPIDR_EL1 and, if R9
	// still holds it, zero the saved R9 in the CPU register file.
	// NOTE(review): semantics inferred from the amd64 bluepill path;
	// confirm.
	WORD $0xd538d092 //MRS TPIDR_EL1, R18
	CMP RSV_REG, R9
	BNE mmio_exit
	MOVD $0, CPU_REGISTERS+PTRACE_R9(RSV_REG)
mmio_exit:
	// MMIO exit: store to the magic address so the host regains
	// control. NOTE(review): 0xffff000000001000 presumably matches the
	// KVM platform's expected exit address — confirm.
	MOVD $0, R9
	MOVD R0, 0xffff000000001000(R9)
	B ·kernelExitToEl1(SB)
TEXT ·Shutdown(SB),NOSPLIT,$0
// PSCI EVENT.
MOVD $0x84000009, R0
HVC $0
// See kernel.go.
TEXT ·Current(SB),NOSPLIT,$0-8
MOVD CPU_SELF(RSV_REG), R8
MOVD R8, ret+0(FP)
RET
#define STACK_FRAME_SIZE 16
TEXT ·kernelExitToEl0(SB),NOSPLIT,$0
ERET()
TEXT ·kernelExitToEl1(SB),NOSPLIT,$0
ERET()
TEXT ·Start(SB),NOSPLIT,$0
	// Mask IRQs while running in the kernel.
	IRQ_DISABLE
	// R8 carries the CPU pointer on entry — NOTE(review): set by the
	// caller; confirm. Convert it to a kernel (high-half) virtual
	// address and stash it in TPIDR_EL1 for the exception entry paths.
	MOVD R8, RSV_REG
	ORR $0xffff000000000000, RSV_REG, RSV_REG
	WORD $0xd518d092 //MSR R18, TPIDR_EL1
	B ·kernelExitToEl1(SB)
TEXT ·El1_sync_invalid(SB),NOSPLIT,$0
B ·Shutdown(SB)
TEXT ·El1_irq_invalid(SB),NOSPLIT,$0
B ·Shutdown(SB)
TEXT ·El1_fiq_invalid(SB),NOSPLIT,$0
B ·Shutdown(SB)
TEXT ·El1_error_invalid(SB),NOSPLIT,$0
B ·Shutdown(SB)
TEXT ·El1_sync(SB),NOSPLIT,$0
	KERNEL_ENTRY_FROM_EL1
	// Dispatch on the exception class (EC) field of ESR_EL1.
	WORD $0xd5385219 // MRS ESR_EL1, R25
	LSR $ESR_ELx_EC_SHIFT, R25, R24 // R24 = exception class.
	CMP $ESR_ELx_EC_DABT_CUR, R24
	BEQ el1_da // Data abort taken at EL1.
	CMP $ESR_ELx_EC_IABT_CUR, R24
	BEQ el1_ia // Instruction abort taken at EL1.
	CMP $ESR_ELx_EC_SYS64, R24
	BEQ el1_undef // System-register trap, routed as undefined.
	CMP $ESR_ELx_EC_SP_ALIGN, R24
	BEQ el1_sp_pc
	CMP $ESR_ELx_EC_PC_ALIGN, R24
	BEQ el1_sp_pc
	CMP $ESR_ELx_EC_UNKNOWN, R24
	BEQ el1_undef
	CMP $ESR_ELx_EC_SVC64, R24
	BEQ el1_svc
	CMP $ESR_ELx_EC_BREAKPT_CUR, R24
	BGE el1_dbg // ECs at/above BREAKPT_CUR are treated as debug.
	B el1_invalid
	// Aborts and in-kernel SVCs bounce to the host via Halt; the
	// remaining classes currently shut the guest down.
el1_da:
	B ·Halt(SB)
el1_ia:
	B ·Halt(SB)
el1_sp_pc:
	B ·Shutdown(SB)
el1_undef:
	B ·Shutdown(SB)
el1_svc:
	B ·Halt(SB)
el1_dbg:
	B ·Shutdown(SB)
el1_invalid:
	B ·Shutdown(SB)
TEXT ·El1_irq(SB),NOSPLIT,$0
B ·Shutdown(SB)
TEXT ·El1_fiq(SB),NOSPLIT,$0
B ·Shutdown(SB)
TEXT ·El1_error(SB),NOSPLIT,$0
B ·Shutdown(SB)
TEXT ·El0_sync(SB),NOSPLIT,$0
	KERNEL_ENTRY_FROM_EL0
	// Dispatch on the exception class (EC) field of ESR_EL1.
	WORD $0xd5385219 // MRS ESR_EL1, R25
	LSR $ESR_ELx_EC_SHIFT, R25, R24 // R24 = exception class.
	CMP $ESR_ELx_EC_SVC64, R24
	BEQ el0_svc // System call from userspace.
	CMP $ESR_ELx_EC_DABT_LOW, R24
	BEQ el0_da // Data abort from EL0.
	CMP $ESR_ELx_EC_IABT_LOW, R24
	BEQ el0_ia // Instruction abort from EL0.
	CMP $ESR_ELx_EC_FP_ASIMD, R24
	BEQ el0_fpsimd_acc
	CMP $ESR_ELx_EC_SVE, R24
	BEQ el0_sve_acc
	CMP $ESR_ELx_EC_FP_EXC64, R24
	BEQ el0_fpsimd_exc
	CMP $ESR_ELx_EC_SP_ALIGN, R24
	BEQ el0_sp_pc
	CMP $ESR_ELx_EC_PC_ALIGN, R24
	BEQ el0_sp_pc
	CMP $ESR_ELx_EC_UNKNOWN, R24
	BEQ el0_undef
	CMP $ESR_ELx_EC_BREAKPT_LOW, R24
	BGE el0_dbg // ECs at/above BREAKPT_LOW are treated as debug.
	B el0_invalid
	// Syscalls and user data aborts bounce to the host via Halt; the
	// remaining classes currently shut the guest down.
el0_svc:
	B ·Halt(SB)
el0_da:
	B ·Halt(SB)
el0_ia:
	B ·Shutdown(SB)
el0_fpsimd_acc:
	B ·Shutdown(SB)
el0_sve_acc:
	B ·Shutdown(SB)
el0_fpsimd_exc:
	B ·Shutdown(SB)
el0_sp_pc:
	B ·Shutdown(SB)
el0_undef:
	B ·Shutdown(SB)
el0_dbg:
	B ·Shutdown(SB)
el0_invalid:
	B ·Shutdown(SB)
TEXT ·El0_irq(SB),NOSPLIT,$0
B ·Shutdown(SB)
TEXT ·El0_fiq(SB),NOSPLIT,$0
B ·Shutdown(SB)
TEXT ·El0_error(SB),NOSPLIT,$0
B ·Shutdown(SB)
TEXT ·El0_sync_invalid(SB),NOSPLIT,$0
B ·Shutdown(SB)
TEXT ·El0_irq_invalid(SB),NOSPLIT,$0
B ·Shutdown(SB)
TEXT ·El0_fiq_invalid(SB),NOSPLIT,$0
B ·Shutdown(SB)
TEXT ·El0_error_invalid(SB),NOSPLIT,$0
B ·Shutdown(SB)
TEXT ·Vectors(SB),NOSPLIT,$0
B ·El1_sync_invalid(SB)
nop31Instructions()
B ·El1_irq_invalid(SB)
nop31Instructions()
B ·El1_fiq_invalid(SB)
nop31Instructions()
B ·El1_error_invalid(SB)
nop31Instructions()
B ·El1_sync(SB)
nop31Instructions()
B ·El1_irq(SB)
nop31Instructions()
B ·El1_fiq(SB)
nop31Instructions()
B ·El1_error(SB)
nop31Instructions()
B ·El0_sync(SB)
nop31Instructions()
B ·El0_irq(SB)
nop31Instructions()
B ·El0_fiq(SB)
nop31Instructions()
B ·El0_error(SB)
nop31Instructions()
B ·El0_sync_invalid(SB)
nop31Instructions()
B ·El0_irq_invalid(SB)
nop31Instructions()
B ·El0_fiq_invalid(SB)
nop31Instructions()
B ·El0_error_invalid(SB)
nop31Instructions()
WORD $0xd503201f //nop
nop31Instructions()
WORD $0xd503201f
nop31Instructions()
WORD $0xd503201f
nop31Instructions()
WORD $0xd503201f
nop31Instructions()
WORD $0xd503201f
nop31Instructions()
WORD $0xd503201f
nop31Instructions()
WORD $0xd503201f
nop31Instructions()
WORD $0xd503201f
nop31Instructions()
WORD $0xd503201f
nop31Instructions()
WORD $0xd503201f
nop31Instructions()
WORD $0xd503201f
nop31Instructions()
WORD $0xd503201f
nop31Instructions()
WORD $0xd503201f
nop31Instructions()
WORD $0xd503201f
nop31Instructions()
WORD $0xd503201f
nop31Instructions()
WORD $0xd503201f
nop31Instructions()

View File

@ -0,0 +1,58 @@
// Copyright 2019 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build arm64
package ring0
// init initializes architecture-specific state.
func (k *Kernel) init(opts KernelOpts) {
	// Save the root page tables.
	k.PageTables = opts.PageTables
}

// init initializes architecture-specific state.
func (c *CPU) init() {
	// Set the kernel stack pointer (virtual address); the assembly entry
	// paths restore it from c.registers.
	c.registers.Sp = uint64(c.StackTop())
}
// StackTop returns the kernel's stack address.
//
// NOTE(review): kernelAddr is defined elsewhere in this package —
// presumably it translates the pointer into the kernel (high-half)
// address range; confirm.
//
//go:nosplit
func (c *CPU) StackTop() uint64 {
	return uint64(kernelAddr(&c.stack[0])) + uint64(len(c.stack))
}
// IsCanonical indicates whether addr is canonical per the arm64 spec.
//
// An address is canonical when it falls in the lower (user) range or
// strictly above the kernel base address.
//
//go:nosplit
func IsCanonical(addr uint64) bool {
	const (
		maxLowAddr = uint64(0x0000ffffffffffff)
		kernelBase = uint64(0xffff000000000000)
	)
	if addr <= maxLowAddr {
		return true
	}
	return addr > kernelBase
}
// SwitchToUser switches to the application context described by
// switchOpts and returns the vector of the exception that brought
// control back to the kernel.
//
//go:nosplit
func (c *CPU) SwitchToUser(switchOpts SwitchOpts) (vector Vector) {
	// Sanitize registers: force a valid EL0 PSTATE regardless of what
	// the caller supplied.
	regs := switchOpts.Registers
	regs.Pstate &= ^uint64(UserFlagsClear)
	regs.Pstate |= UserFlagsSet
	// Perform the switch. kernelExitToEl0 is the assembly exit stub;
	// NOTE(review): switchOpts' ASIDs/pagetables are not consumed here —
	// confirm they are applied by the caller or the assembly path.
	kernelExitToEl0()
	vector = c.vecCode
	return
}

View File

@ -0,0 +1,25 @@
// Copyright 2019 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build arm64
package ring0
// LoadFloatingPoint loads floating point state by the most efficient
// mechanism available (set by Init).
//
// NOTE(review): nothing in this file assigns these hooks; confirm they
// are initialized elsewhere before use.
var LoadFloatingPoint func(*byte)

// SaveFloatingPoint saves floating point state by the most efficient
// mechanism available (set by Init).
var SaveFloatingPoint func(*byte)

View File

@ -0,0 +1,124 @@
// Copyright 2019 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build arm64
package ring0
import (
"fmt"
"io"
"reflect"
"syscall"
)
// Emit prints architecture-specific offsets.
//
// The output is a block of #define constants consumed by the assembly
// in this package (concatenated by the BUILD genrule). Offsets are
// computed via reflection so they always track the Go struct layouts.
func Emit(w io.Writer) {
	fmt.Fprintf(w, "// Automatically generated, do not edit.\n")

	c := &CPU{}
	cpuBase := reflect.ValueOf(c).Pointer()

	fmt.Fprintf(w, "\n// CPU offsets.\n")
	for _, f := range []struct {
		name string
		addr uintptr
	}{
		{"CPU_SELF", reflect.ValueOf(&c.self).Pointer()},
		{"CPU_REGISTERS", reflect.ValueOf(&c.registers).Pointer()},
		// CPU_STACK_TOP points one past the end of the stack array.
		{"CPU_STACK_TOP", reflect.ValueOf(&c.stack[0]).Pointer() + uintptr(len(c.stack))},
		{"CPU_ERROR_CODE", reflect.ValueOf(&c.errorCode).Pointer()},
		{"CPU_ERROR_TYPE", reflect.ValueOf(&c.errorType).Pointer()},
		{"CPU_FAULT_ADDR", reflect.ValueOf(&c.faultAddr).Pointer()},
		{"CPU_TTBR0_KVM", reflect.ValueOf(&c.ttbr0Kvm).Pointer()},
		{"CPU_TTBR0_APP", reflect.ValueOf(&c.ttbr0App).Pointer()},
		{"CPU_VECTOR_CODE", reflect.ValueOf(&c.vecCode).Pointer()},
		{"CPU_APP_ADDR", reflect.ValueOf(&c.appAddr).Pointer()},
	} {
		fmt.Fprintf(w, "#define %s 0x%02x\n", f.name, f.addr-cpuBase)
	}

	fmt.Fprintf(w, "\n// Bits.\n")
	fmt.Fprintf(w, "#define _KERNEL_FLAGS 0x%02x\n", KernelFlagsSet)

	fmt.Fprintf(w, "\n// Vectors.\n")
	for _, v := range []struct {
		name  string
		value interface{}
	}{
		{"El1SyncInvalid", El1SyncInvalid},
		{"El1IrqInvalid", El1IrqInvalid},
		{"El1FiqInvalid", El1FiqInvalid},
		{"El1ErrorInvalid", El1ErrorInvalid},
		{"El1Sync", El1Sync},
		{"El1Irq", El1Irq},
		{"El1Fiq", El1Fiq},
		{"El1Error", El1Error},
		{"El0Sync", El0Sync},
		{"El0Irq", El0Irq},
		{"El0Fiq", El0Fiq},
		{"El0Error", El0Error},
		{"El0Sync_invalid", El0Sync_invalid},
		{"El0Irq_invalid", El0Irq_invalid},
		{"El0Fiq_invalid", El0Fiq_invalid},
		{"El0Error_invalid", El0Error_invalid},
		{"El1Sync_da", El1Sync_da},
		{"El1Sync_ia", El1Sync_ia},
		{"El1Sync_sp_pc", El1Sync_sp_pc},
		{"El1Sync_undef", El1Sync_undef},
		{"El1Sync_dbg", El1Sync_dbg},
		{"El1Sync_inv", El1Sync_inv},
		{"El0Sync_svc", El0Sync_svc},
		{"El0Sync_da", El0Sync_da},
		{"El0Sync_ia", El0Sync_ia},
		{"El0Sync_fpsimd_acc", El0Sync_fpsimd_acc},
		{"El0Sync_sve_acc", El0Sync_sve_acc},
		{"El0Sync_sys", El0Sync_sys},
		{"El0Sync_sp_pc", El0Sync_sp_pc},
		{"El0Sync_undef", El0Sync_undef},
		{"El0Sync_dbg", El0Sync_dbg},
		{"El0Sync_inv", El0Sync_inv},
		{"PageFault", PageFault},
		{"Syscall", Syscall},
	} {
		fmt.Fprintf(w, "#define %s 0x%02x\n", v.name, v.value)
	}

	p := &syscall.PtraceRegs{}
	ptraceBase := reflect.ValueOf(p).Pointer()

	fmt.Fprintf(w, "\n// Ptrace registers.\n")
	// p.Regs is [31]uint64 on linux/arm64, yielding PTRACE_R0..PTRACE_R30.
	for i := range p.Regs {
		fmt.Fprintf(w, "#define PTRACE_R%d 0x%02x\n", i, reflect.ValueOf(&p.Regs[i]).Pointer()-ptraceBase)
	}
	fmt.Fprintf(w, "#define PTRACE_SP 0x%02x\n", reflect.ValueOf(&p.Sp).Pointer()-ptraceBase)
	fmt.Fprintf(w, "#define PTRACE_PC 0x%02x\n", reflect.ValueOf(&p.Pc).Pointer()-ptraceBase)
	fmt.Fprintf(w, "#define PTRACE_PSTATE 0x%02x\n", reflect.ValueOf(&p.Pstate).Pointer()-ptraceBase)
}