Merge release-20200211.0-18-g3c26f5e (automated)

gVisor bot 2020-02-14 20:10:32 +00:00
commit 6f7ed62b42
13 changed files with 688 additions and 156 deletions

View File

@@ -25,6 +25,8 @@ const (
)

// Stat represents struct stat.
//
// +marshal
type Stat struct {
    Dev uint64
    Ino uint64

View File

@@ -25,6 +25,8 @@ const (
)

// Stat represents struct stat.
//
// +marshal
type Stat struct {
    Dev uint64
    Ino uint64

View File

@@ -0,0 +1,193 @@
// Automatically generated marshal implementation. See tools/go_marshal.

package linux

import (
    "gvisor.dev/gvisor/pkg/safecopy"
    "gvisor.dev/gvisor/pkg/usermem"
    "gvisor.dev/gvisor/tools/go_marshal/marshal"
    "reflect"
    "runtime"
    "unsafe"
)

// Marshallable types used by this file.
var _ marshal.Marshallable = (*RSeqCriticalSection)(nil)
var _ marshal.Marshallable = (*Timespec)(nil)

// SizeBytes implements marshal.Marshallable.SizeBytes.
func (r *RSeqCriticalSection) SizeBytes() int {
    return 32
}

// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (r *RSeqCriticalSection) MarshalBytes(dst []byte) {
    usermem.ByteOrder.PutUint32(dst[:4], uint32(r.Version))
    dst = dst[4:]
    usermem.ByteOrder.PutUint32(dst[:4], uint32(r.Flags))
    dst = dst[4:]
    usermem.ByteOrder.PutUint64(dst[:8], uint64(r.Start))
    dst = dst[8:]
    usermem.ByteOrder.PutUint64(dst[:8], uint64(r.PostCommitOffset))
    dst = dst[8:]
    usermem.ByteOrder.PutUint64(dst[:8], uint64(r.Abort))
    dst = dst[8:]
}

// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (r *RSeqCriticalSection) UnmarshalBytes(src []byte) {
    r.Version = usermem.ByteOrder.Uint32(src[:4])
    src = src[4:]
    r.Flags = usermem.ByteOrder.Uint32(src[:4])
    src = src[4:]
    r.Start = usermem.ByteOrder.Uint64(src[:8])
    src = src[8:]
    r.PostCommitOffset = usermem.ByteOrder.Uint64(src[:8])
    src = src[8:]
    r.Abort = usermem.ByteOrder.Uint64(src[:8])
    src = src[8:]
}

// Packed implements marshal.Marshallable.Packed.
func (r *RSeqCriticalSection) Packed() bool {
    return true
}

// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
func (r *RSeqCriticalSection) MarshalUnsafe(dst []byte) {
    safecopy.CopyIn(dst, unsafe.Pointer(r))
}

// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
func (r *RSeqCriticalSection) UnmarshalUnsafe(src []byte) {
    safecopy.CopyOut(unsafe.Pointer(r), src)
}

// CopyOut implements marshal.Marshallable.CopyOut.
func (r *RSeqCriticalSection) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) {
    // Bypass escape analysis on r. The no-op arithmetic operation on the
    // pointer makes the compiler think val doesn't depend on r.
    // See src/runtime/stubs.go:noescape() in the golang toolchain.
    ptr := unsafe.Pointer(r)
    val := uintptr(ptr)
    val = val ^ 0

    // Construct a slice backed by r's underlying memory.
    var buf []byte
    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
    hdr.Data = val
    hdr.Len = r.SizeBytes()
    hdr.Cap = r.SizeBytes()

    len, err := task.CopyOutBytes(addr, buf)
    // Since we bypassed the compiler's escape analysis, indicate that r
    // must live until after the CopyOutBytes.
    runtime.KeepAlive(r)
    return len, err
}

// CopyIn implements marshal.Marshallable.CopyIn.
func (r *RSeqCriticalSection) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
    // Bypass escape analysis on r. The no-op arithmetic operation on the
    // pointer makes the compiler think val doesn't depend on r.
    // See src/runtime/stubs.go:noescape() in the golang toolchain.
    ptr := unsafe.Pointer(r)
    val := uintptr(ptr)
    val = val ^ 0

    // Construct a slice backed by r's underlying memory.
    var buf []byte
    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
    hdr.Data = val
    hdr.Len = r.SizeBytes()
    hdr.Cap = r.SizeBytes()

    len, err := task.CopyInBytes(addr, buf)
    // Since we bypassed the compiler's escape analysis, indicate that r
    // must live until after the CopyInBytes.
    runtime.KeepAlive(r)
    return len, err
}

// SizeBytes implements marshal.Marshallable.SizeBytes.
func (t *Timespec) SizeBytes() int {
    return 16
}

// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (t *Timespec) MarshalBytes(dst []byte) {
    usermem.ByteOrder.PutUint64(dst[:8], uint64(t.Sec))
    dst = dst[8:]
    usermem.ByteOrder.PutUint64(dst[:8], uint64(t.Nsec))
    dst = dst[8:]
}

// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (t *Timespec) UnmarshalBytes(src []byte) {
    t.Sec = int64(usermem.ByteOrder.Uint64(src[:8]))
    src = src[8:]
    t.Nsec = int64(usermem.ByteOrder.Uint64(src[:8]))
    src = src[8:]
}

// Packed implements marshal.Marshallable.Packed.
func (t *Timespec) Packed() bool {
    return true
}

// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
func (t *Timespec) MarshalUnsafe(dst []byte) {
    safecopy.CopyIn(dst, unsafe.Pointer(t))
}

// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
func (t *Timespec) UnmarshalUnsafe(src []byte) {
    safecopy.CopyOut(unsafe.Pointer(t), src)
}

// CopyOut implements marshal.Marshallable.CopyOut.
func (t *Timespec) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) {
    // Bypass escape analysis on t. The no-op arithmetic operation on the
    // pointer makes the compiler think val doesn't depend on t.
    // See src/runtime/stubs.go:noescape() in the golang toolchain.
    ptr := unsafe.Pointer(t)
    val := uintptr(ptr)
    val = val ^ 0

    // Construct a slice backed by t's underlying memory.
    var buf []byte
    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
    hdr.Data = val
    hdr.Len = t.SizeBytes()
    hdr.Cap = t.SizeBytes()

    len, err := task.CopyOutBytes(addr, buf)
    // Since we bypassed the compiler's escape analysis, indicate that t
    // must live until after the CopyOutBytes.
    runtime.KeepAlive(t)
    return len, err
}

// CopyIn implements marshal.Marshallable.CopyIn.
func (t *Timespec) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
    // Bypass escape analysis on t. The no-op arithmetic operation on the
    // pointer makes the compiler think val doesn't depend on t.
    // See src/runtime/stubs.go:noescape() in the golang toolchain.
    ptr := unsafe.Pointer(t)
    val := uintptr(ptr)
    val = val ^ 0

    // Construct a slice backed by t's underlying memory.
    var buf []byte
    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
    hdr.Data = val
    hdr.Len = t.SizeBytes()
    hdr.Cap = t.SizeBytes()

    len, err := task.CopyInBytes(addr, buf)
    // Since we bypassed the compiler's escape analysis, indicate that t
    // must live until after the CopyInBytes.
    runtime.KeepAlive(t)
    return len, err
}
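
Every CopyIn/CopyOut body above repeats the same trick: launder the pointer with a no-op arithmetic operation so escape analysis loses track of it, alias the object's memory as a []byte through reflect.SliceHeader, and pin the object with runtime.KeepAlive until the copy completes. Here is a minimal standalone sketch of that pattern; the point type and asBytes helper are illustrative, not part of this commit:

// noescape_sketch.go: standalone illustration of the escape-analysis bypass
// used by the generated CopyIn/CopyOut above (hypothetical example).
package main

import (
    "fmt"
    "reflect"
    "runtime"
    "unsafe"
)

// point stands in for a packed, marshallable ABI struct.
type point struct {
    X, Y int64
}

// asBytes aliases p's memory as a byte slice without letting p escape to the
// heap. The caller must keep p alive until the last use of the result.
func asBytes(p *point) []byte {
    // No-op arithmetic on the pointer hides the dependency from escape
    // analysis, mirroring src/runtime/stubs.go:noescape().
    val := uintptr(unsafe.Pointer(p)) ^ 0

    // Build a slice header pointing directly at p's memory.
    var buf []byte
    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
    hdr.Data = val
    hdr.Len = int(unsafe.Sizeof(*p))
    hdr.Cap = int(unsafe.Sizeof(*p))
    return buf
}

func main() {
    p := point{X: 1, Y: 2}
    b := asBytes(&p)
    fmt.Println(len(b), b[0]) // 16 1 on a little-endian machine
    // Pin p until after the last use of the aliased slice.
    runtime.KeepAlive(&p)
}

Dropping the KeepAlive would let the collector reclaim p while b still aliases its memory, which is exactly the hazard the generated code guards against.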

View File

@@ -0,0 +1,182 @@
// Automatically generated marshal implementation. See tools/go_marshal.

// +build amd64

package linux

import (
    "gvisor.dev/gvisor/pkg/safecopy"
    "gvisor.dev/gvisor/pkg/usermem"
    "gvisor.dev/gvisor/tools/go_marshal/marshal"
    "reflect"
    "runtime"
    "unsafe"
)

// Marshallable types used by this file.
var _ marshal.Marshallable = (*Stat)(nil)
var _ marshal.Marshallable = (*Timespec)(nil)

// SizeBytes implements marshal.Marshallable.SizeBytes.
func (s *Stat) SizeBytes() int {
    return 96 +
        s.ATime.SizeBytes() +
        s.MTime.SizeBytes() +
        s.CTime.SizeBytes()
}

// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (s *Stat) MarshalBytes(dst []byte) {
    usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Dev))
    dst = dst[8:]
    usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Ino))
    dst = dst[8:]
    usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Nlink))
    dst = dst[8:]
    usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Mode))
    dst = dst[4:]
    usermem.ByteOrder.PutUint32(dst[:4], uint32(s.UID))
    dst = dst[4:]
    usermem.ByteOrder.PutUint32(dst[:4], uint32(s.GID))
    dst = dst[4:]
    // Padding: dst[:sizeof(int32)] ~= int32(0)
    dst = dst[4:]
    usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Rdev))
    dst = dst[8:]
    usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Size))
    dst = dst[8:]
    usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Blksize))
    dst = dst[8:]
    usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Blocks))
    dst = dst[8:]
    s.ATime.MarshalBytes(dst[:s.ATime.SizeBytes()])
    dst = dst[s.ATime.SizeBytes():]
    s.MTime.MarshalBytes(dst[:s.MTime.SizeBytes()])
    dst = dst[s.MTime.SizeBytes():]
    s.CTime.MarshalBytes(dst[:s.CTime.SizeBytes()])
    dst = dst[s.CTime.SizeBytes():]
    // Padding: dst[:sizeof(int64)*3] ~= [3]int64{0}
    dst = dst[24:]
}

// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (s *Stat) UnmarshalBytes(src []byte) {
    s.Dev = usermem.ByteOrder.Uint64(src[:8])
    src = src[8:]
    s.Ino = usermem.ByteOrder.Uint64(src[:8])
    src = src[8:]
    s.Nlink = usermem.ByteOrder.Uint64(src[:8])
    src = src[8:]
    s.Mode = usermem.ByteOrder.Uint32(src[:4])
    src = src[4:]
    s.UID = usermem.ByteOrder.Uint32(src[:4])
    src = src[4:]
    s.GID = usermem.ByteOrder.Uint32(src[:4])
    src = src[4:]
    // Padding: var _ int32 ~= src[:sizeof(int32)]
    src = src[4:]
    s.Rdev = usermem.ByteOrder.Uint64(src[:8])
    src = src[8:]
    s.Size = int64(usermem.ByteOrder.Uint64(src[:8]))
    src = src[8:]
    s.Blksize = int64(usermem.ByteOrder.Uint64(src[:8]))
    src = src[8:]
    s.Blocks = int64(usermem.ByteOrder.Uint64(src[:8]))
    src = src[8:]
    s.ATime.UnmarshalBytes(src[:s.ATime.SizeBytes()])
    src = src[s.ATime.SizeBytes():]
    s.MTime.UnmarshalBytes(src[:s.MTime.SizeBytes()])
    src = src[s.MTime.SizeBytes():]
    s.CTime.UnmarshalBytes(src[:s.CTime.SizeBytes()])
    src = src[s.CTime.SizeBytes():]
    // Padding: ~ copy([3]int64(s._), src[:sizeof(int64)*3])
    src = src[24:]
}

// Packed implements marshal.Marshallable.Packed.
func (s *Stat) Packed() bool {
    return s.CTime.Packed() && s.ATime.Packed() && s.MTime.Packed()
}

// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
func (s *Stat) MarshalUnsafe(dst []byte) {
    if s.ATime.Packed() && s.MTime.Packed() && s.CTime.Packed() {
        safecopy.CopyIn(dst, unsafe.Pointer(s))
    } else {
        s.MarshalBytes(dst)
    }
}

// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
func (s *Stat) UnmarshalUnsafe(src []byte) {
    if s.ATime.Packed() && s.MTime.Packed() && s.CTime.Packed() {
        safecopy.CopyOut(unsafe.Pointer(s), src)
    } else {
        s.UnmarshalBytes(src)
    }
}

// CopyOut implements marshal.Marshallable.CopyOut.
func (s *Stat) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) {
    if !s.ATime.Packed() || !s.MTime.Packed() || !s.CTime.Packed() {
        // Type Stat doesn't have a packed layout in memory, fall back to
        // MarshalBytes.
        buf := task.CopyScratchBuffer(s.SizeBytes())
        s.MarshalBytes(buf)
        return task.CopyOutBytes(addr, buf)
    }

    // Bypass escape analysis on s. The no-op arithmetic operation on the
    // pointer makes the compiler think val doesn't depend on s.
    // See src/runtime/stubs.go:noescape() in the golang toolchain.
    ptr := unsafe.Pointer(s)
    val := uintptr(ptr)
    val = val ^ 0

    // Construct a slice backed by s's underlying memory.
    var buf []byte
    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
    hdr.Data = val
    hdr.Len = s.SizeBytes()
    hdr.Cap = s.SizeBytes()

    len, err := task.CopyOutBytes(addr, buf)
    // Since we bypassed the compiler's escape analysis, indicate that s
    // must live until after the CopyOutBytes.
    runtime.KeepAlive(s)
    return len, err
}

// CopyIn implements marshal.Marshallable.CopyIn.
func (s *Stat) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
    if !s.ATime.Packed() || !s.MTime.Packed() || !s.CTime.Packed() {
        // Type Stat doesn't have a packed layout in memory, fall back to
        // UnmarshalBytes.
        buf := task.CopyScratchBuffer(s.SizeBytes())
        n, err := task.CopyInBytes(addr, buf)
        if err != nil {
            return n, err
        }
        s.UnmarshalBytes(buf)
        return n, nil
    }

    // Bypass escape analysis on s. The no-op arithmetic operation on the
    // pointer makes the compiler think val doesn't depend on s.
    // See src/runtime/stubs.go:noescape() in the golang toolchain.
    ptr := unsafe.Pointer(s)
    val := uintptr(ptr)
    val = val ^ 0

    // Construct a slice backed by s's underlying memory.
    var buf []byte
    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
    hdr.Data = val
    hdr.Len = s.SizeBytes()
    hdr.Cap = s.SizeBytes()

    len, err := task.CopyInBytes(addr, buf)
    // Since we bypassed the compiler's escape analysis, indicate that s
    // must live until after the CopyInBytes.
    runtime.KeepAlive(s)
    return len, err
}

View File

@@ -0,0 +1,186 @@
// Automatically generated marshal implementation. See tools/go_marshal.

// +build arm64

package linux

import (
    "gvisor.dev/gvisor/pkg/safecopy"
    "gvisor.dev/gvisor/pkg/usermem"
    "gvisor.dev/gvisor/tools/go_marshal/marshal"
    "reflect"
    "runtime"
    "unsafe"
)

// Marshallable types used by this file.
var _ marshal.Marshallable = (*Stat)(nil)
var _ marshal.Marshallable = (*Timespec)(nil)

// SizeBytes implements marshal.Marshallable.SizeBytes.
func (s *Stat) SizeBytes() int {
    return 80 +
        s.ATime.SizeBytes() +
        s.MTime.SizeBytes() +
        s.CTime.SizeBytes()
}

// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (s *Stat) MarshalBytes(dst []byte) {
    usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Dev))
    dst = dst[8:]
    usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Ino))
    dst = dst[8:]
    usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Mode))
    dst = dst[4:]
    usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Nlink))
    dst = dst[4:]
    usermem.ByteOrder.PutUint32(dst[:4], uint32(s.UID))
    dst = dst[4:]
    usermem.ByteOrder.PutUint32(dst[:4], uint32(s.GID))
    dst = dst[4:]
    usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Rdev))
    dst = dst[8:]
    // Padding: dst[:sizeof(uint64)] ~= uint64(0)
    dst = dst[8:]
    usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Size))
    dst = dst[8:]
    usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Blksize))
    dst = dst[4:]
    // Padding: dst[:sizeof(int32)] ~= int32(0)
    dst = dst[4:]
    usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Blocks))
    dst = dst[8:]
    s.ATime.MarshalBytes(dst[:s.ATime.SizeBytes()])
    dst = dst[s.ATime.SizeBytes():]
    s.MTime.MarshalBytes(dst[:s.MTime.SizeBytes()])
    dst = dst[s.MTime.SizeBytes():]
    s.CTime.MarshalBytes(dst[:s.CTime.SizeBytes()])
    dst = dst[s.CTime.SizeBytes():]
    // Padding: dst[:sizeof(int32)*2] ~= [2]int32{0}
    dst = dst[8:]
}

// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (s *Stat) UnmarshalBytes(src []byte) {
    s.Dev = usermem.ByteOrder.Uint64(src[:8])
    src = src[8:]
    s.Ino = usermem.ByteOrder.Uint64(src[:8])
    src = src[8:]
    s.Mode = usermem.ByteOrder.Uint32(src[:4])
    src = src[4:]
    s.Nlink = usermem.ByteOrder.Uint32(src[:4])
    src = src[4:]
    s.UID = usermem.ByteOrder.Uint32(src[:4])
    src = src[4:]
    s.GID = usermem.ByteOrder.Uint32(src[:4])
    src = src[4:]
    s.Rdev = usermem.ByteOrder.Uint64(src[:8])
    src = src[8:]
    // Padding: var _ uint64 ~= src[:sizeof(uint64)]
    src = src[8:]
    s.Size = int64(usermem.ByteOrder.Uint64(src[:8]))
    src = src[8:]
    s.Blksize = int32(usermem.ByteOrder.Uint32(src[:4]))
    src = src[4:]
    // Padding: var _ int32 ~= src[:sizeof(int32)]
    src = src[4:]
    s.Blocks = int64(usermem.ByteOrder.Uint64(src[:8]))
    src = src[8:]
    s.ATime.UnmarshalBytes(src[:s.ATime.SizeBytes()])
    src = src[s.ATime.SizeBytes():]
    s.MTime.UnmarshalBytes(src[:s.MTime.SizeBytes()])
    src = src[s.MTime.SizeBytes():]
    s.CTime.UnmarshalBytes(src[:s.CTime.SizeBytes()])
    src = src[s.CTime.SizeBytes():]
    // Padding: ~ copy([2]int32(s._), src[:sizeof(int32)*2])
    src = src[8:]
}

// Packed implements marshal.Marshallable.Packed.
func (s *Stat) Packed() bool {
    return s.CTime.Packed() && s.ATime.Packed() && s.MTime.Packed()
}

// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
func (s *Stat) MarshalUnsafe(dst []byte) {
    if s.ATime.Packed() && s.MTime.Packed() && s.CTime.Packed() {
        safecopy.CopyIn(dst, unsafe.Pointer(s))
    } else {
        s.MarshalBytes(dst)
    }
}

// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
func (s *Stat) UnmarshalUnsafe(src []byte) {
    if s.ATime.Packed() && s.MTime.Packed() && s.CTime.Packed() {
        safecopy.CopyOut(unsafe.Pointer(s), src)
    } else {
        s.UnmarshalBytes(src)
    }
}

// CopyOut implements marshal.Marshallable.CopyOut.
func (s *Stat) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) {
    if !s.ATime.Packed() || !s.MTime.Packed() || !s.CTime.Packed() {
        // Type Stat doesn't have a packed layout in memory, fall back to
        // MarshalBytes.
        buf := task.CopyScratchBuffer(s.SizeBytes())
        s.MarshalBytes(buf)
        return task.CopyOutBytes(addr, buf)
    }

    // Bypass escape analysis on s. The no-op arithmetic operation on the
    // pointer makes the compiler think val doesn't depend on s.
    // See src/runtime/stubs.go:noescape() in the golang toolchain.
    ptr := unsafe.Pointer(s)
    val := uintptr(ptr)
    val = val ^ 0

    // Construct a slice backed by s's underlying memory.
    var buf []byte
    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
    hdr.Data = val
    hdr.Len = s.SizeBytes()
    hdr.Cap = s.SizeBytes()

    len, err := task.CopyOutBytes(addr, buf)
    // Since we bypassed the compiler's escape analysis, indicate that s
    // must live until after the CopyOutBytes.
    runtime.KeepAlive(s)
    return len, err
}

// CopyIn implements marshal.Marshallable.CopyIn.
func (s *Stat) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
    if !s.ATime.Packed() || !s.MTime.Packed() || !s.CTime.Packed() {
        // Type Stat doesn't have a packed layout in memory, fall back to
        // UnmarshalBytes.
        buf := task.CopyScratchBuffer(s.SizeBytes())
        n, err := task.CopyInBytes(addr, buf)
        if err != nil {
            return n, err
        }
        s.UnmarshalBytes(buf)
        return n, nil
    }

    // Bypass escape analysis on s. The no-op arithmetic operation on the
    // pointer makes the compiler think val doesn't depend on s.
    // See src/runtime/stubs.go:noescape() in the golang toolchain.
    ptr := unsafe.Pointer(s)
    val := uintptr(ptr)
    val = val ^ 0

    // Construct a slice backed by s's underlying memory.
    var buf []byte
    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
    hdr.Data = val
    hdr.Len = s.SizeBytes()
    hdr.Cap = s.SizeBytes()

    len, err := task.CopyInBytes(addr, buf)
    // Since we bypassed the compiler's escape analysis, indicate that s
    // must live until after the CopyInBytes.
    runtime.KeepAlive(s)
    return len, err
}

View File

@@ -101,6 +101,8 @@ func NsecToTimeT(nsec int64) TimeT {
}

// Timespec represents struct timespec in <time.h>.
//
// +marshal
type Timespec struct {
    Sec  int64
    Nsec int64

View File

@@ -1,6 +1,5 @@
// automatically generated by stateify.

// +build amd64
// +build amd64

package linux

View File

@@ -1,6 +1,5 @@
// automatically generated by stateify.

// +build arm64
// +build arm64

package linux

View File

@@ -23,6 +23,24 @@ import (
    "gvisor.dev/gvisor/pkg/usermem"
)

func statFromAttrs(t *kernel.Task, sattr fs.StableAttr, uattr fs.UnstableAttr) linux.Stat {
    return linux.Stat{
        Dev:     sattr.DeviceID,
        Ino:     sattr.InodeID,
        Nlink:   uattr.Links,
        Mode:    sattr.Type.LinuxType() | uint32(uattr.Perms.LinuxMode()),
        UID:     uint32(uattr.Owner.UID.In(t.UserNamespace()).OrOverflow()),
        GID:     uint32(uattr.Owner.GID.In(t.UserNamespace()).OrOverflow()),
        Rdev:    uint64(linux.MakeDeviceID(sattr.DeviceFileMajor, sattr.DeviceFileMinor)),
        Size:    uattr.Size,
        Blksize: sattr.BlockSize,
        Blocks:  uattr.Usage / 512,
        ATime:   uattr.AccessTime.Timespec(),
        MTime:   uattr.ModificationTime.Timespec(),
        CTime:   uattr.StatusChangeTime.Timespec(),
    }
}

// Stat implements linux syscall stat(2).
func Stat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
    addr := args[0].Pointer()

@@ -112,7 +130,9 @@ func stat(t *kernel.Task, d *fs.Dirent, dirPath bool, statAddr usermem.Addr) error {
    if err != nil {
        return err
    }
    return copyOutStat(t, statAddr, d.Inode.StableAttr, uattr)
    s := statFromAttrs(t, d.Inode.StableAttr, uattr)
    _, err = s.CopyOut(t, statAddr)
    return err
}

// fstat implements fstat for the given *fs.File.
@@ -121,7 +141,9 @@ func fstat(t *kernel.Task, f *fs.File, statAddr usermem.Addr) error {
    if err != nil {
        return err
    }
    return copyOutStat(t, statAddr, f.Dirent.Inode.StableAttr, uattr)
    s := statFromAttrs(t, f.Dirent.Inode.StableAttr, uattr)
    _, err = s.CopyOut(t, statAddr)
    return err
}

// Statx implements linux syscall statx(2).

View File

@@ -1,75 +0,0 @@
// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//+build amd64

package linux

import (
    "gvisor.dev/gvisor/pkg/abi/linux"
    "gvisor.dev/gvisor/pkg/binary"
    "gvisor.dev/gvisor/pkg/sentry/fs"
    "gvisor.dev/gvisor/pkg/sentry/kernel"
    "gvisor.dev/gvisor/pkg/usermem"
)

// copyOutStat copies the attributes (sattr, uattr) to the struct stat at
// address dst in t's address space. It encodes the stat struct to bytes
// manually, as stat() is a very common syscall for many applications, and
// t.CopyObjectOut has noticeable performance impact due to its many slice
// allocations and use of reflection.
func copyOutStat(t *kernel.Task, dst usermem.Addr, sattr fs.StableAttr, uattr fs.UnstableAttr) error {
    b := t.CopyScratchBuffer(int(linux.SizeOfStat))[:0]

    // Dev (uint64)
    b = binary.AppendUint64(b, usermem.ByteOrder, uint64(sattr.DeviceID))
    // Ino (uint64)
    b = binary.AppendUint64(b, usermem.ByteOrder, uint64(sattr.InodeID))
    // Nlink (uint64)
    b = binary.AppendUint64(b, usermem.ByteOrder, uattr.Links)
    // Mode (uint32)
    b = binary.AppendUint32(b, usermem.ByteOrder, sattr.Type.LinuxType()|uint32(uattr.Perms.LinuxMode()))
    // UID (uint32)
    b = binary.AppendUint32(b, usermem.ByteOrder, uint32(uattr.Owner.UID.In(t.UserNamespace()).OrOverflow()))
    // GID (uint32)
    b = binary.AppendUint32(b, usermem.ByteOrder, uint32(uattr.Owner.GID.In(t.UserNamespace()).OrOverflow()))
    // Padding (uint32)
    b = binary.AppendUint32(b, usermem.ByteOrder, 0)
    // Rdev (uint64)
    b = binary.AppendUint64(b, usermem.ByteOrder, uint64(linux.MakeDeviceID(sattr.DeviceFileMajor, sattr.DeviceFileMinor)))
    // Size (uint64)
    b = binary.AppendUint64(b, usermem.ByteOrder, uint64(uattr.Size))
    // Blksize (uint64)
    b = binary.AppendUint64(b, usermem.ByteOrder, uint64(sattr.BlockSize))
    // Blocks (uint64)
    b = binary.AppendUint64(b, usermem.ByteOrder, uint64(uattr.Usage/512))
    // ATime
    atime := uattr.AccessTime.Timespec()
    b = binary.AppendUint64(b, usermem.ByteOrder, uint64(atime.Sec))
    b = binary.AppendUint64(b, usermem.ByteOrder, uint64(atime.Nsec))
    // MTime
    mtime := uattr.ModificationTime.Timespec()
    b = binary.AppendUint64(b, usermem.ByteOrder, uint64(mtime.Sec))
    b = binary.AppendUint64(b, usermem.ByteOrder, uint64(mtime.Nsec))
    // CTime
    ctime := uattr.StatusChangeTime.Timespec()
    b = binary.AppendUint64(b, usermem.ByteOrder, uint64(ctime.Sec))
    b = binary.AppendUint64(b, usermem.ByteOrder, uint64(ctime.Nsec))

    _, err := t.CopyOutBytes(dst, b)
    return err
}

View File

@@ -1,77 +0,0 @@
// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//+build arm64

package linux

import (
    "gvisor.dev/gvisor/pkg/abi/linux"
    "gvisor.dev/gvisor/pkg/binary"
    "gvisor.dev/gvisor/pkg/sentry/fs"
    "gvisor.dev/gvisor/pkg/sentry/kernel"
    "gvisor.dev/gvisor/pkg/usermem"
)

// copyOutStat copies the attributes (sattr, uattr) to the struct stat at
// address dst in t's address space. It encodes the stat struct to bytes
// manually, as stat() is a very common syscall for many applications, and
// t.CopyObjectOut has noticeable performance impact due to its many slice
// allocations and use of reflection.
func copyOutStat(t *kernel.Task, dst usermem.Addr, sattr fs.StableAttr, uattr fs.UnstableAttr) error {
    b := t.CopyScratchBuffer(int(linux.SizeOfStat))[:0]

    // Dev (uint64)
    b = binary.AppendUint64(b, usermem.ByteOrder, uint64(sattr.DeviceID))
    // Ino (uint64)
    b = binary.AppendUint64(b, usermem.ByteOrder, uint64(sattr.InodeID))
    // Mode (uint32)
    b = binary.AppendUint32(b, usermem.ByteOrder, sattr.Type.LinuxType()|uint32(uattr.Perms.LinuxMode()))
    // Nlink (uint32)
    b = binary.AppendUint32(b, usermem.ByteOrder, uint32(uattr.Links))
    // UID (uint32)
    b = binary.AppendUint32(b, usermem.ByteOrder, uint32(uattr.Owner.UID.In(t.UserNamespace()).OrOverflow()))
    // GID (uint32)
    b = binary.AppendUint32(b, usermem.ByteOrder, uint32(uattr.Owner.GID.In(t.UserNamespace()).OrOverflow()))
    // Rdev (uint64)
    b = binary.AppendUint64(b, usermem.ByteOrder, uint64(linux.MakeDeviceID(sattr.DeviceFileMajor, sattr.DeviceFileMinor)))
    // Padding (uint64)
    b = binary.AppendUint64(b, usermem.ByteOrder, 0)
    // Size (uint64)
    b = binary.AppendUint64(b, usermem.ByteOrder, uint64(uattr.Size))
    // Blksize (uint32)
    b = binary.AppendUint32(b, usermem.ByteOrder, uint32(sattr.BlockSize))
    // Padding (uint32)
    b = binary.AppendUint32(b, usermem.ByteOrder, 0)
    // Blocks (uint64)
    b = binary.AppendUint64(b, usermem.ByteOrder, uint64(uattr.Usage/512))
    // ATime
    atime := uattr.AccessTime.Timespec()
    b = binary.AppendUint64(b, usermem.ByteOrder, uint64(atime.Sec))
    b = binary.AppendUint64(b, usermem.ByteOrder, uint64(atime.Nsec))
    // MTime
    mtime := uattr.ModificationTime.Timespec()
    b = binary.AppendUint64(b, usermem.ByteOrder, uint64(mtime.Sec))
    b = binary.AppendUint64(b, usermem.ByteOrder, uint64(mtime.Nsec))
    // CTime
    ctime := uattr.StatusChangeTime.Timespec()
    b = binary.AppendUint64(b, usermem.ByteOrder, uint64(ctime.Sec))
    b = binary.AppendUint64(b, usermem.ByteOrder, uint64(ctime.Nsec))

    _, err := t.CopyOutBytes(dst, b)
    return err
}

View File

@@ -0,0 +1,94 @@
// Copyright 2019 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package marshal defines the Marshallable interface for serializing and
// deserializing Go data structures to and from memory, according to the
// Linux ABI.
//
// Implementations of this interface are typically automatically generated by
// tools/go_marshal. See the go_marshal README for details.
package marshal

import (
    "gvisor.dev/gvisor/pkg/usermem"
)

// Task provides a subset of kernel.Task, used in marshalling. We don't import
// the kernel package directly to avoid a circular dependency.
type Task interface {
    // CopyScratchBuffer provides a task goroutine-local scratch buffer. See
    // kernel.CopyScratchBuffer.
    CopyScratchBuffer(size int) []byte

    // CopyOutBytes writes the contents of b to the task's memory. See
    // kernel.CopyOutBytes.
    CopyOutBytes(addr usermem.Addr, b []byte) (int, error)

    // CopyInBytes reads the contents of the task's memory into b. See
    // kernel.CopyInBytes.
    CopyInBytes(addr usermem.Addr, b []byte) (int, error)
}
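
Since Task is only these three methods, test code can satisfy it without importing the kernel package at all. A minimal sketch of such a stand-in (mockTask is hypothetical, not part of this commit), treating addresses as offsets into a plain byte slice:

// mockTask backs the "task address space" with a byte slice; addr is
// interpreted as an offset. Bounds handling is elided for brevity.
type mockTask struct {
    mem []byte
}

// CopyScratchBuffer returns a fresh buffer; the real kernel.Task reuses a
// goroutine-local one.
func (m *mockTask) CopyScratchBuffer(size int) []byte {
    return make([]byte, size)
}

// CopyOutBytes writes b into the fake address space at addr.
func (m *mockTask) CopyOutBytes(addr usermem.Addr, b []byte) (int, error) {
    return copy(m.mem[addr:], b), nil
}

// CopyInBytes reads from the fake address space at addr into b.
func (m *mockTask) CopyInBytes(addr usermem.Addr, b []byte) (int, error) {
    return copy(b, m.mem[addr:]), nil
}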
// Marshallable represents a type that can be marshalled to and from memory.
type Marshallable interface {
    // SizeBytes returns the size of the memory representation of a type in
    // marshalled form.
    SizeBytes() int

    // MarshalBytes serializes a copy of a type to dst. dst must be at least
    // SizeBytes() long.
    MarshalBytes(dst []byte)

    // UnmarshalBytes deserializes a type from src. src must be at least
    // SizeBytes() long.
    UnmarshalBytes(src []byte)

    // Packed returns true if the marshalled size of the type is the same as
    // the size it occupies in memory. This happens when the type has no
    // fields starting at unaligned addresses (which should always be true by
    // default for ABI structs, and is verified by automatically generated
    // tests when using go_marshal), and has no fields marked
    // `marshal:"unaligned"`.
    Packed() bool

    // MarshalUnsafe serializes a type by bulk copying its in-memory
    // representation to the dst buffer. This is only safe to do when the type
    // has no implicit padding; see Marshallable.Packed. When Packed would
    // return false, MarshalUnsafe should fall back to the safer but slower
    // MarshalBytes.
    MarshalUnsafe(dst []byte)

    // UnmarshalUnsafe deserializes a type by directly copying into the
    // underlying memory allocated for the object by the runtime.
    //
    // This allows much faster unmarshalling of types which have no implicit
    // padding; see Marshallable.Packed. When Packed would return false,
    // UnmarshalUnsafe should fall back to the safer but slower unmarshal
    // mechanism implemented in UnmarshalBytes.
    UnmarshalUnsafe(src []byte)

    // CopyIn deserializes a Marshallable type from a task's memory. This may
    // only be called from a task goroutine. This is more efficient than
    // calling UnmarshalUnsafe on Marshallable.Packed types, as the type being
    // unmarshalled does not escape. The implementation should avoid creating
    // extra copies in memory by directly deserializing into the object's
    // underlying memory.
    CopyIn(task Task, addr usermem.Addr) (int, error)

    // CopyOut serializes a Marshallable type to a task's memory. This may
    // only be called from a task goroutine. This is more efficient than
    // calling MarshalUnsafe on Marshallable.Packed types, as the type being
    // serialized does not escape. The implementation should avoid creating
    // extra copies in memory by directly serializing from the object's
    // underlying memory.
    CopyOut(task Task, addr usermem.Addr) (int, error)
}
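
For contrast with the CopyIn/CopyOut fast paths above, here is a minimal sketch of the buffer-based flow they improve on (copyOutViaBuffer is hypothetical, not part of this commit):

// copyOutViaBuffer marshals m into a task-local scratch buffer and then
// writes the buffer to the task's memory. Unlike Marshallable.CopyOut, this
// serializes through an intermediate buffer even when the type is packed.
func copyOutViaBuffer(task Task, addr usermem.Addr, m Marshallable) (int, error) {
    buf := task.CopyScratchBuffer(m.SizeBytes())
    // MarshalUnsafe bulk-copies when m is packed and falls back to
    // MarshalBytes otherwise, so it is safe for any Marshallable.
    m.MarshalUnsafe(buf)
    return task.CopyOutBytes(addr, buf)
}

A generated CopyOut avoids both the scratch buffer and the extra copy for packed types by writing straight from the object's own memory.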

View File

@@ -0,0 +1,3 @@
// automatically generated by stateify.

package marshal