2019-04-29 21:25:05 +00:00
|
|
|
// Copyright 2018 The gVisor Authors.
|
2018-04-27 17:37:02 +00:00
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
|
|
|
package proc
|
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
|
|
|
"fmt"
|
|
|
|
"io"
|
|
|
|
"sort"
|
|
|
|
"strconv"
|
|
|
|
|
2019-06-13 23:49:09 +00:00
|
|
|
"gvisor.dev/gvisor/pkg/abi/linux"
|
|
|
|
"gvisor.dev/gvisor/pkg/sentry/context"
|
|
|
|
"gvisor.dev/gvisor/pkg/sentry/fs"
|
|
|
|
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
|
|
|
|
"gvisor.dev/gvisor/pkg/sentry/fs/proc/device"
|
|
|
|
"gvisor.dev/gvisor/pkg/sentry/fs/proc/seqfile"
|
|
|
|
"gvisor.dev/gvisor/pkg/sentry/fs/ramfs"
|
|
|
|
"gvisor.dev/gvisor/pkg/sentry/kernel"
|
|
|
|
"gvisor.dev/gvisor/pkg/sentry/limits"
|
|
|
|
"gvisor.dev/gvisor/pkg/sentry/mm"
|
|
|
|
"gvisor.dev/gvisor/pkg/sentry/usage"
|
|
|
|
"gvisor.dev/gvisor/pkg/sentry/usermem"
|
|
|
|
"gvisor.dev/gvisor/pkg/syserror"
|
|
|
|
"gvisor.dev/gvisor/pkg/waiter"
|
2018-04-27 17:37:02 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
// getTaskMM returns t's MemoryManager. If getTaskMM succeeds, the MemoryManager's
|
|
|
|
// users count is incremented, and must be decremented by the caller when it is
|
|
|
|
// no longer in use.
|
|
|
|
func getTaskMM(t *kernel.Task) (*mm.MemoryManager, error) {
|
|
|
|
if t.ExitState() == kernel.TaskExitDead {
|
|
|
|
return nil, syserror.ESRCH
|
|
|
|
}
|
|
|
|
var m *mm.MemoryManager
|
|
|
|
t.WithMuLocked(func(t *kernel.Task) {
|
|
|
|
m = t.MemoryManager()
|
|
|
|
})
|
|
|
|
if m == nil || !m.IncUsers() {
|
|
|
|
return nil, io.EOF
|
|
|
|
}
|
|
|
|
return m, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// taskDir represents a task-level directory.
//
// +stateify savable
type taskDir struct {
	ramfs.Dir

	// t is the task this directory describes.
	t *kernel.Task
	// pidns is the PID namespace of the owning proc mount.
	// NOTE(review): newTaskDir does not populate this field — presumably
	// unused here; confirm before relying on it.
	pidns *kernel.PIDNamespace
}

// taskDir must implement fs.InodeOperations.
var _ fs.InodeOperations = (*taskDir)(nil)
|
|
|
|
|
2018-04-27 17:37:02 +00:00
|
|
|
// newTaskDir creates a new proc task entry.
|
2019-12-05 21:22:31 +00:00
|
|
|
func (p *proc) newTaskDir(t *kernel.Task, msrc *fs.MountSource, isThreadGroup bool) *fs.Inode {
|
2019-01-15 04:33:29 +00:00
|
|
|
contents := map[string]*fs.Inode{
|
2019-12-05 21:22:31 +00:00
|
|
|
"auxv": newAuxvec(t, msrc),
|
|
|
|
"cmdline": newExecArgInode(t, msrc, cmdlineExecArg),
|
|
|
|
"comm": newComm(t, msrc),
|
|
|
|
"environ": newExecArgInode(t, msrc, environExecArg),
|
|
|
|
"exe": newExe(t, msrc),
|
|
|
|
"fd": newFdDir(t, msrc),
|
|
|
|
"fdinfo": newFdInfoDir(t, msrc),
|
|
|
|
"gid_map": newGIDMap(t, msrc),
|
|
|
|
"io": newIO(t, msrc, isThreadGroup),
|
2018-04-27 17:37:02 +00:00
|
|
|
"maps": newMaps(t, msrc),
|
|
|
|
"mountinfo": seqfile.NewSeqFileInode(t, &mountInfoFile{t: t}, msrc),
|
|
|
|
"mounts": seqfile.NewSeqFileInode(t, &mountsFile{t: t}, msrc),
|
|
|
|
"ns": newNamespaceDir(t, msrc),
|
2019-01-07 23:16:37 +00:00
|
|
|
"smaps": newSmaps(t, msrc),
|
2019-12-05 21:22:31 +00:00
|
|
|
"stat": newTaskStat(t, msrc, isThreadGroup, p.pidns),
|
2018-05-08 23:14:00 +00:00
|
|
|
"statm": newStatm(t, msrc),
|
2019-05-15 03:33:44 +00:00
|
|
|
"status": newStatus(t, msrc, p.pidns),
|
2018-04-27 17:37:02 +00:00
|
|
|
"uid_map": newUIDMap(t, msrc),
|
2019-01-15 04:33:29 +00:00
|
|
|
}
|
2019-12-05 21:22:31 +00:00
|
|
|
if isThreadGroup {
|
2019-05-15 03:33:44 +00:00
|
|
|
contents["task"] = p.newSubtasks(t, msrc)
|
|
|
|
}
|
|
|
|
if len(p.cgroupControllers) > 0 {
|
|
|
|
contents["cgroup"] = newCGroupInode(t, msrc, p.cgroupControllers)
|
2018-04-27 17:37:02 +00:00
|
|
|
}
|
2019-01-15 04:33:29 +00:00
|
|
|
|
2019-06-05 20:59:01 +00:00
|
|
|
// N.B. taskOwnedInodeOps enforces dumpability-based ownership.
|
2019-01-15 04:33:29 +00:00
|
|
|
d := &taskDir{
|
2019-05-15 03:33:44 +00:00
|
|
|
Dir: *ramfs.NewDir(t, contents, fs.RootOwner, fs.FilePermsFromMode(0555)),
|
|
|
|
t: t,
|
2019-01-15 04:33:29 +00:00
|
|
|
}
|
2019-06-14 01:39:43 +00:00
|
|
|
return newProcInode(t, d, msrc, fs.SpecialDirectory, t)
|
2018-04-27 17:37:02 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// subtasks represents a /proc/TID/task directory.
//
// +stateify savable
type subtasks struct {
	ramfs.Dir

	// t is the task whose thread group's members are listed.
	t *kernel.Task
	// p is the proc filesystem this directory belongs to.
	p *proc
}

// subtasks must implement fs.InodeOperations.
var _ fs.InodeOperations = (*subtasks)(nil)

// newSubtasks creates the /proc/TID/task directory for t. Its contents are
// generated dynamically (the Dir is created empty).
func (p *proc) newSubtasks(t *kernel.Task, msrc *fs.MountSource) *fs.Inode {
	s := &subtasks{
		Dir: *ramfs.NewDir(t, nil, fs.RootOwner, fs.FilePermsFromMode(0555)),
		t:   t,
		p:   p,
	}
	return newProcInode(t, s, msrc, fs.SpecialDirectory, t)
}
|
|
|
|
|
|
|
|
// UnstableAttr returns unstable attributes of the subtasks.
|
|
|
|
func (s *subtasks) UnstableAttr(ctx context.Context, inode *fs.Inode) (fs.UnstableAttr, error) {
|
|
|
|
uattr, err := s.Dir.UnstableAttr(ctx, inode)
|
|
|
|
if err != nil {
|
|
|
|
return fs.UnstableAttr{}, err
|
|
|
|
}
|
|
|
|
// We can't rely on ramfs' implementation because the task directories are
|
|
|
|
// generated dynamically.
|
|
|
|
uattr.Links = uint64(2 + s.t.ThreadGroup().Count())
|
|
|
|
return uattr, nil
|
|
|
|
}
|
|
|
|
|
2019-01-15 04:33:29 +00:00
|
|
|
// GetFile implements fs.InodeOperations.GetFile.
|
|
|
|
func (s *subtasks) GetFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags) (*fs.File, error) {
|
2019-05-15 03:33:44 +00:00
|
|
|
return fs.NewFile(ctx, dirent, flags, &subtasksFile{t: s.t, pidns: s.p.pidns}), nil
|
2019-01-15 04:33:29 +00:00
|
|
|
}
|
2018-04-27 17:37:02 +00:00
|
|
|
|
2019-01-15 04:33:29 +00:00
|
|
|
// subtasksFile is the fs.FileOperations for a /proc/TID/task directory.
//
// +stateify savable
type subtasksFile struct {
	fsutil.DirFileOperations        `state:"nosave"`
	fsutil.FileUseInodeUnstableAttr `state:"nosave"`

	// t is the task whose thread group's members are listed.
	t *kernel.Task
	// pidns is used to translate tasks to thread IDs.
	pidns *kernel.PIDNamespace
}
|
|
|
|
|
2019-01-15 04:33:29 +00:00
|
|
|
// Readdir implements fs.FileOperations.Readdir.
|
|
|
|
func (f *subtasksFile) Readdir(ctx context.Context, file *fs.File, ser fs.DentrySerializer) (int64, error) {
|
|
|
|
dirCtx := fs.DirCtx{
|
|
|
|
Serializer: ser,
|
|
|
|
}
|
|
|
|
|
|
|
|
// Note that unlike most Readdir implementations, the offset here is
|
|
|
|
// not an index into the subtasks, but rather the TID of the next
|
|
|
|
// subtask to emit.
|
|
|
|
offset := file.Offset()
|
|
|
|
|
2019-06-20 07:19:05 +00:00
|
|
|
tasks := f.t.ThreadGroup().MemberIDs(f.pidns)
|
|
|
|
if len(tasks) == 0 {
|
2019-07-12 20:11:01 +00:00
|
|
|
return offset, syserror.ENOENT
|
2019-06-20 07:19:05 +00:00
|
|
|
}
|
|
|
|
|
2019-01-15 04:33:29 +00:00
|
|
|
if offset == 0 {
|
|
|
|
// Serialize "." and "..".
|
|
|
|
root := fs.RootFromContext(ctx)
|
2019-04-10 23:35:22 +00:00
|
|
|
if root != nil {
|
|
|
|
defer root.DecRef()
|
|
|
|
}
|
2019-01-15 04:33:29 +00:00
|
|
|
dot, dotdot := file.Dirent.GetDotAttrs(root)
|
|
|
|
if err := dirCtx.DirEmit(".", dot); err != nil {
|
|
|
|
return offset, err
|
|
|
|
}
|
|
|
|
if err := dirCtx.DirEmit("..", dotdot); err != nil {
|
|
|
|
return offset, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Serialize tasks.
|
2018-04-27 17:37:02 +00:00
|
|
|
taskInts := make([]int, 0, len(tasks))
|
|
|
|
for _, tid := range tasks {
|
|
|
|
taskInts = append(taskInts, int(tid))
|
|
|
|
}
|
|
|
|
|
Fix deadloop in proc subtask list
Readdir of /proc/x/task/ will get direntry entries
from tasks of specified taskgroup. Now the tasks
slice is unsorted, use sort.SearchInts search entry
from the slice may cause infinity loops.
The fix is sort the slice before search.
This issue could be easily reproduced via following
steps, revise Readdir in pkg/sentry/fs/proc/task.go,
force set taskInts into test slice
[]int{1, 11, 7, 5, 10, 6, 8, 3, 9, 2, 4},
then run docker image and run ls /proc/1/task, the
command will cause infinity loops.
2019-06-28 14:20:57 +00:00
|
|
|
sort.Sort(sort.IntSlice(taskInts))
|
2018-04-27 17:37:02 +00:00
|
|
|
// Find the task to start at.
|
2019-01-15 04:33:29 +00:00
|
|
|
idx := sort.SearchInts(taskInts, int(offset))
|
2018-04-27 17:37:02 +00:00
|
|
|
if idx == len(taskInts) {
|
|
|
|
return offset, nil
|
|
|
|
}
|
|
|
|
taskInts = taskInts[idx:]
|
|
|
|
|
|
|
|
var tid int
|
|
|
|
for _, tid = range taskInts {
|
|
|
|
name := strconv.FormatUint(uint64(tid), 10)
|
|
|
|
attr := fs.GenericDentAttr(fs.SpecialDirectory, device.ProcDevice)
|
|
|
|
if err := dirCtx.DirEmit(name, attr); err != nil {
|
|
|
|
// Returned offset is next tid to serialize.
|
2019-01-15 04:33:29 +00:00
|
|
|
return int64(tid), err
|
2018-04-27 17:37:02 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
// We serialized them all. Next offset should be higher than last
|
|
|
|
// serialized tid.
|
2019-01-15 04:33:29 +00:00
|
|
|
return int64(tid) + 1, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
var _ fs.FileOperations = (*subtasksFile)(nil)
|
|
|
|
|
|
|
|
// Lookup loads an Inode in a task's subtask directory into a Dirent.
|
|
|
|
func (s *subtasks) Lookup(ctx context.Context, dir *fs.Inode, p string) (*fs.Dirent, error) {
|
|
|
|
tid, err := strconv.ParseUint(p, 10, 32)
|
|
|
|
if err != nil {
|
|
|
|
return nil, syserror.ENOENT
|
|
|
|
}
|
|
|
|
|
2019-05-15 03:33:44 +00:00
|
|
|
task := s.p.pidns.TaskWithID(kernel.ThreadID(tid))
|
2019-01-15 04:33:29 +00:00
|
|
|
if task == nil {
|
|
|
|
return nil, syserror.ENOENT
|
|
|
|
}
|
|
|
|
if task.ThreadGroup() != s.t.ThreadGroup() {
|
|
|
|
return nil, syserror.ENOENT
|
|
|
|
}
|
|
|
|
|
2019-05-15 03:33:44 +00:00
|
|
|
td := s.p.newTaskDir(task, dir.MountSource, false)
|
2019-06-14 01:39:43 +00:00
|
|
|
return fs.NewDirent(ctx, td, p), nil
|
2018-04-27 17:37:02 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// exe is an fs.InodeOperations symlink for the /proc/PID/exe file.
//
// +stateify savable
type exe struct {
	ramfs.Symlink

	// t is the task whose executable is linked.
	t *kernel.Task
}

// newExe creates the /proc/PID/exe symlink inode for t. The link target is
// resolved lazily in Readlink, so the Symlink itself is created empty.
func newExe(t *kernel.Task, msrc *fs.MountSource) *fs.Inode {
	exeSymlink := &exe{
		Symlink: *ramfs.NewSymlink(t, fs.RootOwner, ""),
		t:       t,
	}
	return newProcInode(t, exeSymlink, msrc, fs.Symlink, t)
}
|
|
|
|
|
|
|
|
func (e *exe) executable() (d *fs.Dirent, err error) {
|
|
|
|
e.t.WithMuLocked(func(t *kernel.Task) {
|
|
|
|
mm := t.MemoryManager()
|
|
|
|
if mm == nil {
|
2019-04-29 21:03:04 +00:00
|
|
|
// TODO(b/34851096): Check shouldn't allow Readlink once the
|
2018-04-27 17:37:02 +00:00
|
|
|
// Task is zombied.
|
|
|
|
err = syserror.EACCES
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// The MemoryManager may be destroyed, in which case
|
|
|
|
// MemoryManager.destroy will simply set the executable to nil
|
|
|
|
// (with locks held).
|
|
|
|
d = mm.Executable()
|
|
|
|
if d == nil {
|
|
|
|
err = syserror.ENOENT
|
|
|
|
}
|
|
|
|
})
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Readlink implements fs.InodeOperations.
|
|
|
|
func (e *exe) Readlink(ctx context.Context, inode *fs.Inode) (string, error) {
|
|
|
|
if !kernel.ContextCanTrace(ctx, e.t, false) {
|
|
|
|
return "", syserror.EACCES
|
|
|
|
}
|
|
|
|
|
|
|
|
// Pull out the executable for /proc/TID/exe.
|
|
|
|
exec, err := e.executable()
|
|
|
|
if err != nil {
|
|
|
|
return "", err
|
|
|
|
}
|
|
|
|
defer exec.DecRef()
|
|
|
|
|
|
|
|
root := fs.RootFromContext(ctx)
|
|
|
|
if root == nil {
|
|
|
|
// This doesn't correspond to anything in Linux because the vfs is
|
|
|
|
// global there.
|
|
|
|
return "", syserror.EINVAL
|
|
|
|
}
|
|
|
|
defer root.DecRef()
|
|
|
|
n, _ := exec.FullName(root)
|
|
|
|
return n, nil
|
|
|
|
}
|
|
|
|
|
2019-01-15 04:33:29 +00:00
|
|
|
// namespaceSymlink represents a symlink in the namespacefs, such as the files
// in /proc/<pid>/ns.
//
// +stateify savable
type namespaceSymlink struct {
	ramfs.Symlink

	// t is the task whose namespaces the symlink names.
	t *kernel.Task
}

// newNamespaceSymlink creates one /proc/<pid>/ns/<name> symlink.
func newNamespaceSymlink(t *kernel.Task, msrc *fs.MountSource, name string) *fs.Inode {
	// TODO(rahat): Namespace symlinks should contain the namespace name and the
	// inode number for the namespace instance, so for example user:[123456]. We
	// currently fake the inode number by sticking the symlink inode in its
	// place.
	target := fmt.Sprintf("%s:[%d]", name, device.ProcDevice.NextIno())
	n := &namespaceSymlink{
		Symlink: *ramfs.NewSymlink(t, fs.RootOwner, target),
		t:       t,
	}
	return newProcInode(t, n, msrc, fs.Symlink, t)
}
|
|
|
|
|
|
|
|
// Getlink implements fs.InodeOperations.Getlink.
|
2019-01-15 04:33:29 +00:00
|
|
|
func (n *namespaceSymlink) Getlink(ctx context.Context, inode *fs.Inode) (*fs.Dirent, error) {
|
2018-04-27 17:37:02 +00:00
|
|
|
if !kernel.ContextCanTrace(ctx, n.t, false) {
|
|
|
|
return nil, syserror.EACCES
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create a new regular file to fake the namespace file.
|
2019-01-15 04:33:29 +00:00
|
|
|
iops := fsutil.NewNoReadWriteFileInode(ctx, fs.RootOwner, fs.FilePermsFromMode(0777), linux.PROC_SUPER_MAGIC)
|
2019-06-14 01:39:43 +00:00
|
|
|
return fs.NewDirent(ctx, newProcInode(ctx, iops, inode.MountSource, fs.RegularFile, nil), n.Symlink.Target), nil
|
2018-04-27 17:37:02 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func newNamespaceDir(t *kernel.Task, msrc *fs.MountSource) *fs.Inode {
|
2019-01-15 04:33:29 +00:00
|
|
|
contents := map[string]*fs.Inode{
|
|
|
|
"net": newNamespaceSymlink(t, msrc, "net"),
|
|
|
|
"pid": newNamespaceSymlink(t, msrc, "pid"),
|
|
|
|
"user": newNamespaceSymlink(t, msrc, "user"),
|
|
|
|
}
|
|
|
|
d := ramfs.NewDir(t, contents, fs.RootOwner, fs.FilePermsFromMode(0511))
|
2019-06-14 01:39:43 +00:00
|
|
|
return newProcInode(t, d, msrc, fs.SpecialDirectory, t)
|
2018-04-27 17:37:02 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// mapsData implements seqfile.SeqSource for /proc/[pid]/maps.
//
// +stateify savable
type mapsData struct {
	// t is the task whose address space is rendered.
	t *kernel.Task
}

// newMaps creates an inode serving /proc/[pid]/maps for t.
func newMaps(t *kernel.Task, msrc *fs.MountSource) *fs.Inode {
	return newProcInode(t, seqfile.NewSeqFile(t, &mapsData{t}), msrc, fs.SpecialFile, t)
}

// mm returns the task's MemoryManager, or nil if the task has none.
func (md *mapsData) mm() *mm.MemoryManager {
	var tmm *mm.MemoryManager
	md.t.WithMuLocked(func(t *kernel.Task) {
		if mm := t.MemoryManager(); mm != nil {
			// No additional reference is taken on mm here. This is safe
			// because MemoryManager.destroy is required to leave the
			// MemoryManager in a state where it's still usable as a SeqSource.
			tmm = mm
		}
	})
	return tmm
}

// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate.
func (md *mapsData) NeedsUpdate(generation int64) bool {
	if mm := md.mm(); mm != nil {
		return mm.NeedsUpdate(generation)
	}
	// Without an mm we cannot tell; regenerate unconditionally.
	return true
}

// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData.
func (md *mapsData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {
	if mm := md.mm(); mm != nil {
		return mm.ReadMapsSeqFileData(ctx, h)
	}
	// No address space: the file reads as empty.
	return []seqfile.SeqData{}, 0
}
|
|
|
|
|
|
|
|
// smapsData implements seqfile.SeqSource for /proc/[pid]/smaps.
//
// +stateify savable
type smapsData struct {
	// t is the task whose address space is rendered.
	t *kernel.Task
}

// newSmaps creates an inode serving /proc/[pid]/smaps for t.
func newSmaps(t *kernel.Task, msrc *fs.MountSource) *fs.Inode {
	return newProcInode(t, seqfile.NewSeqFile(t, &smapsData{t}), msrc, fs.SpecialFile, t)
}

// mm returns the task's MemoryManager, or nil if the task has none.
func (sd *smapsData) mm() *mm.MemoryManager {
	var tmm *mm.MemoryManager
	sd.t.WithMuLocked(func(t *kernel.Task) {
		if mm := t.MemoryManager(); mm != nil {
			// No additional reference is taken on mm here. This is safe
			// because MemoryManager.destroy is required to leave the
			// MemoryManager in a state where it's still usable as a SeqSource.
			tmm = mm
		}
	})
	return tmm
}

// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate.
func (sd *smapsData) NeedsUpdate(generation int64) bool {
	if mm := sd.mm(); mm != nil {
		return mm.NeedsUpdate(generation)
	}
	// Without an mm we cannot tell; regenerate unconditionally.
	return true
}

// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData.
func (sd *smapsData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {
	if mm := sd.mm(); mm != nil {
		return mm.ReadSmapsSeqFileData(ctx, h)
	}
	// No address space: the file reads as empty.
	return []seqfile.SeqData{}, 0
}
|
|
|
|
|
2018-08-02 17:41:44 +00:00
|
|
|
// taskStatData implements seqfile.SeqSource for /proc/[pid]/stat and
// /proc/[pid]/task/[tid]/stat.
//
// +stateify savable
type taskStatData struct {
	// t is the task whose statistics are rendered.
	t *kernel.Task

	// If tgstats is true, accumulate fault stats (not implemented) and CPU
	// time across all tasks in t's thread group.
	tgstats bool

	// pidns is the PID namespace associated with the proc filesystem that
	// includes the file using this statData.
	pidns *kernel.PIDNamespace
}

// newTaskStat creates the stat file inode; showSubtasks selects thread-group
// (true) versus single-task (false) accounting.
func newTaskStat(t *kernel.Task, msrc *fs.MountSource, showSubtasks bool, pidns *kernel.PIDNamespace) *fs.Inode {
	return newProcInode(t, seqfile.NewSeqFile(t, &taskStatData{t, showSubtasks /* tgstats */, pidns}), msrc, fs.SpecialFile, t)
}

// NeedsUpdate returns whether the generation is old or not.
func (s *taskStatData) NeedsUpdate(generation int64) bool {
	// Contents change as the task runs; always regenerate.
	return true
}

// ReadSeqFileData returns data for the SeqFile reader.
// SeqData, the current generation and where in the file the handle corresponds to.
func (s *taskStatData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {
	if h != nil {
		// The whole file is generated in one shot; a non-nil handle means
		// the reader is past the end.
		return nil, 0
	}

	var buf bytes.Buffer

	// Field order follows proc(5) "stat"; unimplemented fields are
	// hard-coded to zero.
	fmt.Fprintf(&buf, "%d ", s.pidns.IDOfTask(s.t))
	fmt.Fprintf(&buf, "(%s) ", s.t.Name())
	fmt.Fprintf(&buf, "%c ", s.t.StateStatus()[0])
	ppid := kernel.ThreadID(0)
	if parent := s.t.Parent(); parent != nil {
		ppid = s.pidns.IDOfThreadGroup(parent.ThreadGroup())
	}
	fmt.Fprintf(&buf, "%d ", ppid)
	fmt.Fprintf(&buf, "%d ", s.pidns.IDOfProcessGroup(s.t.ThreadGroup().ProcessGroup()))
	fmt.Fprintf(&buf, "%d ", s.pidns.IDOfSession(s.t.ThreadGroup().Session()))
	fmt.Fprintf(&buf, "0 0 " /* tty_nr tpgid */)
	fmt.Fprintf(&buf, "0 " /* flags */)
	fmt.Fprintf(&buf, "0 0 0 0 " /* minflt cminflt majflt cmajflt */)
	var cputime usage.CPUStats
	if s.tgstats {
		cputime = s.t.ThreadGroup().CPUStats()
	} else {
		cputime = s.t.CPUStats()
	}
	fmt.Fprintf(&buf, "%d %d ", linux.ClockTFromDuration(cputime.UserTime), linux.ClockTFromDuration(cputime.SysTime))
	cputime = s.t.ThreadGroup().JoinedChildCPUStats()
	fmt.Fprintf(&buf, "%d %d ", linux.ClockTFromDuration(cputime.UserTime), linux.ClockTFromDuration(cputime.SysTime))
	fmt.Fprintf(&buf, "%d %d ", s.t.Priority(), s.t.Niceness())
	fmt.Fprintf(&buf, "%d ", s.t.ThreadGroup().Count())

	// itrealvalue. Since kernel 2.6.17, this field is no longer
	// maintained, and is hard coded as 0.
	fmt.Fprintf(&buf, "0 ")

	// Start time is relative to boot time, expressed in clock ticks.
	fmt.Fprintf(&buf, "%d ", linux.ClockTFromDuration(s.t.StartTime().Sub(s.t.Kernel().Timekeeper().BootTime())))

	var vss, rss uint64
	s.t.WithMuLocked(func(t *kernel.Task) {
		if mm := t.MemoryManager(); mm != nil {
			vss = mm.VirtualMemorySize()
			rss = mm.ResidentSetSize()
		}
	})
	fmt.Fprintf(&buf, "%d %d ", vss, rss/usermem.PageSize)

	// rsslim.
	fmt.Fprintf(&buf, "%d ", s.t.ThreadGroup().Limits().Get(limits.Rss).Cur)

	fmt.Fprintf(&buf, "0 0 0 0 0 " /* startcode endcode startstack kstkesp kstkeip */)
	fmt.Fprintf(&buf, "0 0 0 0 0 " /* signal blocked sigignore sigcatch wchan */)
	fmt.Fprintf(&buf, "0 0 " /* nswap cnswap */)
	// exit_signal is only meaningful for the thread group leader.
	terminationSignal := linux.Signal(0)
	if s.t == s.t.ThreadGroup().Leader() {
		terminationSignal = s.t.ThreadGroup().TerminationSignal()
	}
	fmt.Fprintf(&buf, "%d ", terminationSignal)
	fmt.Fprintf(&buf, "0 0 0 " /* processor rt_priority policy */)
	fmt.Fprintf(&buf, "0 0 0 " /* delayacct_blkio_ticks guest_time cguest_time */)
	fmt.Fprintf(&buf, "0 0 0 0 0 0 0 " /* start_data end_data start_brk arg_start arg_end env_start env_end */)
	fmt.Fprintf(&buf, "0\n" /* exit_code */)

	return []seqfile.SeqData{{Buf: buf.Bytes(), Handle: (*taskStatData)(nil)}}, 0
}
|
|
|
|
|
2018-05-08 23:14:00 +00:00
|
|
|
// statmData implements seqfile.SeqSource for /proc/[pid]/statm.
//
// +stateify savable
type statmData struct {
	// t is the task whose memory counters are rendered.
	t *kernel.Task
}

// newStatm creates an inode serving /proc/[pid]/statm for t.
func newStatm(t *kernel.Task, msrc *fs.MountSource) *fs.Inode {
	return newProcInode(t, seqfile.NewSeqFile(t, &statmData{t}), msrc, fs.SpecialFile, t)
}

// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate.
func (s *statmData) NeedsUpdate(generation int64) bool {
	// Contents change as the task runs; always regenerate.
	return true
}

// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData.
func (s *statmData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {
	if h != nil {
		// The whole file is generated in one shot.
		return nil, 0
	}

	var vss, rss uint64
	s.t.WithMuLocked(func(t *kernel.Task) {
		if mm := t.MemoryManager(); mm != nil {
			vss = mm.VirtualMemorySize()
			rss = mm.ResidentSetSize()
		}
	})

	// Sizes are reported in pages; unimplemented fields are zero.
	var buf bytes.Buffer
	fmt.Fprintf(&buf, "%d %d 0 0 0 0 0\n", vss/usermem.PageSize, rss/usermem.PageSize)

	return []seqfile.SeqData{{Buf: buf.Bytes(), Handle: (*statmData)(nil)}}, 0
}
|
|
|
|
|
2018-04-27 17:37:02 +00:00
|
|
|
// statusData implements seqfile.SeqSource for /proc/[pid]/status.
//
// +stateify savable
type statusData struct {
	// t is the task whose status is rendered.
	t *kernel.Task
	// pidns is used to translate tasks to thread IDs.
	pidns *kernel.PIDNamespace
}

// newStatus creates an inode serving /proc/[pid]/status for t.
func newStatus(t *kernel.Task, msrc *fs.MountSource, pidns *kernel.PIDNamespace) *fs.Inode {
	return newProcInode(t, seqfile.NewSeqFile(t, &statusData{t, pidns}), msrc, fs.SpecialFile, t)
}

// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate.
func (s *statusData) NeedsUpdate(generation int64) bool {
	// Contents change as the task runs; always regenerate.
	return true
}

// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData.
func (s *statusData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {
	if h != nil {
		// The whole file is generated in one shot.
		return nil, 0
	}
	var buf bytes.Buffer
	fmt.Fprintf(&buf, "Name:\t%s\n", s.t.Name())
	fmt.Fprintf(&buf, "State:\t%s\n", s.t.StateStatus())
	fmt.Fprintf(&buf, "Tgid:\t%d\n", s.pidns.IDOfThreadGroup(s.t.ThreadGroup()))
	fmt.Fprintf(&buf, "Pid:\t%d\n", s.pidns.IDOfTask(s.t))
	// PPid is 0 when there is no parent (e.g. the init task).
	ppid := kernel.ThreadID(0)
	if parent := s.t.Parent(); parent != nil {
		ppid = s.pidns.IDOfThreadGroup(parent.ThreadGroup())
	}
	fmt.Fprintf(&buf, "PPid:\t%d\n", ppid)
	// TracerPid is 0 when the task is not being ptraced.
	tpid := kernel.ThreadID(0)
	if tracer := s.t.Tracer(); tracer != nil {
		tpid = s.pidns.IDOfTask(tracer)
	}
	fmt.Fprintf(&buf, "TracerPid:\t%d\n", tpid)
	var fds int
	var vss, rss, data uint64
	s.t.WithMuLocked(func(t *kernel.Task) {
		if fdTable := t.FDTable(); fdTable != nil {
			fds = fdTable.Size()
		}
		if mm := t.MemoryManager(); mm != nil {
			vss = mm.VirtualMemorySize()
			rss = mm.ResidentSetSize()
			data = mm.VirtualDataSize()
		}
	})
	fmt.Fprintf(&buf, "FDSize:\t%d\n", fds)
	fmt.Fprintf(&buf, "VmSize:\t%d kB\n", vss>>10)
	fmt.Fprintf(&buf, "VmRSS:\t%d kB\n", rss>>10)
	fmt.Fprintf(&buf, "VmData:\t%d kB\n", data>>10)
	fmt.Fprintf(&buf, "Threads:\t%d\n", s.t.ThreadGroup().Count())
	creds := s.t.Credentials()
	fmt.Fprintf(&buf, "CapInh:\t%016x\n", creds.InheritableCaps)
	fmt.Fprintf(&buf, "CapPrm:\t%016x\n", creds.PermittedCaps)
	fmt.Fprintf(&buf, "CapEff:\t%016x\n", creds.EffectiveCaps)
	fmt.Fprintf(&buf, "CapBnd:\t%016x\n", creds.BoundingCaps)
	fmt.Fprintf(&buf, "Seccomp:\t%d\n", s.t.SeccompMode())
	return []seqfile.SeqData{{Buf: buf.Bytes(), Handle: (*statusData)(nil)}}, 0
}
|
|
|
|
|
|
|
|
// ioUsage is the /proc/<pid>/io and /proc/<pid>/task/<tid>/io data provider.
type ioUsage interface {
	// IOUsage returns the io usage data.
	IOUsage() *usage.IO
}

// ioData implements seqfile.SeqSource for /proc/[pid]/io.
//
// +stateify savable
type ioData struct {
	// ioUsage supplies the counters to render.
	ioUsage
}

// newIO creates an inode serving the io file. For a thread group the
// counters aggregate over all member tasks; otherwise they are per-task.
func newIO(t *kernel.Task, msrc *fs.MountSource, isThreadGroup bool) *fs.Inode {
	if isThreadGroup {
		return newProcInode(t, seqfile.NewSeqFile(t, &ioData{t.ThreadGroup()}), msrc, fs.SpecialFile, t)
	}
	return newProcInode(t, seqfile.NewSeqFile(t, &ioData{t}), msrc, fs.SpecialFile, t)
}

// NeedsUpdate returns whether the generation is old or not.
func (i *ioData) NeedsUpdate(generation int64) bool {
	// Counters change continuously; always regenerate.
	return true
}

// ReadSeqFileData returns data for the SeqFile reader.
// SeqData, the current generation and where in the file the handle corresponds to.
func (i *ioData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {
	if h != nil {
		// The whole file is generated in one shot.
		return nil, 0
	}

	// Snapshot the counters into a local copy before formatting.
	io := usage.IO{}
	io.Accumulate(i.IOUsage())

	var buf bytes.Buffer
	fmt.Fprintf(&buf, "rchar: %d\n", io.CharsRead)
	fmt.Fprintf(&buf, "wchar: %d\n", io.CharsWritten)
	fmt.Fprintf(&buf, "syscr: %d\n", io.ReadSyscalls)
	fmt.Fprintf(&buf, "syscw: %d\n", io.WriteSyscalls)
	fmt.Fprintf(&buf, "read_bytes: %d\n", io.BytesRead)
	fmt.Fprintf(&buf, "write_bytes: %d\n", io.BytesWritten)
	fmt.Fprintf(&buf, "cancelled_write_bytes: %d\n", io.BytesWriteCancelled)

	return []seqfile.SeqData{{Buf: buf.Bytes(), Handle: (*ioData)(nil)}}, 0
}
|
|
|
|
|
|
|
|
// comm is a file containing the command name for a task.
//
// On Linux, /proc/[pid]/comm is writable, and writing to the comm file changes
// the thread name. We don't implement this yet as there are no known users of
// this feature.
//
// +stateify savable
type comm struct {
	fsutil.SimpleFileInode

	// t is the task whose name is exposed.
	t *kernel.Task
}

// newComm returns a new comm file.
func newComm(t *kernel.Task, msrc *fs.MountSource) *fs.Inode {
	c := &comm{
		SimpleFileInode: *fsutil.NewSimpleFileInode(t, fs.RootOwner, fs.FilePermsFromMode(0444), linux.PROC_SUPER_MAGIC),
		t:               t,
	}
	return newProcInode(t, c, msrc, fs.SpecialFile, t)
}

// Check implements fs.InodeOperations.Check.
func (c *comm) Check(ctx context.Context, inode *fs.Inode, p fs.PermMask) bool {
	// This file can always be read or written by members of the same
	// thread group. See fs/proc/base.c:proc_tid_comm_permission.
	//
	// N.B. This check is currently a no-op as we don't yet support writing
	// and this file is world-readable anyways.
	t := kernel.TaskFromContext(ctx)
	if t != nil && t.ThreadGroup() == c.t.ThreadGroup() && !p.Execute {
		return true
	}

	// Otherwise fall back to the normal permission check.
	return fs.ContextCanAccessFile(ctx, inode, p)
}

// GetFile implements fs.InodeOperations.GetFile.
func (c *comm) GetFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags) (*fs.File, error) {
	return fs.NewFile(ctx, dirent, flags, &commFile{t: c.t}), nil
}
|
|
|
|
|
|
|
|
// commFile is the read-only fs.FileOperations for a comm inode.
//
// +stateify savable
type commFile struct {
	fsutil.FileGenericSeek          `state:"nosave"`
	fsutil.FileNoIoctl              `state:"nosave"`
	fsutil.FileNoMMap               `state:"nosave"`
	fsutil.FileNoSplice             `state:"nosave"`
	fsutil.FileNoWrite              `state:"nosave"`
	fsutil.FileNoopFlush            `state:"nosave"`
	fsutil.FileNoopFsync            `state:"nosave"`
	fsutil.FileNoopRelease          `state:"nosave"`
	fsutil.FileNotDirReaddir        `state:"nosave"`
	fsutil.FileUseInodeUnstableAttr `state:"nosave"`
	waiter.AlwaysReady              `state:"nosave"`

	// t is the task whose name is exposed.
	t *kernel.Task
}

// commFile must implement fs.FileOperations.
var _ fs.FileOperations = (*commFile)(nil)
|
|
|
|
|
|
|
|
// Read implements fs.FileOperations.Read.
|
|
|
|
func (f *commFile) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequence, offset int64) (int64, error) {
|
2018-04-27 17:37:02 +00:00
|
|
|
if offset < 0 {
|
|
|
|
return 0, syserror.EINVAL
|
|
|
|
}
|
|
|
|
|
2019-01-15 04:33:29 +00:00
|
|
|
buf := []byte(f.t.Name() + "\n")
|
2018-04-27 17:37:02 +00:00
|
|
|
if offset >= int64(len(buf)) {
|
|
|
|
return 0, io.EOF
|
|
|
|
}
|
|
|
|
|
|
|
|
n, err := dst.CopyOut(ctx, buf[offset:])
|
|
|
|
return int64(n), err
|
|
|
|
}
|
|
|
|
|
|
|
|
// auxvec is a file containing the auxiliary vector for a task.
//
// +stateify savable
type auxvec struct {
	fsutil.SimpleFileInode

	// t is the task whose auxiliary vector is exposed.
	t *kernel.Task
}

// newAuxvec returns a new auxvec file.
func newAuxvec(t *kernel.Task, msrc *fs.MountSource) *fs.Inode {
	a := &auxvec{
		SimpleFileInode: *fsutil.NewSimpleFileInode(t, fs.RootOwner, fs.FilePermsFromMode(0444), linux.PROC_SUPER_MAGIC),
		t:               t,
	}
	return newProcInode(t, a, msrc, fs.SpecialFile, t)
}

// GetFile implements fs.InodeOperations.GetFile.
func (a *auxvec) GetFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags) (*fs.File, error) {
	return fs.NewFile(ctx, dirent, flags, &auxvecFile{t: a.t}), nil
}
|
|
|
|
|
|
|
|
// auxvecFile is the read-only fs.FileOperations for an auxvec inode.
//
// +stateify savable
type auxvecFile struct {
	fsutil.FileGenericSeek          `state:"nosave"`
	fsutil.FileNoIoctl              `state:"nosave"`
	fsutil.FileNoMMap               `state:"nosave"`
	fsutil.FileNoSplice             `state:"nosave"`
	fsutil.FileNoWrite              `state:"nosave"`
	fsutil.FileNoopFlush            `state:"nosave"`
	fsutil.FileNoopFsync            `state:"nosave"`
	fsutil.FileNoopRelease          `state:"nosave"`
	fsutil.FileNotDirReaddir        `state:"nosave"`
	fsutil.FileUseInodeUnstableAttr `state:"nosave"`
	waiter.AlwaysReady              `state:"nosave"`

	// t is the task whose auxiliary vector is exposed.
	t *kernel.Task
}
|
|
|
|
|
2019-01-15 04:33:29 +00:00
|
|
|
// Read implements fs.FileOperations.Read.
func (f *auxvecFile) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequence, offset int64) (int64, error) {
	if offset < 0 {
		return 0, syserror.EINVAL
	}

	// Take a reference on the task's mm; DecUsers releases it.
	m, err := getTaskMM(f.t)
	if err != nil {
		return 0, err
	}
	defer m.DecUsers(ctx)
	auxv := m.Auxv()

	// Space for buffer with AT_NULL (0) terminator at the end.
	size := (len(auxv) + 1) * 16
	if offset >= int64(size) {
		return 0, io.EOF
	}

	// Each entry is a (key, value) pair of 8-byte words; the final 16
	// bytes are left zeroed by make to form the AT_NULL terminator.
	buf := make([]byte, size)
	for i, e := range auxv {
		usermem.ByteOrder.PutUint64(buf[16*i:], e.Key)
		usermem.ByteOrder.PutUint64(buf[16*i+8:], uint64(e.Value))
	}

	n, err := dst.CopyOut(ctx, buf[offset:])
	return int64(n), err
}
|