2019-04-29 21:25:05 +00:00
|
|
|
// Copyright 2018 The gVisor Authors.
|
2018-04-27 17:37:02 +00:00
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
|
|
|
package boot
|
|
|
|
|
|
|
|
import (
|
2018-06-15 16:17:40 +00:00
|
|
|
"fmt"
|
|
|
|
"math/rand"
|
2018-04-27 17:37:02 +00:00
|
|
|
"os"
|
2018-06-21 17:17:19 +00:00
|
|
|
"reflect"
|
2018-09-07 19:27:44 +00:00
|
|
|
"syscall"
|
2018-04-27 17:37:02 +00:00
|
|
|
"testing"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
specs "github.com/opencontainers/runtime-spec/specs-go"
|
2020-04-14 18:20:11 +00:00
|
|
|
"golang.org/x/sys/unix"
|
2019-06-13 23:49:09 +00:00
|
|
|
"gvisor.dev/gvisor/pkg/control/server"
|
|
|
|
"gvisor.dev/gvisor/pkg/log"
|
|
|
|
"gvisor.dev/gvisor/pkg/p9"
|
2020-01-27 23:17:58 +00:00
|
|
|
"gvisor.dev/gvisor/pkg/sentry/contexttest"
|
2019-06-13 23:49:09 +00:00
|
|
|
"gvisor.dev/gvisor/pkg/sentry/fs"
|
2020-01-10 06:00:42 +00:00
|
|
|
"gvisor.dev/gvisor/pkg/sync"
|
2019-06-13 23:49:09 +00:00
|
|
|
"gvisor.dev/gvisor/pkg/unet"
|
|
|
|
"gvisor.dev/gvisor/runsc/fsgofer"
|
2018-04-27 17:37:02 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
func init() {
|
|
|
|
log.SetLevel(log.Debug)
|
2018-06-15 16:17:40 +00:00
|
|
|
rand.Seed(time.Now().UnixNano())
|
2019-06-25 04:43:14 +00:00
|
|
|
if err := fsgofer.OpenProcSelfFD(); err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
2018-04-27 17:37:02 +00:00
|
|
|
}
|
|
|
|
|
2018-08-27 18:09:06 +00:00
|
|
|
func testConfig() *Config {
|
|
|
|
return &Config{
|
|
|
|
RootDir: "unused_root_dir",
|
|
|
|
Network: NetworkNone,
|
|
|
|
DisableSeccomp: true,
|
2019-07-04 05:50:26 +00:00
|
|
|
Platform: "ptrace",
|
2018-08-27 18:09:06 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-04-27 17:37:02 +00:00
|
|
|
// testSpec returns a simple spec that can be used in tests.
|
|
|
|
func testSpec() *specs.Spec {
|
|
|
|
return &specs.Spec{
|
|
|
|
// The host filesystem root is the sandbox root.
|
|
|
|
Root: &specs.Root{
|
|
|
|
Path: "/",
|
|
|
|
Readonly: true,
|
|
|
|
},
|
|
|
|
Process: &specs.Process{
|
|
|
|
Args: []string{"/bin/true"},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-09-07 19:27:44 +00:00
|
|
|
// startGofer starts a new gofer routine serving 'root' path. It returns the
|
|
|
|
// sandbox side of the connection, and a function that when called will stop the
|
|
|
|
// gofer.
|
|
|
|
func startGofer(root string) (int, func(), error) {
|
|
|
|
fds, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_STREAM|syscall.SOCK_CLOEXEC, 0)
|
|
|
|
if err != nil {
|
|
|
|
return 0, nil, err
|
|
|
|
}
|
|
|
|
sandboxEnd, goferEnd := fds[0], fds[1]
|
|
|
|
|
|
|
|
socket, err := unet.NewSocket(goferEnd)
|
|
|
|
if err != nil {
|
|
|
|
syscall.Close(sandboxEnd)
|
|
|
|
syscall.Close(goferEnd)
|
|
|
|
return 0, nil, fmt.Errorf("error creating server on FD %d: %v", goferEnd, err)
|
|
|
|
}
|
2018-11-02 00:51:22 +00:00
|
|
|
at, err := fsgofer.NewAttachPoint(root, fsgofer.Config{ROMount: true})
|
|
|
|
if err != nil {
|
|
|
|
return 0, nil, err
|
|
|
|
}
|
2018-09-07 19:27:44 +00:00
|
|
|
go func() {
|
|
|
|
s := p9.NewServer(at)
|
|
|
|
if err := s.Handle(socket); err != nil {
|
|
|
|
log.Infof("Gofer is stopping. FD: %d, err: %v\n", goferEnd, err)
|
|
|
|
}
|
|
|
|
}()
|
2018-10-17 23:17:35 +00:00
|
|
|
// Closing the gofer socket will stop the gofer and exit goroutine above.
|
|
|
|
cleanup := func() {
|
|
|
|
if err := socket.Close(); err != nil {
|
|
|
|
log.Warningf("Error closing gofer socket: %v", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return sandboxEnd, cleanup, nil
|
2018-09-07 19:27:44 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func createLoader() (*Loader, func(), error) {
|
2018-06-15 16:17:40 +00:00
|
|
|
fd, err := server.CreateSocket(ControlSocketAddr(fmt.Sprintf("%010d", rand.Int())[:10]))
|
2018-04-27 17:37:02 +00:00
|
|
|
if err != nil {
|
2018-09-07 19:27:44 +00:00
|
|
|
return nil, nil, err
|
2018-04-27 17:37:02 +00:00
|
|
|
}
|
2018-08-27 18:09:06 +00:00
|
|
|
conf := testConfig()
|
2018-07-18 23:57:29 +00:00
|
|
|
spec := testSpec()
|
2018-09-07 19:27:44 +00:00
|
|
|
|
|
|
|
sandEnd, cleanup, err := startGofer(spec.Root.Path)
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
|
|
|
|
2020-04-14 18:20:11 +00:00
|
|
|
// Loader takes ownership of stdio.
|
|
|
|
var stdio []int
|
|
|
|
for _, f := range []*os.File{os.Stdin, os.Stdout, os.Stderr} {
|
|
|
|
newFd, err := unix.Dup(int(f.Fd()))
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
|
|
|
stdio = append(stdio, newFd)
|
|
|
|
}
|
|
|
|
|
2018-10-10 15:59:25 +00:00
|
|
|
args := Args{
|
|
|
|
ID: "foo",
|
|
|
|
Spec: spec,
|
|
|
|
Conf: conf,
|
|
|
|
ControllerFD: fd,
|
|
|
|
GoferFDs: []int{sandEnd},
|
|
|
|
StdioFDs: stdio,
|
|
|
|
}
|
|
|
|
l, err := New(args)
|
2018-09-07 19:27:44 +00:00
|
|
|
if err != nil {
|
|
|
|
cleanup()
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
|
|
|
return l, cleanup, nil
|
2018-04-27 17:37:02 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// TestRun runs a simple application in a sandbox and checks that it succeeds.
|
|
|
|
func TestRun(t *testing.T) {
|
2018-09-17 23:24:05 +00:00
|
|
|
l, cleanup, err := createLoader()
|
2018-04-27 17:37:02 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("error creating loader: %v", err)
|
|
|
|
}
|
2018-09-17 23:24:05 +00:00
|
|
|
defer l.Destroy()
|
2018-09-07 19:27:44 +00:00
|
|
|
defer cleanup()
|
2018-04-27 17:37:02 +00:00
|
|
|
|
2018-05-09 21:12:44 +00:00
|
|
|
// Start a goroutine to read the start chan result, otherwise Run will
|
|
|
|
// block forever.
|
|
|
|
var resultChanErr error
|
|
|
|
var wg sync.WaitGroup
|
|
|
|
wg.Add(1)
|
|
|
|
go func() {
|
2018-09-17 23:24:05 +00:00
|
|
|
resultChanErr = <-l.ctrl.manager.startResultChan
|
2018-05-09 21:12:44 +00:00
|
|
|
wg.Done()
|
|
|
|
}()
|
|
|
|
|
2018-09-17 23:24:05 +00:00
|
|
|
// Run the container.
|
|
|
|
if err := l.Run(); err != nil {
|
2018-05-17 18:54:36 +00:00
|
|
|
t.Errorf("error running container: %v", err)
|
2018-04-27 17:37:02 +00:00
|
|
|
}
|
|
|
|
|
2018-05-09 21:12:44 +00:00
|
|
|
// We should have not gotten an error on the startResultChan.
|
|
|
|
wg.Wait()
|
|
|
|
if resultChanErr != nil {
|
|
|
|
t.Errorf("error on startResultChan: %v", resultChanErr)
|
|
|
|
}
|
|
|
|
|
2018-04-27 17:37:02 +00:00
|
|
|
// Wait for the application to exit. It should succeed.
|
2018-09-17 23:24:05 +00:00
|
|
|
if status := l.WaitExit(); status.Code != 0 || status.Signo != 0 {
|
2018-04-27 17:37:02 +00:00
|
|
|
t.Errorf("application exited with status %+v, want 0", status)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// TestStartSignal tests that the controller Start message will cause
|
|
|
|
// WaitForStartSignal to return.
|
|
|
|
func TestStartSignal(t *testing.T) {
|
2018-09-17 23:24:05 +00:00
|
|
|
l, cleanup, err := createLoader()
|
2018-04-27 17:37:02 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("error creating loader: %v", err)
|
|
|
|
}
|
2018-09-17 23:24:05 +00:00
|
|
|
defer l.Destroy()
|
2018-09-07 19:27:44 +00:00
|
|
|
defer cleanup()
|
2018-04-27 17:37:02 +00:00
|
|
|
|
|
|
|
// We aren't going to wait on this application, so the control server
|
|
|
|
// needs to be shut down manually.
|
2018-09-17 23:24:05 +00:00
|
|
|
defer l.ctrl.srv.Stop()
|
2018-04-27 17:37:02 +00:00
|
|
|
|
|
|
|
// Start a goroutine that calls WaitForStartSignal and writes to a
|
|
|
|
// channel when it returns.
|
|
|
|
waitFinished := make(chan struct{})
|
|
|
|
go func() {
|
2018-09-17 23:24:05 +00:00
|
|
|
l.WaitForStartSignal()
|
2018-05-09 21:12:44 +00:00
|
|
|
// Pretend that Run() executed and returned no error.
|
2018-09-17 23:24:05 +00:00
|
|
|
l.ctrl.manager.startResultChan <- nil
|
2018-04-27 17:37:02 +00:00
|
|
|
waitFinished <- struct{}{}
|
|
|
|
}()
|
|
|
|
|
|
|
|
// Nothing has been written to the channel, so waitFinished should not
|
|
|
|
// return. Give it a little bit of time to make sure the goroutine has
|
|
|
|
// started.
|
|
|
|
select {
|
|
|
|
case <-waitFinished:
|
|
|
|
t.Errorf("WaitForStartSignal completed but it should not have")
|
|
|
|
case <-time.After(50 * time.Millisecond):
|
|
|
|
// OK.
|
|
|
|
}
|
|
|
|
|
2018-05-17 18:54:36 +00:00
|
|
|
// Trigger the control server StartRoot method.
|
2018-06-22 21:30:33 +00:00
|
|
|
cid := "foo"
|
2018-09-17 23:24:05 +00:00
|
|
|
if err := l.ctrl.manager.StartRoot(&cid, nil); err != nil {
|
2018-05-17 18:54:36 +00:00
|
|
|
t.Errorf("error calling StartRoot: %v", err)
|
2018-04-27 17:37:02 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Now WaitForStartSignal should return (within a short amount of
|
|
|
|
// time).
|
|
|
|
select {
|
|
|
|
case <-waitFinished:
|
|
|
|
// OK.
|
|
|
|
case <-time.After(50 * time.Millisecond):
|
|
|
|
t.Errorf("WaitForStartSignal did not complete but it should have")
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
// Test that MountNamespace can be created with various specs.
//
// Each case starts a real gofer over the spec root, builds the mount
// namespace plus submounts, and then verifies every expected path resolves.
func TestCreateMountNamespace(t *testing.T) {
	testCases := []struct {
		name string

		// Spec that will be used to create the mount manager. Note
		// that we can't mount procfs without a kernel, so each spec
		// MUST contain something other than procfs mounted at /proc.
		spec specs.Spec

		// Paths that are expected to exist in the resulting fs.
		expectedPaths []string
	}{
		{
			// Only proc.
			name: "only proc mount",
			spec: specs.Spec{
				Root: &specs.Root{
					Path:     os.TempDir(),
					Readonly: true,
				},
				Mounts: []specs.Mount{
					{
						Destination: "/proc",
						Type:        "tmpfs",
					},
				},
			},
			// /proc, /dev, and /sys should always be mounted.
			expectedPaths: []string{"/proc", "/dev", "/sys"},
		},
		{
			// Mount at a deep path, with many components that do
			// not exist in the root.
			name: "deep mount path",
			spec: specs.Spec{
				Root: &specs.Root{
					Path:     os.TempDir(),
					Readonly: true,
				},
				Mounts: []specs.Mount{
					{
						Destination: "/some/very/very/deep/path",
						Type:        "tmpfs",
					},
					{
						Destination: "/proc",
						Type:        "tmpfs",
					},
				},
			},
			// /some/deep/path should be mounted, along with /proc,
			// /dev, and /sys.
			expectedPaths: []string{"/some/very/very/deep/path", "/proc", "/dev", "/sys"},
		},
		{
			// Mounts are nested inside each other.
			name: "nested mounts",
			spec: specs.Spec{
				Root: &specs.Root{
					Path:     os.TempDir(),
					Readonly: true,
				},
				Mounts: []specs.Mount{
					{
						Destination: "/proc",
						Type:        "tmpfs",
					},
					{
						Destination: "/foo",
						Type:        "tmpfs",
					},
					{
						Destination: "/foo/qux",
						Type:        "tmpfs",
					},
					{
						// File mounts with the same prefix.
						Destination: "/foo/qux-quz",
						Type:        "tmpfs",
					},
					{
						Destination: "/foo/bar",
						Type:        "tmpfs",
					},
					{
						Destination: "/foo/bar/baz",
						Type:        "tmpfs",
					},
					{
						// A deep path that is in foo but not the other mounts.
						Destination: "/foo/some/very/very/deep/path",
						Type:        "tmpfs",
					},
				},
			},
			expectedPaths: []string{"/foo", "/foo/bar", "/foo/bar/baz", "/foo/qux",
				"/foo/qux-quz", "/foo/some/very/very/deep/path", "/proc", "/dev", "/sys"},
		},
		{
			name: "mount inside /dev",
			spec: specs.Spec{
				Root: &specs.Root{
					Path:     os.TempDir(),
					Readonly: true,
				},
				Mounts: []specs.Mount{
					{
						Destination: "/proc",
						Type:        "tmpfs",
					},
					{
						Destination: "/dev",
						Type:        "tmpfs",
					},
					{
						// Mounted by runsc by default.
						Destination: "/dev/fd",
						Type:        "tmpfs",
					},
					{
						// Mount with the same prefix.
						Destination: "/dev/fd-foo",
						Type:        "tmpfs",
					},
					{
						// Unsupported fs type.
						Destination: "/dev/mqueue",
						Type:        "mqueue",
					},
					{
						Destination: "/dev/foo",
						Type:        "tmpfs",
					},
					{
						Destination: "/dev/bar",
						Type:        "tmpfs",
					},
				},
			},
			// Note: /dev/fd and /dev/mqueue are deliberately absent
			// here (the former is replaced by runsc's default, the
			// latter is an unsupported type).
			expectedPaths: []string{"/proc", "/dev", "/dev/fd-foo", "/dev/foo", "/dev/bar", "/sys"},
		},
		{
			name: "mounts inside mandatory mounts",
			spec: specs.Spec{
				Root: &specs.Root{
					Path:     os.TempDir(),
					Readonly: true,
				},
				Mounts: []specs.Mount{
					{
						Destination: "/proc",
						Type:        "tmpfs",
					},
					// We don't include /sys, and /tmp in
					// the spec, since they will be added
					// automatically.
					//
					// Instead, add submounts inside these
					// directories and make sure they are
					// visible under the mandatory mounts.
					{
						Destination: "/sys/bar",
						Type:        "tmpfs",
					},
					{
						Destination: "/tmp/baz",
						Type:        "tmpfs",
					},
				},
			},
			expectedPaths: []string{"/proc", "/sys", "/sys/bar", "/tmp", "/tmp/baz"},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			conf := testConfig()
			ctx := contexttest.Context(t)

			// Serve the spec root with a real gofer so 9p mounts work.
			sandEnd, cleanup, err := startGofer(tc.spec.Root.Path)
			if err != nil {
				t.Fatalf("failed to create gofer: %v", err)
			}
			defer cleanup()

			mntr := newContainerMounter(&tc.spec, []int{sandEnd}, nil, &podMountHints{})
			mns, err := mntr.createMountNamespace(ctx, conf)
			if err != nil {
				t.Fatalf("failed to create mount namespace: %v", err)
			}
			// Submount resolution needs the new namespace root in ctx.
			ctx = fs.WithRoot(ctx, mns.Root())
			if err := mntr.mountSubmounts(ctx, conf, mns); err != nil {
				t.Fatalf("failed to create mount namespace: %v", err)
			}

			root := mns.Root()
			defer root.DecRef()
			for _, p := range tc.expectedPaths {
				// No symlink traversal expected in these specs.
				maxTraversals := uint(0)
				if d, err := mns.FindInode(ctx, root, root, p, &maxTraversals); err != nil {
					t.Errorf("expected path %v to exist with spec %v, but got error %v", p, tc.spec, err)
				} else {
					d.DecRef()
				}
			}
		})
	}
}
|
2018-06-21 17:17:19 +00:00
|
|
|
|
|
|
|
// TestRestoreEnvironment tests that the correct mounts are collected from the spec and config
// in order to build the environment for restoring.
//
// The expected environments are compared with reflect.DeepEqual, so entry
// counts, ordering within each filesystem's slice, and exact DataString
// values (FD numbers, cache policy) all matter.
func TestRestoreEnvironment(t *testing.T) {
	testCases := []struct {
		name string

		// Input spec and the gofer FDs backing its 9p mounts.
		spec  *specs.Spec
		ioFDs []int

		// Whether createRestoreEnvironment should fail.
		errorExpected bool

		// Expected environment on success.
		expectedRenv fs.RestoreEnvironment
	}{
		{
			name: "basic spec test",
			spec: &specs.Spec{
				Root: &specs.Root{
					Path:     os.TempDir(),
					Readonly: true,
				},
				Mounts: []specs.Mount{
					{
						Destination: "/some/very/very/deep/path",
						Type:        "tmpfs",
					},
					{
						Destination: "/proc",
						Type:        "tmpfs",
					},
				},
			},
			ioFDs:         []int{0},
			errorExpected: false,
			expectedRenv: fs.RestoreEnvironment{
				MountSources: map[string][]fs.MountArgs{
					"9p": {
						{
							// Root mount served over FD 0.
							Dev:        "9pfs-/",
							Flags:      fs.MountSourceFlags{ReadOnly: true},
							DataString: "trans=fd,rfdno=0,wfdno=0,privateunixsocket=true,cache=remote_revalidating",
						},
					},
					// Two spec tmpfs mounts plus the automatic /tmp.
					"tmpfs": {
						{
							Dev: "none",
						},
						{
							Dev: "none",
						},
						{
							Dev: "none",
						},
					},
					"devtmpfs": {
						{
							Dev: "none",
						},
					},
					"devpts": {
						{
							Dev: "none",
						},
					},
					"sysfs": {
						{
							Dev: "none",
						},
					},
				},
			},
		},
		{
			name: "bind type test",
			spec: &specs.Spec{
				Root: &specs.Root{
					Path:     os.TempDir(),
					Readonly: true,
				},
				Mounts: []specs.Mount{
					{
						Destination: "/dev/fd-foo",
						Type:        "bind",
					},
				},
			},
			ioFDs:         []int{0, 1},
			errorExpected: false,
			expectedRenv: fs.RestoreEnvironment{
				MountSources: map[string][]fs.MountArgs{
					// Bind mounts become 9p mounts on their own FD.
					"9p": {
						{
							Dev:        "9pfs-/",
							Flags:      fs.MountSourceFlags{ReadOnly: true},
							DataString: "trans=fd,rfdno=0,wfdno=0,privateunixsocket=true,cache=remote_revalidating",
						},
						{
							Dev:        "9pfs-/dev/fd-foo",
							DataString: "trans=fd,rfdno=1,wfdno=1,privateunixsocket=true,cache=remote_revalidating",
						},
					},
					"tmpfs": {
						{
							Dev: "none",
						},
					},
					"devtmpfs": {
						{
							Dev: "none",
						},
					},
					"devpts": {
						{
							Dev: "none",
						},
					},
					"proc": {
						{
							Dev: "none",
						},
					},
					"sysfs": {
						{
							Dev: "none",
						},
					},
				},
			},
		},
		{
			name: "options test",
			spec: &specs.Spec{
				Root: &specs.Root{
					Path:     os.TempDir(),
					Readonly: true,
				},
				Mounts: []specs.Mount{
					{
						Destination: "/dev/fd-foo",
						Type:        "tmpfs",
						Options:     []string{"uid=1022", "noatime"},
					},
				},
			},
			ioFDs:         []int{0},
			errorExpected: false,
			expectedRenv: fs.RestoreEnvironment{
				MountSources: map[string][]fs.MountArgs{
					"9p": {
						{
							Dev:        "9pfs-/",
							Flags:      fs.MountSourceFlags{ReadOnly: true},
							DataString: "trans=fd,rfdno=0,wfdno=0,privateunixsocket=true,cache=remote_revalidating",
						},
					},
					"tmpfs": {
						{
							// "noatime" maps to a flag; "uid" stays in the data string.
							Dev:        "none",
							Flags:      fs.MountSourceFlags{NoAtime: true},
							DataString: "uid=1022",
						},
						{
							Dev: "none",
						},
					},
					"devtmpfs": {
						{
							Dev: "none",
						},
					},
					"devpts": {
						{
							Dev: "none",
						},
					},
					"proc": {
						{
							Dev: "none",
						},
					},
					"sysfs": {
						{
							Dev: "none",
						},
					},
				},
			},
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			conf := testConfig()
			mntr := newContainerMounter(tc.spec, tc.ioFDs, nil, &podMountHints{})
			actualRenv, err := mntr.createRestoreEnvironment(conf)
			if !tc.errorExpected && err != nil {
				t.Fatalf("could not create restore environment for test:%s", tc.name)
			} else if tc.errorExpected {
				if err == nil {
					t.Errorf("expected an error, but no error occurred.")
				}
			} else {
				if !reflect.DeepEqual(*actualRenv, tc.expectedRenv) {
					t.Errorf("restore environments did not match for test:%s\ngot:%+v\nwant:%+v\n", tc.name, *actualRenv, tc.expectedRenv)
				}
			}
		})
	}
}
|