// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package container

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path"
	"path/filepath"
	"reflect"
	"strconv"
	"strings"
	"sync"
	"syscall"
	"testing"
	"time"

	specs "github.com/opencontainers/runtime-spec/specs-go"
	"golang.org/x/sys/unix"
	"gvisor.googlesource.com/gvisor/pkg/abi/linux"
	"gvisor.googlesource.com/gvisor/pkg/log"
	"gvisor.googlesource.com/gvisor/pkg/sentry/control"
	"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth"
	"gvisor.googlesource.com/gvisor/pkg/unet"
	"gvisor.googlesource.com/gvisor/runsc/boot"
	"gvisor.googlesource.com/gvisor/runsc/test/testutil"
)

func init() {
	log.SetLevel(log.Debug)
	if err := testutil.ConfigureExePath(); err != nil {
		panic(err.Error())
	}
}

// waitForProcessList waits for the given process list to show up in the container.
func waitForProcessList(s *Container, expected []*control.Process) error {
	var got []*control.Process
	for start := time.Now(); time.Now().Sub(start) < 10*time.Second; {
		var err error
		got, err = s.Processes()
		if err != nil {
			return fmt.Errorf("error getting process data from container: %v", err)
		}
		if procListsEqual(got, expected) {
			return nil
		}
		// Process might not have started, try again...
		time.Sleep(10 * time.Millisecond)
	}
	return fmt.Errorf("container got process list: %s, want: %s", procListToString(got), procListToString(expected))
}

// procListsEqual is used to check whether 2 Process lists are equal for all
// implemented fields.
func procListsEqual(got, want []*control.Process) bool {
	if len(got) != len(want) {
		return false
	}
	for i := range got {
		pd1 := got[i]
		pd2 := want[i]
		// Zero out unimplemented and timing dependent fields.
		pd1.Time = ""
		pd1.STime = ""
		pd1.C = 0
		if *pd1 != *pd2 {
			return false
		}
	}
	return true
}

// getAndCheckProcLists is similar to waitForProcessList, but checks the
// process list only once instead of waiting and retrying. Use it when the
// expected state is already known to have been reached (e.g. after an exec
// has been confirmed).
func getAndCheckProcLists(cont *Container, want []*control.Process) error {
	got, err := cont.Processes()
	if err != nil {
		return fmt.Errorf("error getting process data from container: %v", err)
	}
	if procListsEqual(got, want) {
		return nil
	}
	return fmt.Errorf("container got process list: %s, want: %s", procListToString(got), procListToString(want))
}
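
// procListToString returns a human-readable representation of a process list,
// for use in error messages.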
func procListToString(pl []*control.Process) string {
	strs := make([]string, 0, len(pl))
	for _, p := range pl {
		strs = append(strs, fmt.Sprintf("%+v", p))
	}
	return fmt.Sprintf("[%s]", strings.Join(strs, ","))
}

// createWriteableOutputFile creates an output file that can be read and
// written to in the sandbox.
func createWriteableOutputFile(path string) (*os.File, error) {
	outputFile, err := os.OpenFile(path, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0666)
	if err != nil {
		return nil, fmt.Errorf("error creating file: %q, %v", path, err)
	}

	// Chmod to allow writing after umask.
	if err := outputFile.Chmod(0666); err != nil {
		return nil, fmt.Errorf("error chmoding file: %q, %v", path, err)
	}
	return outputFile, nil
}
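
// waitForFile polls, with a timeout, until the given file is non-empty,
// indicating that the application inside the sandbox has produced output.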
func waitForFile(f *os.File) error {
	op := func() error {
		fi, err := f.Stat()
		if err != nil {
			return err
		}
		if fi.Size() == 0 {
			return fmt.Errorf("file %q is empty", f.Name())
		}
		return nil
	}

	timeout := 5 * time.Second
	if testutil.RaceEnabled {
		// Race makes slow things even slower, so bump the timeout.
		timeout = 3 * timeout
	}
	return testutil.Poll(op, timeout)
}

// readOutputNum reads the file at the given path and returns the integer at
// the requested position (a position of -1 means the last number in the file).
func readOutputNum(file string, position int) (int, error) {
	f, err := os.Open(file)
	if err != nil {
		return 0, fmt.Errorf("error opening file: %q, %v", file, err)
	}

	// Ensure that there is content in output file.
	if err := waitForFile(f); err != nil {
		return 0, fmt.Errorf("error waiting for output file: %v", err)
	}

	b, err := ioutil.ReadAll(f)
	if err != nil {
		return 0, fmt.Errorf("error reading file: %v", err)
	}
	if len(b) == 0 {
		return 0, fmt.Errorf("no content was read from file %q", file)
	}

	// Strip leading null bytes caused by file offset not being 0 upon restore.
	b = bytes.Trim(b, "\x00")
	nums := strings.Split(string(b), "\n")

	if position >= len(nums) {
		return 0, fmt.Errorf("position %v is not within the length of content %v", position, nums)
	}
	if position == -1 {
		// A position of -1 means the last number in the file. The file ends
		// with a newline, so the last element of nums is empty; use the one
		// before it.
		position = len(nums) - 2
	}
	num, err := strconv.Atoi(nums[position])
	if err != nil {
		return 0, fmt.Errorf("error getting number from file: %v", err)
	}
	return num, nil
}

// run starts the sandbox and waits for it to exit, checking that the
// application succeeded.
func run(spec *specs.Spec, conf *boot.Config) error {
	rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
	if err != nil {
		return fmt.Errorf("error setting up container: %v", err)
	}
	defer os.RemoveAll(rootDir)
	defer os.RemoveAll(bundleDir)

	// Create, start and wait for the container.
	s, err := Create(testutil.UniqueContainerID(), spec, conf, bundleDir, "", "")
	if err != nil {
		return fmt.Errorf("error creating container: %v", err)
	}
	defer s.Destroy()
	if err := s.Start(conf); err != nil {
		return fmt.Errorf("error starting container: %v", err)
	}
	ws, err := s.Wait()
	if err != nil {
		return fmt.Errorf("error waiting on container: %v", err)
	}
	if !ws.Exited() || ws.ExitStatus() != 0 {
		return fmt.Errorf("container failed, waitStatus: %v", ws)
	}
	return nil
}
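
// configOption is a configuration knob that configs() applies on top of the
// default test configuration.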
type configOption int

const (
	overlay configOption = iota
	kvm
	nonExclusiveFS
)
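
// noOverlay is the set of configurations that skip the overlay, so that
// changes made by the container are visible on the host filesystem. all adds
// the overlay configuration on top of noOverlay.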
var noOverlay = []configOption{kvm, nonExclusiveFS}
var all = append(noOverlay, overlay)

// configs generates different configurations to run tests.
func configs(opts ...configOption) []*boot.Config {
	// Always load the default config.
	cs := []*boot.Config{testutil.TestConfig()}
	for _, o := range opts {
		c := testutil.TestConfig()
		switch o {
		case overlay:
			c.Overlay = true
		case kvm:
			// TODO: KVM tests are flaky. Disable until fixed.
			continue

			// TODO: KVM doesn't work with --race.
			if testutil.RaceEnabled {
				continue
			}
			c.Platform = boot.PlatformKVM
		case nonExclusiveFS:
			c.FileAccess = boot.FileAccessProxy
		default:
			panic(fmt.Sprintf("unknown config option %v", o))
		}
		cs = append(cs, c)
	}
	return cs
}

// TestLifecycle tests the basic Create/Start/Signal/Destroy container lifecycle.
// It verifies after each step that the container can be loaded from disk, and
// has the correct status.
func TestLifecycle(t *testing.T) {
	for _, conf := range configs(all...) {
		t.Logf("Running test with conf: %+v", conf)
		// The container will just sleep for a long time. We will kill it before
		// it finishes sleeping.
		spec := testutil.NewSpecWithArgs("sleep", "100")

		rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
		if err != nil {
			t.Fatalf("error setting up container: %v", err)
		}
		defer os.RemoveAll(rootDir)
		defer os.RemoveAll(bundleDir)

		// expectedPL lists the expected process state of the container.
		expectedPL := []*control.Process{
			{
				UID:  0,
				PID:  1,
				PPID: 0,
				C:    0,
				Cmd:  "sleep",
			},
		}
		// Create the container.
		id := testutil.UniqueContainerID()
		if _, err := Create(id, spec, conf, bundleDir, "", ""); err != nil {
			t.Fatalf("error creating container: %v", err)
		}

		// Load the container from disk and check the status.
		s, err := Load(rootDir, id)
		if err != nil {
			t.Fatalf("error loading container: %v", err)
		}
		if got, want := s.Status, Created; got != want {
			t.Errorf("container status got %v, want %v", got, want)
		}

		// List should return the container id.
		ids, err := List(rootDir)
		if err != nil {
			t.Fatalf("error listing containers: %v", err)
		}
		if got, want := ids, []string{id}; !reflect.DeepEqual(got, want) {
			t.Errorf("container list got %v, want %v", got, want)
		}

		// Start the container.
		if err := s.Start(conf); err != nil {
			t.Fatalf("error starting container: %v", err)
		}
		// Load the container from disk and check the status.
		s, err = Load(rootDir, id)
		if err != nil {
			t.Fatalf("error loading container: %v", err)
		}
		if got, want := s.Status, Running; got != want {
			t.Errorf("container status got %v, want %v", got, want)
		}

		// Verify that "sleep 100" is running.
		if err := waitForProcessList(s, expectedPL); err != nil {
			t.Error(err)
		}

		// Wait on the container.
		var wg sync.WaitGroup
		wg.Add(1)
		ch := make(chan struct{})
		go func() {
			ch <- struct{}{}
			ws, err := s.Wait()
			if err != nil {
				t.Fatalf("error waiting on container: %v", err)
			}
			if got, want := ws.Signal(), syscall.SIGTERM; got != want {
				t.Fatalf("got signal %v, want %v", got, want)
			}
			wg.Done()
		}()

		// Wait a bit to ensure that we've started waiting on the container
		// before we signal.
		<-ch
		time.Sleep(100 * time.Millisecond)
		// Send the container a SIGTERM which will cause it to stop.
		if err := s.Signal(syscall.SIGTERM); err != nil {
			t.Fatalf("error sending signal %v to container: %v", syscall.SIGTERM, err)
		}
		// Wait for it to die.
		wg.Wait()

		// The sandbox process should have exited by now, but it is a zombie.
		// In normal runsc usage, it will be parented to init, and init will
		// reap the sandbox. However, in this case the test runner is the
		// parent and will not reap the sandbox process, so we must do it
		// ourselves.
		p, _ := os.FindProcess(s.Sandbox.Pid)
		p.Wait()
		g, _ := os.FindProcess(s.GoferPid)
		g.Wait()

		// Load the container from disk and check the status.
		s, err = Load(rootDir, id)
		if err != nil {
			t.Fatalf("error loading container: %v", err)
		}
		if got, want := s.Status, Stopped; got != want {
			t.Errorf("container status got %v, want %v", got, want)
		}

		// Destroy the container.
		if err := s.Destroy(); err != nil {
			t.Fatalf("error destroying container: %v", err)
		}

		// List should not return the container id.
		ids, err = List(rootDir)
		if err != nil {
			t.Fatalf("error listing containers: %v", err)
		}
		if len(ids) != 0 {
			t.Errorf("expected container list to be empty, but got %v", ids)
		}

		// Loading the container by id should fail.
		if _, err = Load(rootDir, id); err == nil {
			t.Errorf("expected loading destroyed container to fail, but it did not")
		}
	}
}

// TestExePath verifies that we can execute the application with different path formats.
func TestExePath(t *testing.T) {
	for _, conf := range configs(overlay) {
		t.Logf("Running test with conf: %+v", conf)
		for _, test := range []struct {
			path    string
			success bool
		}{
			{path: "true", success: true},
			{path: "bin/true", success: true},
			{path: "/bin/true", success: true},
			{path: "thisfiledoesntexit", success: false},
			{path: "bin/thisfiledoesntexit", success: false},
			{path: "/bin/thisfiledoesntexit", success: false},
		} {
			spec := testutil.NewSpecWithArgs(test.path)
			rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
			if err != nil {
				t.Fatalf("exec: %s, error setting up container: %v", test.path, err)
			}

			ws, err := Run(testutil.UniqueContainerID(), spec, conf, bundleDir, "", "")

			os.RemoveAll(rootDir)
			os.RemoveAll(bundleDir)

			if test.success {
				if err != nil {
					t.Errorf("exec: %s, error running container: %v", test.path, err)
				}
				if ws.ExitStatus() != 0 {
					t.Errorf("exec: %s, got exit status %v want %v", test.path, ws.ExitStatus(), 0)
				}
			} else {
				if err == nil {
					t.Errorf("exec: %s, got: no error, want: error", test.path)
				}
			}
		}
	}
}

// TestAppExitStatus verifies that we can retrieve the application exit status
// from the container.
func TestAppExitStatus(t *testing.T) {
	// First container will succeed.
	succSpec := testutil.NewSpecWithArgs("true")
	conf := testutil.TestConfig()
	rootDir, bundleDir, err := testutil.SetupContainer(succSpec, conf)
	if err != nil {
		t.Fatalf("error setting up container: %v", err)
	}
	defer os.RemoveAll(rootDir)
	defer os.RemoveAll(bundleDir)

	ws, err := Run(testutil.UniqueContainerID(), succSpec, conf, bundleDir, "", "")
	if err != nil {
		t.Fatalf("error running container: %v", err)
	}
	if ws.ExitStatus() != 0 {
		t.Errorf("got exit status %v want %v", ws.ExitStatus(), 0)
	}

	// Second container exits with non-zero status.
	wantStatus := 123
	errSpec := testutil.NewSpecWithArgs("bash", "-c", fmt.Sprintf("exit %d", wantStatus))

	rootDir2, bundleDir2, err := testutil.SetupContainer(errSpec, conf)
	if err != nil {
		t.Fatalf("error setting up container: %v", err)
	}
	defer os.RemoveAll(rootDir2)
	defer os.RemoveAll(bundleDir2)

	ws, err = Run(testutil.UniqueContainerID(), errSpec, conf, bundleDir2, "", "")
	if err != nil {
		t.Fatalf("error running container: %v", err)
	}
	if ws.ExitStatus() != wantStatus {
		t.Errorf("got exit status %v want %v", ws.ExitStatus(), wantStatus)
	}
}

// TestExec verifies that a container can exec a new program.
func TestExec(t *testing.T) {
	for _, conf := range configs(overlay) {
		t.Logf("Running test with conf: %+v", conf)

		const uid = 343
		spec := testutil.NewSpecWithArgs("sleep", "100")

		rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
		if err != nil {
			t.Fatalf("error setting up container: %v", err)
		}
		defer os.RemoveAll(rootDir)
		defer os.RemoveAll(bundleDir)

		// Create and start the container.
		s, err := Create(testutil.UniqueContainerID(), spec, conf, bundleDir, "", "")
		if err != nil {
			t.Fatalf("error creating container: %v", err)
		}
		defer s.Destroy()
		if err := s.Start(conf); err != nil {
			t.Fatalf("error starting container: %v", err)
		}

		// expectedPL lists the expected process state of the container.
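		// The second entry is the process started by the exec below: it runs
		// as the non-root uid and should show up as PID 2 inside the sandbox.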
		expectedPL := []*control.Process{
			{
				UID:  0,
				PID:  1,
				PPID: 0,
				C:    0,
				Cmd:  "sleep",
			},
			{
				UID:  uid,
				PID:  2,
				PPID: 0,
				C:    0,
				Cmd:  "sleep",
			},
		}

		// Verify that "sleep 100" is running.
		if err := waitForProcessList(s, expectedPL[:1]); err != nil {
			t.Error(err)
		}

		execArgs := control.ExecArgs{
			Filename:         "/bin/sleep",
			Argv:             []string{"sleep", "5"},
			WorkingDirectory: "/",
			KUID:             uid,
		}

		// Verify that "sleep 100" and "sleep 5" are running after exec.
		// First, start running exec (which blocks).
		status := make(chan error, 1)
		go func() {
			exitStatus, err := s.Execute(&execArgs)
			if err != nil {
				status <- err
			} else if exitStatus != 0 {
				status <- fmt.Errorf("failed with exit status: %v", exitStatus)
			} else {
				status <- nil
			}
		}()

		if err := waitForProcessList(s, expectedPL); err != nil {
			t.Fatal(err)
		}

		// Ensure that exec finished without error.
		select {
		case <-time.After(10 * time.Second):
			t.Fatalf("container timed out waiting for exec to finish.")
		case st := <-status:
			if st != nil {
				t.Errorf("container failed to exec %v: %v", execArgs, st)
			}
		}
	}
}

// TestCheckpointRestore creates a container that continuously writes successive
// integers to a file. To test checkpoint and restore functionality, the
// container is checkpointed and the last number printed to the file is
// recorded. Then, it is restored in two new containers and the first number
// printed from these containers is checked. Both should be the next
// consecutive number after the last number from the checkpointed container.
func TestCheckpointRestore(t *testing.T) {
	// Skip overlay because test requires writing to host file.
	for _, conf := range configs(noOverlay...) {
		t.Logf("Running test with conf: %+v", conf)

		dir, err := ioutil.TempDir(testutil.TmpDir(), "checkpoint-test")
		if err != nil {
			t.Fatalf("ioutil.TempDir failed: %v", err)
		}
		if err := os.Chmod(dir, 0777); err != nil {
			t.Fatalf("error chmoding file: %q, %v", dir, err)
		}

		outputPath := filepath.Join(dir, "output")
		outputFile, err := createWriteableOutputFile(outputPath)
		if err != nil {
			t.Fatalf("error creating output file: %v", err)
		}
		defer outputFile.Close()

		script := fmt.Sprintf("for ((i=0; ;i++)); do echo $i >> %q; sleep 1; done", outputPath)
		spec := testutil.NewSpecWithArgs("bash", "-c", script)
		rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
		if err != nil {
			t.Fatalf("error setting up container: %v", err)
		}
		defer os.RemoveAll(rootDir)
		defer os.RemoveAll(bundleDir)

		// Create and start the container.
		cont, err := Create(testutil.UniqueContainerID(), spec, conf, bundleDir, "", "")
		if err != nil {
			t.Fatalf("error creating container: %v", err)
		}
		defer cont.Destroy()
		if err := cont.Start(conf); err != nil {
			t.Fatalf("error starting container: %v", err)
		}

		// Set the image path, which is where the checkpoint image will be saved.
		imagePath := filepath.Join(dir, "test-image-file")

		// Create the image file and open for writing.
		file, err := os.OpenFile(imagePath, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0644)
		if err != nil {
			t.Fatalf("error opening new file at imagePath: %v", err)
		}
		defer file.Close()

		// Wait until the application has run.
		if err := waitForFile(outputFile); err != nil {
			t.Fatalf("Failed to wait for output file: %v", err)
		}

		// Checkpoint running container; save state into new file.
		if err := cont.Checkpoint(file); err != nil {
			t.Fatalf("error checkpointing container to empty file: %v", err)
		}
		defer os.RemoveAll(imagePath)

		lastNum, err := readOutputNum(outputPath, -1)
		if err != nil {
			t.Fatalf("error with outputFile: %v", err)
		}

		// Delete and recreate file before restoring.
		if err := os.Remove(outputPath); err != nil {
			t.Fatalf("error removing file")
		}
		outputFile2, err := createWriteableOutputFile(outputPath)
		if err != nil {
			t.Fatalf("error creating output file: %v", err)
		}
		defer outputFile2.Close()

		// Restore into a new container.
		cont2, err := Create(testutil.UniqueContainerID(), spec, conf, bundleDir, "", "")
		if err != nil {
			t.Fatalf("error creating container: %v", err)
		}
		defer cont2.Destroy()

		if err := cont2.Restore(spec, conf, imagePath); err != nil {
			t.Fatalf("error restoring container: %v", err)
		}

		// Wait until the application has run.
		if err := waitForFile(outputFile2); err != nil {
			t.Fatalf("Failed to wait for output file: %v", err)
		}

		firstNum, err := readOutputNum(outputPath, 0)
		if err != nil {
			t.Fatalf("error with outputFile: %v", err)
		}

		// Check that lastNum is one less than firstNum and that the container
		// picks up from where it left off.
		if lastNum+1 != firstNum {
			t.Errorf("error numbers not in order, previous: %d, next: %d", lastNum, firstNum)
		}
		cont2.Destroy()

		// Restore into another container!
		// Delete and recreate file before restoring.
		if err := os.Remove(outputPath); err != nil {
			t.Fatalf("error removing file")
		}
		outputFile3, err := createWriteableOutputFile(outputPath)
		if err != nil {
			t.Fatalf("error creating output file: %v", err)
		}
		defer outputFile3.Close()

		// Restore into a new container.
		cont3, err := Create(testutil.UniqueContainerID(), spec, conf, bundleDir, "", "")
		if err != nil {
			t.Fatalf("error creating container: %v", err)
		}
		defer cont3.Destroy()

		if err := cont3.Restore(spec, conf, imagePath); err != nil {
			t.Fatalf("error restoring container: %v", err)
		}

		// Wait until the application has run.
		if err := waitForFile(outputFile3); err != nil {
			t.Fatalf("Failed to wait for output file: %v", err)
		}

		firstNum2, err := readOutputNum(outputPath, 0)
		if err != nil {
			t.Fatalf("error with outputFile: %v", err)
		}

		// Check that lastNum is one less than firstNum and that the container
		// picks up from where it left off.
		if lastNum+1 != firstNum2 {
			t.Errorf("error numbers not in order, previous: %d, next: %d", lastNum, firstNum2)
		}
		cont3.Destroy()
	}
}

// TestUnixDomainSockets checks that Checkpoint/Restore works in cases
// with filesystem Unix Domain Socket use.
func TestUnixDomainSockets(t *testing.T) {
	// Skip overlay because test requires writing to host file.
	for _, conf := range configs(noOverlay...) {
		t.Logf("Running test with conf: %+v", conf)

		// UDS path is limited to 108 chars for compatibility with older systems.
		// Use '/tmp' (instead of testutil.TmpDir) to ensure the size limit is
		// not exceeded. Assumes '/tmp' exists in the system.
		dir, err := ioutil.TempDir("/tmp", "uds-test")
		if err != nil {
			t.Fatalf("ioutil.TempDir failed: %v", err)
		}
		defer os.RemoveAll(dir)

		outputPath := filepath.Join(dir, "uds_output")
		outputFile, err := os.OpenFile(outputPath, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0666)
		if err != nil {
			t.Fatalf("error creating output file: %v", err)
		}
		defer outputFile.Close()

		app, err := testutil.FindFile("runsc/container/uds_test_app")
		if err != nil {
			t.Fatal("error finding uds_test_app:", err)
		}

		socketPath := filepath.Join(dir, "uds_socket")
		defer os.Remove(socketPath)

		spec := testutil.NewSpecWithArgs(app, "--file", outputPath, "--socket", socketPath)
		spec.Process.User = specs.User{
			UID: uint32(os.Getuid()),
			GID: uint32(os.Getgid()),
		}
		spec.Mounts = []specs.Mount{
			specs.Mount{
				Type:        "bind",
				Destination: dir,
				Source:      dir,
			},
		}

		rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
		if err != nil {
			t.Fatalf("error setting up container: %v", err)
		}
		defer os.RemoveAll(rootDir)
		defer os.RemoveAll(bundleDir)

		// Create and start the container.
		cont, err := Create(testutil.UniqueContainerID(), spec, conf, bundleDir, "", "")
		if err != nil {
			t.Fatalf("error creating container: %v", err)
		}
		defer cont.Destroy()
		if err := cont.Start(conf); err != nil {
			t.Fatalf("error starting container: %v", err)
		}

		// Set the image path, the location where the checkpoint image will be saved.
		imagePath := filepath.Join(dir, "test-image-file")

		// Create the image file and open for writing.
		file, err := os.OpenFile(imagePath, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0644)
		if err != nil {
			t.Fatalf("error opening new file at imagePath: %v", err)
		}
		defer file.Close()
		defer os.RemoveAll(imagePath)

		// Wait until the application has run.
		if err := waitForFile(outputFile); err != nil {
			t.Fatalf("Failed to wait for output file: %v", err)
		}

		// Checkpoint running container; save state into new file.
		if err := cont.Checkpoint(file); err != nil {
			t.Fatalf("error checkpointing container to empty file: %v", err)
		}

		// Read the last number written before the checkpoint.
		lastNum, err := readOutputNum(outputPath, -1)
		if err != nil {
			t.Fatalf("error with outputFile: %v", err)
		}

		// Delete and recreate file before restoring.
		if err := os.Remove(outputPath); err != nil {
			t.Fatalf("error removing file")
		}
		outputFile2, err := os.OpenFile(outputPath, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0666)
		if err != nil {
			t.Fatalf("error creating output file: %v", err)
		}
		defer outputFile2.Close()

		// Restore into a new container.
		contRestore, err := Create(testutil.UniqueContainerID(), spec, conf, bundleDir, "", "")
		if err != nil {
			t.Fatalf("error creating container: %v", err)
		}
		defer contRestore.Destroy()

		if err := contRestore.Restore(spec, conf, imagePath); err != nil {
			t.Fatalf("error restoring container: %v", err)
		}

		// Wait until the application has run.
		if err := waitForFile(outputFile2); err != nil {
			t.Fatalf("Failed to wait for output file: %v", err)
		}

		// Read the first number written after the restore.
		firstNum, err := readOutputNum(outputPath, 0)
		if err != nil {
			t.Fatalf("error with outputFile: %v", err)
		}

		// Check that lastNum is one less than firstNum.
		if lastNum+1 != firstNum {
			t.Errorf("error numbers not consecutive, previous: %d, next: %d", lastNum, firstNum)
		}
		contRestore.Destroy()
	}
}

// TestPauseResume tests that we can successfully pause and resume a container.
// The container runs a sleep, and a second process (a bash loop) is started
// via exec. The test pauses the container, removes the lock file that keeps
// the bash loop alive, and checks that both processes still exist (the paused
// loop cannot react). It then resumes the container and verifies that the
// bash process exits, leaving only the sleep process.
func TestPauseResume(t *testing.T) {
	for _, conf := range configs(noOverlay...) {
		t.Logf("Running test with conf: %+v", conf)
		const uid = 343
		spec := testutil.NewSpecWithArgs("sleep", "20")

		lock, err := ioutil.TempFile(testutil.TmpDir(), "lock")
		if err != nil {
			t.Fatalf("error creating lock file: %v", err)
		}
		defer lock.Close()

		rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
		if err != nil {
			t.Fatalf("error setting up container: %v", err)
		}
		defer os.RemoveAll(rootDir)
		defer os.RemoveAll(bundleDir)

		// Create and start the container.
		cont, err := Create(testutil.UniqueContainerID(), spec, conf, bundleDir, "", "")
		if err != nil {
			t.Fatalf("error creating container: %v", err)
		}
		defer cont.Destroy()
		if err := cont.Start(conf); err != nil {
			t.Fatalf("error starting container: %v", err)
		}

		// expectedPL lists the expected process state of the container.
		expectedPL := []*control.Process{
			{
				UID:  0,
				PID:  1,
				PPID: 0,
				C:    0,
				Cmd:  "sleep",
			},
			{
				UID:  uid,
				PID:  2,
				PPID: 0,
				C:    0,
				Cmd:  "bash",
			},
		}

		script := fmt.Sprintf("while [[ -f %q ]]; do sleep 0.1; done", lock.Name())
		execArgs := control.ExecArgs{
			Filename:         "/bin/bash",
			Argv:             []string{"bash", "-c", script},
			WorkingDirectory: "/",
			KUID:             uid,
		}

		// First, start running exec (which blocks).
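		// The error from Execute is not checked directly; the process list
		// checks below verify that the bash process actually started.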
		go cont.Execute(&execArgs)

		// Verify that both the sleep and the bash processes are running.
		if err := waitForProcessList(cont, expectedPL); err != nil {
			t.Fatal(err)
		}

		// Pause the running container.
		if err := cont.Pause(); err != nil {
			t.Errorf("error pausing container: %v", err)
		}
		if got, want := cont.Status, Paused; got != want {
			t.Errorf("container status got %v, want %v", got, want)
		}

		if err := os.Remove(lock.Name()); err != nil {
			t.Fatalf("os.Remove(lock) failed: %v", err)
		}
		// Script loops and sleeps for 100ms. Give it a bit of time to exit in
		// case pause didn't work.
		time.Sleep(200 * time.Millisecond)

		// Verify that the two processes still exist.
		if err := getAndCheckProcLists(cont, expectedPL); err != nil {
			t.Fatal(err)
		}

		// Resume the paused container.
		if err := cont.Resume(); err != nil {
			t.Errorf("error resuming container: %v", err)
		}
		if got, want := cont.Status, Running; got != want {
			t.Errorf("container status got %v, want %v", got, want)
		}

		expectedPL2 := []*control.Process{
			{
				UID:  0,
				PID:  1,
				PPID: 0,
				C:    0,
				Cmd:  "sleep",
			},
		}

		// Verify that deleting the file triggered the process to exit.
		if err := waitForProcessList(cont, expectedPL2); err != nil {
			t.Fatal(err)
		}
	}
}

// TestPauseResumeStatus makes sure that the statuses are set correctly
// with calls to pause and resume and that pausing and resuming only
// occurs given the correct state.
func TestPauseResumeStatus(t *testing.T) {
	spec := testutil.NewSpecWithArgs("sleep", "20")
	conf := testutil.TestConfig()
	rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
	if err != nil {
		t.Fatalf("error setting up container: %v", err)
	}
	defer os.RemoveAll(rootDir)
	defer os.RemoveAll(bundleDir)

	// Create and start the container.
	cont, err := Create(testutil.UniqueContainerID(), spec, conf, bundleDir, "", "")
	if err != nil {
		t.Fatalf("error creating container: %v", err)
	}
	defer cont.Destroy()
	if err := cont.Start(conf); err != nil {
		t.Fatalf("error starting container: %v", err)
	}

	// Pause the running container.
	if err := cont.Pause(); err != nil {
		t.Errorf("error pausing container: %v", err)
	}
	if got, want := cont.Status, Paused; got != want {
		t.Errorf("container status got %v, want %v", got, want)
	}

	// Try to Pause again. Should cause error.
	if err := cont.Pause(); err == nil {
		t.Errorf("expected error pausing a container that was already paused")
	}
	if got, want := cont.Status, Paused; got != want {
		t.Errorf("container status got %v, want %v", got, want)
	}

	// Resume the paused container.
	if err := cont.Resume(); err != nil {
		t.Errorf("error resuming container: %v", err)
	}
	if got, want := cont.Status, Running; got != want {
		t.Errorf("container status got %v, want %v", got, want)
	}

	// Try to resume again. Should cause error.
	if err := cont.Resume(); err == nil {
		t.Errorf("expected error resuming a container that was already running")
	}
	if got, want := cont.Status, Running; got != want {
		t.Errorf("container status got %v, want %v", got, want)
	}
}
|
|
|
|
|
// TestCapabilities verifies that:
// - Running exec as non-root UID and GID will result in an error (because the
//   executable file can't be read).
// - Running exec as non-root with CAP_DAC_OVERRIDE succeeds because it skips
//   this check.
func TestCapabilities(t *testing.T) {
	// Pick uid/gid different than ours.
	uid := auth.KUID(os.Getuid() + 1)
	gid := auth.KGID(os.Getgid() + 1)

	for _, conf := range configs(all...) {
		t.Logf("Running test with conf: %+v", conf)

		spec := testutil.NewSpecWithArgs("sleep", "100")
		rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
		if err != nil {
			t.Fatalf("error setting up container: %v", err)
		}
		defer os.RemoveAll(rootDir)
		defer os.RemoveAll(bundleDir)

		// Create and start the container.
		s, err := Create(testutil.UniqueContainerID(), spec, conf, bundleDir, "", "")
		if err != nil {
			t.Fatalf("error creating container: %v", err)
		}
		defer s.Destroy()
		if err := s.Start(conf); err != nil {
			t.Fatalf("error starting container: %v", err)
		}

		// expectedPL lists the expected process state of the container.
		expectedPL := []*control.Process{
			{
				UID:  0,
				PID:  1,
				PPID: 0,
				C:    0,
				Cmd:  "sleep",
			},
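			// The second entry describes the "exe" process exec'd below as the
			// non-root user; it is not running yet, which is why the initial
			// waitForProcessList call below uses only expectedPL[:1].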
			{
				UID:  uid,
				PID:  2,
				PPID: 0,
				C:    0,
				Cmd:  "exe",
			},
		}
		if err := waitForProcessList(s, expectedPL[:1]); err != nil {
			t.Fatalf("Failed to wait for sleep to start, err: %v", err)
		}

		// Create an executable that can't be run with the specified UID:GID.
		// This shouldn't be callable within the container until we add the
		// CAP_DAC_OVERRIDE capability to skip the access check.
		exePath := filepath.Join(rootDir, "exe")
		if err := ioutil.WriteFile(exePath, []byte("#!/bin/sh\necho hello"), 0770); err != nil {
			t.Fatalf("couldn't create executable: %v", err)
		}
		defer os.Remove(exePath)

		// Need to traverse the intermediate directory.
		if err := os.Chmod(rootDir, 0755); err != nil {
			t.Fatalf("os.Chmod(%q) failed: %v", rootDir, err)
		}

		execArgs := control.ExecArgs{
			Filename:         exePath,
			Argv:             []string{exePath},
			WorkingDirectory: "/",
			KUID:             uid,
			KGID:             gid,
			Capabilities:     &auth.TaskCapabilities{},
		}

		// "exe" should fail because we don't have the necessary permissions.
		if _, err := s.Execute(&execArgs); err == nil {
			t.Fatalf("container executed without error, but an error was expected")
		}

		// Now we run with the capability enabled and should succeed.
		execArgs.Capabilities = &auth.TaskCapabilities{
			EffectiveCaps: auth.CapabilitySetOf(linux.CAP_DAC_OVERRIDE),
		}
		// "exe" should not fail this time.
		if _, err := s.Execute(&execArgs); err != nil {
			t.Fatalf("container failed to exec %v: %v", execArgs, err)
		}
	}
}

// Test that a tty FD is sent over the console socket if one is provided.
func TestConsoleSocket(t *testing.T) {
	for _, conf := range configs(all...) {
		t.Logf("Running test with conf: %+v", conf)
		spec := testutil.NewSpecWithArgs("true")
		rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
		if err != nil {
			t.Fatalf("error setting up container: %v", err)
		}
		defer os.RemoveAll(rootDir)
		defer os.RemoveAll(bundleDir)

		// Create a named socket and start listening. We use a relative path
		// to avoid overflowing the unix path length limit (108 chars).
		socketPath := filepath.Join(bundleDir, "socket")
		cwd, err := os.Getwd()
		if err != nil {
			t.Fatalf("error getting cwd: %v", err)
		}
		socketRelPath, err := filepath.Rel(cwd, socketPath)
		if err != nil {
			t.Fatalf("error getting relative path for %q from cwd %q: %v", socketPath, cwd, err)
		}
		if len(socketRelPath) > len(socketPath) {
			socketRelPath = socketPath
		}
		srv, err := unet.BindAndListen(socketRelPath, false)
		if err != nil {
			t.Fatalf("error binding and listening to socket %q: %v", socketPath, err)
		}
		defer os.Remove(socketPath)

		// Create the container and pass the socket name.
		id := testutil.UniqueContainerID()
		s, err := Create(id, spec, conf, bundleDir, socketRelPath, "")
		if err != nil {
			t.Fatalf("error creating container: %v", err)
		}

		// Open the other end of the socket.
		sock, err := srv.Accept()
		if err != nil {
			t.Fatalf("error accepting socket connection: %v", err)
		}

		// Allow 1 FD to be received. We only expect 1.
		r := sock.Reader(true /* blocking */)
		r.EnableFDs(1)
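		// Read from the socket. The data itself is not interesting; what
		// matters is the control message (SCM_RIGHTS) that should carry the
		// console FD along with it.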
		// The socket is closed right after sending the FD, so EOF is
		// an allowed error.
		b := [][]byte{{}}
		if _, err := r.ReadVec(b); err != nil && err != io.EOF {
			t.Fatalf("error reading from socket connection: %v", err)
		}

		// We should have gotten a control message.
		fds, err := r.ExtractFDs()
		if err != nil {
			t.Fatalf("error extracting fds from socket connection: %v", err)
		}
		if len(fds) != 1 {
			t.Fatalf("got %d fds from socket, wanted 1", len(fds))
		}

		// Verify that the fd is a terminal.
		if _, err := unix.IoctlGetTermios(fds[0], unix.TCGETS); err != nil {
			t.Errorf("fd is not a terminal (ioctl TCGETS got %v)", err)
		}

		// Shut it down.
		if err := s.Destroy(); err != nil {
			t.Fatalf("error destroying container: %v", err)
		}

		// Close socket.
		if err := srv.Close(); err != nil {
			t.Fatalf("error closing socket: %v", err)
		}
	}
}

// TestRunNonRoot checks that the sandbox can be configured when running as
// a non-privileged user.
func TestRunNonRoot(t *testing.T) {
	for _, conf := range configs(noOverlay...) {
		t.Logf("Running test with conf: %+v", conf)

		spec := testutil.NewSpecWithArgs("/bin/true")
		spec.Process.User.UID = 343
		spec.Process.User.GID = 2401

		// User that container runs as can't list '$TMP/blocked' and would fail to
		// mount it.
		dir, err := ioutil.TempDir(testutil.TmpDir(), "blocked")
		if err != nil {
			t.Fatalf("ioutil.TempDir() failed: %v", err)
		}
		if err := os.Chmod(dir, 0700); err != nil {
			t.Fatalf("os.Chmod(%q) failed: %v", dir, err)
		}
		dir = path.Join(dir, "test")
		if err := os.Mkdir(dir, 0755); err != nil {
			t.Fatalf("os.Mkdir(%q) failed: %v", dir, err)
		}

		if err := run(spec, conf); err != nil {
			t.Fatalf("error running sandbox: %v", err)
		}
	}
}

// TestMountNewDir checks that runsc will create the mount destination
// directory if it doesn't exist.
func TestMountNewDir(t *testing.T) {
	for _, conf := range configs(overlay) {
		t.Logf("Running test with conf: %+v", conf)

		root, err := ioutil.TempDir(testutil.TmpDir(), "root")
		if err != nil {
			t.Fatal("ioutil.TempDir() failed:", err)
		}

		srcDir := path.Join(root, "src", "dir", "anotherdir")
		if err := os.MkdirAll(srcDir, 0755); err != nil {
			t.Fatalf("os.MkdirAll(%q) failed: %v", srcDir, err)
		}
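		// Note that mountDir does not exist on the host; runsc is expected to
		// create the destination directory when setting up the bind mount below.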
		mountDir := path.Join(root, "dir", "anotherdir")

		spec := testutil.NewSpecWithArgs("/bin/ls", mountDir)
		spec.Mounts = append(spec.Mounts, specs.Mount{
			Destination: mountDir,
			Source:      srcDir,
			Type:        "bind",
		})

		if err := run(spec, conf); err != nil {
			t.Fatalf("error running sandbox: %v", err)
		}
	}
}

func TestReadonlyRoot(t *testing.T) {
	for _, conf := range configs(overlay) {
		t.Logf("Running test with conf: %+v", conf)

		spec := testutil.NewSpecWithArgs("/bin/touch", "/foo")
		spec.Root.Readonly = true
		rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
		if err != nil {
			t.Fatalf("error setting up container: %v", err)
		}
		defer os.RemoveAll(rootDir)
		defer os.RemoveAll(bundleDir)

		// Create, start and wait for the container.
		s, err := Create(testutil.UniqueContainerID(), spec, conf, bundleDir, "", "")
		if err != nil {
			t.Fatalf("error creating container: %v", err)
		}
		defer s.Destroy()
		if err := s.Start(conf); err != nil {
			t.Fatalf("error starting container: %v", err)
		}
		ws, err := s.Wait()
		if err != nil {
			t.Fatalf("error waiting on container: %v", err)
		}
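		// touch should have failed because the root is read-only. The check
		// below treats the command's exit status as an errno value and
		// expects EPERM.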
		if !ws.Exited() || syscall.Errno(ws.ExitStatus()) != syscall.EPERM {
			t.Fatalf("expected container to exit with EPERM, got waitStatus: %v", ws)
		}
	}
}

func TestReadonlyMount(t *testing.T) {
	for _, conf := range configs(overlay) {
		t.Logf("Running test with conf: %+v", conf)

		dir, err := ioutil.TempDir(testutil.TmpDir(), "ro-mount")
		if err != nil {
			t.Fatalf("ioutil.TempDir() failed: %v", err)
		}
		spec := testutil.NewSpecWithArgs("/bin/touch", path.Join(dir, "file"))
		spec.Mounts = append(spec.Mounts, specs.Mount{
			Destination: dir,
			Source:      dir,
			Type:        "bind",
			Options:     []string{"ro"},
		})
		spec.Root.Readonly = false

		rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
		if err != nil {
			t.Fatalf("error setting up container: %v", err)
		}
		defer os.RemoveAll(rootDir)
		defer os.RemoveAll(bundleDir)

		// Create, start and wait for the container.
		s, err := Create(testutil.UniqueContainerID(), spec, conf, bundleDir, "", "")
		if err != nil {
			t.Fatalf("error creating container: %v", err)
		}
		defer s.Destroy()
		if err := s.Start(conf); err != nil {
			t.Fatalf("error starting container: %v", err)
		}
		ws, err := s.Wait()
		if err != nil {
			t.Fatalf("error waiting on container: %v", err)
		}
		if !ws.Exited() || syscall.Errno(ws.ExitStatus()) != syscall.EPERM {
			t.Fatalf("expected container to exit with EPERM, got waitStatus: %v", ws)
		}
	}
}

// TestAbbreviatedIDs checks that runsc supports using abbreviated container
// IDs in place of full IDs.
func TestAbbreviatedIDs(t *testing.T) {
	cids := []string{
		"foo-" + testutil.UniqueContainerID(),
		"bar-" + testutil.UniqueContainerID(),
		"baz-" + testutil.UniqueContainerID(),
	}

	rootDir, err := testutil.SetupRootDir()
	if err != nil {
		t.Fatalf("error creating root dir: %v", err)
	}
	for _, cid := range cids {
		spec := testutil.NewSpecWithArgs("sleep", "100")
		conf := testutil.TestConfig()
		bundleDir, err := testutil.SetupContainerInRoot(rootDir, spec, conf)
		if err != nil {
			t.Fatalf("error setting up container: %v", err)
		}
		defer os.RemoveAll(rootDir)
		defer os.RemoveAll(bundleDir)

		// Create the container.
		cont, err := Create(cid, spec, conf, bundleDir, "", "")
		if err != nil {
			t.Fatalf("error creating container: %v", err)
		}
		defer cont.Destroy()
	}

	// These should all be unambiguous.
	unambiguous := map[string]string{
		"f":     cids[0],
		cids[0]: cids[0],
		"bar":   cids[1],
		cids[1]: cids[1],
		"baz":   cids[2],
		cids[2]: cids[2],
	}
	for shortid, longid := range unambiguous {
		if _, err := Load(rootDir, shortid); err != nil {
			t.Errorf("%q should resolve to %q: %v", shortid, longid, err)
		}
	}

	// These should be ambiguous.
	ambiguous := []string{
		"b",
		"ba",
	}
	for _, shortid := range ambiguous {
		if s, err := Load(rootDir, shortid); err == nil {
			t.Errorf("%q should be ambiguous, but resolved to %q", shortid, s.ID)
		}
	}
}

// Check that modifications to a volume mount are propagated into and out of
// the sandbox.
func TestContainerVolumeContentsShared(t *testing.T) {
	// Only run this test with shared proxy, since that is the only
	// behavior it is testing.
	conf := testutil.TestConfig()
	conf.FileAccess = boot.FileAccessProxy
	t.Logf("Running test with conf: %+v", conf)

	// Main process just sleeps. We will use "exec" to probe the state of
	// the filesystem.
	spec := testutil.NewSpecWithArgs("sleep", "1000")

	dir, err := ioutil.TempDir(testutil.TmpDir(), "root-fs-test")
	if err != nil {
		t.Fatalf("TempDir failed: %v", err)
	}

	rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
	if err != nil {
		t.Fatalf("error setting up container: %v", err)
	}
	defer os.RemoveAll(rootDir)
	defer os.RemoveAll(bundleDir)

	// Create and start the container.
	c, err := Create(testutil.UniqueContainerID(), spec, conf, bundleDir, "", "")
	if err != nil {
		t.Fatalf("error creating container: %v", err)
	}
	defer c.Destroy()
	if err := c.Start(conf); err != nil {
		t.Fatalf("error starting container: %v", err)
	}

	// File that will be used to check consistency inside/outside sandbox.
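	// The file lives under the host's temp directory. No mount is set up for
	// it, so it is expected to be reachable inside the sandbox through the
	// root filesystem in this shared (proxy) configuration.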
	filename := filepath.Join(dir, "file")

	// File does not exist yet. Reading from the sandbox should fail.
	execArgsTestFile := control.ExecArgs{
		Filename: "/usr/bin/test",
		Argv:     []string{"test", "-f", filename},
	}
	if ws, err := c.Execute(&execArgsTestFile); err != nil {
		t.Fatalf("unexpected error testing file %q: %v", filename, err)
	} else if ws.ExitStatus() == 0 {
		t.Errorf("test %q exited with code %v, wanted not zero", filename, ws.ExitStatus())
	}
	// Create the file from outside of the sandbox.
	if err := ioutil.WriteFile(filename, []byte("foobar"), 0777); err != nil {
		t.Fatalf("error writing to file %q: %v", filename, err)
	}

	// Now we should be able to test the file from within the sandbox.
	if ws, err := c.Execute(&execArgsTestFile); err != nil {
		t.Fatalf("unexpected error testing file %q: %v", filename, err)
	} else if ws.ExitStatus() != 0 {
		t.Errorf("test %q exited with code %v, wanted zero", filename, ws.ExitStatus())
	}

	// Rename the file from outside of the sandbox.
	newFilename := filepath.Join(dir, "newfile")
	if err := os.Rename(filename, newFilename); err != nil {
		t.Fatalf("os.Rename(%q, %q) failed: %v", filename, newFilename, err)
	}

	// File should no longer exist at the old path within the sandbox.
	if ws, err := c.Execute(&execArgsTestFile); err != nil {
		t.Fatalf("unexpected error testing file %q: %v", filename, err)
	} else if ws.ExitStatus() == 0 {
		t.Errorf("test %q exited with code %v, wanted not zero", filename, ws.ExitStatus())
	}

	// We should be able to test the new filename from within the sandbox.
	execArgsTestNewFile := control.ExecArgs{
		Filename: "/usr/bin/test",
		Argv:     []string{"test", "-f", newFilename},
	}
	if ws, err := c.Execute(&execArgsTestNewFile); err != nil {
		t.Fatalf("unexpected error testing file %q: %v", newFilename, err)
	} else if ws.ExitStatus() != 0 {
		t.Errorf("test %q exited with code %v, wanted zero", newFilename, ws.ExitStatus())
	}

	// Delete the renamed file from outside of the sandbox.
	if err := os.Remove(newFilename); err != nil {
		t.Fatalf("error removing file %q: %v", newFilename, err)
	}

	// The removed file should no longer exist within the sandbox.
	if ws, err := c.Execute(&execArgsTestNewFile); err != nil {
		t.Fatalf("unexpected error testing file %q: %v", newFilename, err)
	} else if ws.ExitStatus() == 0 {
		t.Errorf("test %q exited with code %v, wanted not zero", newFilename, ws.ExitStatus())
	}

	// Now create the file from WITHIN the sandbox.
	execArgsTouch := control.ExecArgs{
		Filename: "/usr/bin/touch",
		Argv:     []string{"touch", filename},
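		// Assumption: touch runs with the test's own uid/gid (rather than
		// root) so that creating the file in the host-owned temp directory
		// passes the permission checks applied for this file-access mode.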
KUID: auth.KUID(os.Getuid()),
|
|
|
|
KGID: auth.KGID(os.Getgid()),
|
|
|
|
}
|
|
|
|
if ws, err := c.Execute(&execArgsTouch); err != nil {
|
2018-08-20 18:25:42 +00:00
|
|
|
t.Fatalf("unexpected error touching file %q: %v", filename, err)
|
runsc: Change cache policy for root fs and volume mounts.
Previously, gofer filesystems were configured with the default "fscache"
policy, which caches filesystem metadata and contents aggressively. While this
setting is best for performance, it means that changes from inside the sandbox
may not be immediately propagated outside the sandbox, and vice-versa.
This CL changes volumes and the root fs configuration to use a new
"remote-revalidate" cache policy which tries to retain as much caching as
possible while still making fs changes visible across the sandbox boundary.
This cache policy is enabled by default for the root filesystem. The default
value for the "--file-access" flag is still "proxy", but the behavior is
changed to use the new cache policy.
A new value for the "--file-access" flag is added, called "proxy-exclusive",
which turns on the previous aggressive caching behavior. As the name implies,
this flag should be used when the sandbox has "exclusive" access to the
filesystem.
All volume mounts are configured to use the new cache policy, since it is
safest and most likely to be correct. There is not currently a way to change
this behavior, but it's possible to add such a mechanism in the future. The
configurability is a smaller issue for volumes, since most of the expensive
application fs operations (walking + stating files) will likely served by the
root fs.
PiperOrigin-RevId: 208735037
Change-Id: Ife048fab1948205f6665df8563434dbc6ca8cfc9
2018-08-14 23:24:46 +00:00
|
|
|
} else if ws.ExitStatus() != 0 {
|
2018-08-20 18:25:42 +00:00
|
|
|
t.Errorf("touch %q exited with code %v, wanted zero", filename, ws.ExitStatus())
|
runsc: Change cache policy for root fs and volume mounts.
Previously, gofer filesystems were configured with the default "fscache"
policy, which caches filesystem metadata and contents aggressively. While this
setting is best for performance, it means that changes from inside the sandbox
may not be immediately propagated outside the sandbox, and vice-versa.
This CL changes volumes and the root fs configuration to use a new
"remote-revalidate" cache policy which tries to retain as much caching as
possible while still making fs changes visible across the sandbox boundary.
This cache policy is enabled by default for the root filesystem. The default
value for the "--file-access" flag is still "proxy", but the behavior is
changed to use the new cache policy.
A new value for the "--file-access" flag is added, called "proxy-exclusive",
which turns on the previous aggressive caching behavior. As the name implies,
this flag should be used when the sandbox has "exclusive" access to the
filesystem.
All volume mounts are configured to use the new cache policy, since it is
safest and most likely to be correct. There is not currently a way to change
this behavior, but it's possible to add such a mechanism in the future. The
configurability is a smaller issue for volumes, since most of the expensive
application fs operations (walking + stating files) will likely served by the
root fs.
PiperOrigin-RevId: 208735037
Change-Id: Ife048fab1948205f6665df8563434dbc6ca8cfc9
2018-08-14 23:24:46 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// File should exist outside the sandbox.
|
2018-08-20 18:25:42 +00:00
|
|
|
if _, err := os.Stat(filename); err != nil {
|
|
|
|
t.Errorf("stat %q got error %v, wanted nil", filename, err)
	}

	// Delete the file from within the sandbox.
	execArgsRemove := control.ExecArgs{
		Filename: "/bin/rm",
		Argv:     []string{"rm", filename},
	}
	if ws, err := c.Execute(&execArgsRemove); err != nil {
		t.Fatalf("unexpected error removing file %q: %v", filename, err)
	} else if ws.ExitStatus() != 0 {
		t.Errorf("remove %q exited with code %v, wanted zero", filename, ws.ExitStatus())
	}

	// File should not exist outside the sandbox.
	if _, err := os.Stat(filename); !os.IsNotExist(err) {
		t.Errorf("stat %q got error %v, wanted ErrNotExist", filename, err)
	}
}

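// TestGoferExits verifies that killing the sandbox process causes the gofer
// process to exit on its own and that the container is no longer reported as
// running.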
func TestGoferExits(t *testing.T) {
	spec := testutil.NewSpecWithArgs("/bin/sleep", "10000")
	conf := testutil.TestConfig()
	rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
	if err != nil {
		t.Fatalf("error setting up container: %v", err)
	}
	defer os.RemoveAll(rootDir)
	defer os.RemoveAll(bundleDir)

	// Create and start the container.
	c, err := Create(testutil.UniqueContainerID(), spec, conf, bundleDir, "", "")
	if err != nil {
		t.Fatalf("error creating container: %v", err)
	}
	defer c.Destroy()
	if err := c.Start(conf); err != nil {
		t.Fatalf("error starting container: %v", err)
	}

	sandboxProc, err := os.FindProcess(c.Sandbox.Pid)
	if err != nil {
		t.Fatalf("error finding sandbox process: %v", err)
	}
	gofer, err := os.FindProcess(c.GoferPid)
	if err != nil {
		t.Fatalf("error finding gofer process: %v", err)
	}

	// Kill sandbox and expect gofer to exit on its own.
	if err := sandboxProc.Kill(); err != nil {
		t.Fatalf("error killing sandbox process: %v", err)
	}
	if _, err := sandboxProc.Wait(); err != nil {
		t.Fatalf("error waiting for sandbox process: %v", err)
	}

	if _, err := gofer.Wait(); err != nil {
		t.Fatalf("error waiting for gofer process: %v", err)
	}
	if c.IsRunning() {
		t.Errorf("container shouldn't be running, container: %+v", c)
	}
}

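// TestMain delegates to testutil.RunAsRoot, which ensures the tests execute
// with the root privileges runsc requires before running them.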
func TestMain(m *testing.M) {
	testutil.RunAsRoot(m)
}