// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package container

import (
	"bytes"
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path"
	"path/filepath"
	"reflect"
	"strconv"
	"strings"
	"sync"
	"syscall"
	"testing"
	"time"

	"github.com/cenkalti/backoff"
	specs "github.com/opencontainers/runtime-spec/specs-go"
	"gvisor.dev/gvisor/pkg/abi/linux"
	"gvisor.dev/gvisor/pkg/log"
	"gvisor.dev/gvisor/pkg/sentry/control"
	"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
	"gvisor.dev/gvisor/runsc/boot"
	"gvisor.dev/gvisor/runsc/boot/platforms"
	"gvisor.dev/gvisor/runsc/specutils"
	"gvisor.dev/gvisor/runsc/testutil"
)

// waitForProcessList waits for the given process list to show up in the container.
func waitForProcessList(cont *Container, want []*control.Process) error {
	cb := func() error {
		got, err := cont.Processes()
		if err != nil {
			err = fmt.Errorf("error getting process data from container: %v", err)
			return &backoff.PermanentError{Err: err}
		}
		if !procListsEqual(got, want) {
			return fmt.Errorf("container got process list: %s, want: %s", procListToString(got), procListToString(want))
		}
		return nil
	}
	// Gives plenty of time as tests can run slow under --race.
	return testutil.Poll(cb, 30*time.Second)
}
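
// waitForProcessCount waits until the number of processes running in the
// container reaches want.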
func waitForProcessCount(cont *Container, want int) error {
	cb := func() error {
		pss, err := cont.Processes()
		if err != nil {
			err = fmt.Errorf("error getting process data from container: %v", err)
			return &backoff.PermanentError{Err: err}
		}
		if got := len(pss); got != want {
			return fmt.Errorf("wrong process count, got: %d, want: %d", got, want)
		}
		return nil
	}
	// Gives plenty of time as tests can run slow under --race.
	return testutil.Poll(cb, 30*time.Second)
}
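
// blockUntilWaitable blocks until the process with the given pid can be
// reaped. It uses waitid(2) with WNOWAIT, so the child is left in a waitable
// state for whoever reaps it later.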
func blockUntilWaitable(pid int) error {
	_, _, err := specutils.RetryEintr(func() (uintptr, uintptr, error) {
		var err error
		// waitid(P_PID, pid, nil, WEXITED|WNOWAIT): the idtype argument 1 is P_PID.
		_, _, err1 := syscall.Syscall6(syscall.SYS_WAITID, 1, uintptr(pid), 0, syscall.WEXITED|syscall.WNOWAIT, 0, 0)
		if err1 != 0 {
			err = err1
		}
		return 0, 0, err
	})
	return err
}

// procListsEqual is used to check whether two Process lists are equal for all
// implemented fields.
func procListsEqual(got, want []*control.Process) bool {
	if len(got) != len(want) {
		return false
	}
	for i := range got {
		pd1 := got[i]
		pd2 := want[i]
		// Zero out unimplemented and timing-dependent fields.
		pd1.Time = ""
		pd1.STime = ""
		pd1.C = 0
		if *pd1 != *pd2 {
			return false
		}
	}
	return true
}

// getAndCheckProcLists is similar to waitForProcessList, but does not wait
// and retry the test for equality. This is because we already confirmed that
// exec occurred.
func getAndCheckProcLists(cont *Container, want []*control.Process) error {
	got, err := cont.Processes()
	if err != nil {
		return fmt.Errorf("error getting process data from container: %v", err)
	}
	if procListsEqual(got, want) {
		return nil
	}
	return fmt.Errorf("container got process list: %s, want: %s", procListToString(got), procListToString(want))
}
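
// procListToString formats a process list as a string for error messages.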
func procListToString(pl []*control.Process) string {
	strs := make([]string, 0, len(pl))
	for _, p := range pl {
		strs = append(strs, fmt.Sprintf("%+v", p))
	}
	return fmt.Sprintf("[%s]", strings.Join(strs, ","))
}

// createWriteableOutputFile creates an output file that can be read and
// written to in the sandbox.
func createWriteableOutputFile(path string) (*os.File, error) {
	outputFile, err := os.OpenFile(path, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0666)
	if err != nil {
		return nil, fmt.Errorf("error creating file: %q, %v", path, err)
	}

	// Chmod to allow writing after umask.
	if err := outputFile.Chmod(0666); err != nil {
		return nil, fmt.Errorf("error chmoding file: %q, %v", path, err)
	}
	return outputFile, nil
}
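
// waitForFile waits (polling for up to 30 seconds) for the given file to
// become non-empty.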
func waitForFile(f *os.File) error {
	op := func() error {
		fi, err := f.Stat()
		if err != nil {
			return err
		}
		if fi.Size() == 0 {
			return fmt.Errorf("file %q is empty", f.Name())
		}
		return nil
	}

	return testutil.Poll(op, 30*time.Second)
}

// readOutputNum reads the file at the given path and returns the int at the
// requested position.
func readOutputNum(file string, position int) (int, error) {
	f, err := os.Open(file)
	if err != nil {
		return 0, fmt.Errorf("error opening file: %q, %v", file, err)
	}

	// Ensure that there is content in output file.
	if err := waitForFile(f); err != nil {
		return 0, fmt.Errorf("error waiting for output file: %v", err)
	}

	b, err := ioutil.ReadAll(f)
	if err != nil {
		return 0, fmt.Errorf("error reading file: %v", err)
	}
	if len(b) == 0 {
		return 0, fmt.Errorf("error no content was read")
	}

	// Strip leading null bytes caused by file offset not being 0 upon restore.
	b = bytes.Trim(b, "\x00")
	nums := strings.Split(string(b), "\n")

	if position >= len(nums) {
		return 0, fmt.Errorf("position %v is not within the length of content %v", position, nums)
	}
	if position == -1 {
		// position -1 means the last number. The file ends in a newline, so
		// the final split element is empty; take the one before it.
		position = len(nums) - 2
	}
	num, err := strconv.Atoi(nums[position])
	if err != nil {
		return 0, fmt.Errorf("error getting number from file: %v", err)
	}
	return num, nil
}

// run starts the sandbox and waits for it to exit, checking that the
// application succeeded.
func run(spec *specs.Spec, conf *boot.Config) error {
	rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
	if err != nil {
		return fmt.Errorf("error setting up container: %v", err)
	}
	defer os.RemoveAll(rootDir)
	defer os.RemoveAll(bundleDir)

	// Create, start and wait for the container.
	args := Args{
		ID:        testutil.UniqueContainerID(),
		Spec:      spec,
		BundleDir: bundleDir,
		Attached:  true,
	}
	ws, err := Run(conf, args)
	if err != nil {
		return fmt.Errorf("running container: %v", err)
	}
	if !ws.Exited() || ws.ExitStatus() != 0 {
		return fmt.Errorf("container failed, waitStatus: %v", ws)
	}
	return nil
}
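
// configOption is a configuration knob understood by configs() when
// generating test configurations.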
type configOption int

const (
	overlay configOption = iota
	kvm
	nonExclusiveFS
)
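
// noOverlay lists the configurations that do not use an overlay, for tests
// that require writes to be visible on the host; all adds overlay as well.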
var noOverlay = []configOption{kvm, nonExclusiveFS}
var all = append(noOverlay, overlay)

// configs generates different configurations to run tests.
func configs(opts ...configOption) []*boot.Config {
	// Always load the default config.
	cs := []*boot.Config{testutil.TestConfig()}

	for _, o := range opts {
		c := testutil.TestConfig()
		switch o {
		case overlay:
			c.Overlay = true
		case kvm:
			// TODO(b/112165693): KVM tests are flaky. Disable until fixed.
			continue

			c.Platform = platforms.KVM
		case nonExclusiveFS:
			c.FileAccess = boot.FileAccessShared
		default:
			panic(fmt.Sprintf("unknown config option %v", o))
		}
		cs = append(cs, c)
	}
	return cs
}

// TestLifecycle tests the basic Create/Start/Signal/Destroy container lifecycle.
// It verifies after each step that the container can be loaded from disk, and
// has the correct status.
func TestLifecycle(t *testing.T) {
	// Start the child reaper.
	childReaper := &testutil.Reaper{}
	childReaper.Start()
	defer childReaper.Stop()

	for _, conf := range configs(all...) {
		t.Logf("Running test with conf: %+v", conf)
		// The container will just sleep for a long time. We will kill it before
		// it finishes sleeping.
		spec := testutil.NewSpecWithArgs("sleep", "100")

		rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
		if err != nil {
			t.Fatalf("error setting up container: %v", err)
		}
		defer os.RemoveAll(rootDir)
		defer os.RemoveAll(bundleDir)

		// expectedPL lists the expected process state of the container.
		expectedPL := []*control.Process{
			{
				UID:  0,
				PID:  1,
				PPID: 0,
				C:    0,
				Cmd:  "sleep",
			},
		}

		// Create the container.
		args := Args{
			ID:        testutil.UniqueContainerID(),
			Spec:      spec,
			BundleDir: bundleDir,
		}
		c, err := New(conf, args)
		if err != nil {
			t.Fatalf("error creating container: %v", err)
		}
		defer c.Destroy()

		// Load the container from disk and check the status.
		c, err = Load(rootDir, args.ID)
		if err != nil {
			t.Fatalf("error loading container: %v", err)
		}
		if got, want := c.Status, Created; got != want {
			t.Errorf("container status got %v, want %v", got, want)
		}

		// List should return the container id.
		ids, err := List(rootDir)
		if err != nil {
			t.Fatalf("error listing containers: %v", err)
		}
		if got, want := ids, []string{args.ID}; !reflect.DeepEqual(got, want) {
			t.Errorf("container list got %v, want %v", got, want)
		}

		// Start the container.
		if err := c.Start(conf); err != nil {
			t.Fatalf("error starting container: %v", err)
		}

		// Load the container from disk and check the status.
		c, err = Load(rootDir, args.ID)
		if err != nil {
			t.Fatalf("error loading container: %v", err)
		}
		if got, want := c.Status, Running; got != want {
			t.Errorf("container status got %v, want %v", got, want)
		}

		// Verify that "sleep 100" is running.
		if err := waitForProcessList(c, expectedPL); err != nil {
			t.Error(err)
		}

		// Wait on the container.
		var wg sync.WaitGroup
		wg.Add(1)
		ch := make(chan struct{})
		go func() {
			ch <- struct{}{}
			ws, err := c.Wait()
			if err != nil {
				t.Fatalf("error waiting on container: %v", err)
			}
			if got, want := ws.Signal(), syscall.SIGTERM; got != want {
				t.Fatalf("got signal %v, want %v", got, want)
			}
			wg.Done()
		}()

		// Wait a bit to ensure that we've started waiting on the
		// container before we signal.
		<-ch
		time.Sleep(100 * time.Millisecond)

		// Send the container a SIGTERM which will cause it to stop.
		if err := c.SignalContainer(syscall.SIGTERM, false); err != nil {
			t.Fatalf("error sending signal %v to container: %v", syscall.SIGTERM, err)
		}

		// Wait for it to die.
		wg.Wait()

		// Load the container from disk and check the status.
		c, err = Load(rootDir, args.ID)
		if err != nil {
			t.Fatalf("error loading container: %v", err)
		}
		if got, want := c.Status, Stopped; got != want {
			t.Errorf("container status got %v, want %v", got, want)
		}

		// Destroy the container.
		if err := c.Destroy(); err != nil {
			t.Fatalf("error destroying container: %v", err)
		}

		// List should not return the container id.
		ids, err = List(rootDir)
		if err != nil {
			t.Fatalf("error listing containers: %v", err)
		}
		if len(ids) != 0 {
			t.Errorf("expected container list to be empty, but got %v", ids)
		}

		// Loading the container by id should fail.
		if _, err = Load(rootDir, args.ID); err == nil {
			t.Errorf("expected loading destroyed container to fail, but it did not")
		}
	}
}

// TestExePath tests that we can execute the application with different path
// formats.
func TestExePath(t *testing.T) {
	// Create two directories that will be prepended to PATH.
	firstPath, err := ioutil.TempDir(testutil.TmpDir(), "first")
	if err != nil {
		t.Fatal(err)
	}
	secondPath, err := ioutil.TempDir(testutil.TmpDir(), "second")
	if err != nil {
		t.Fatal(err)
	}

	// Create three minimal executables in the second path, two of which
	// will be masked by files in the first path.
	for _, p := range []string{"unmasked", "masked1", "masked2"} {
		path := filepath.Join(secondPath, p)
		f, err := os.OpenFile(path, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0777)
		if err != nil {
			t.Fatal(err)
		}
		defer f.Close()
		if _, err := io.WriteString(f, "#!/bin/true\n"); err != nil {
			t.Fatal(err)
		}
	}

	// Create a non-executable file in the first path which masks a healthy
	// executable in the second.
	nonExecutable := filepath.Join(firstPath, "masked1")
	f2, err := os.OpenFile(nonExecutable, os.O_CREATE|os.O_EXCL, 0666)
	if err != nil {
		t.Fatal(err)
	}
	f2.Close()

	// Create a non-regular file in the first path which masks a healthy
	// executable in the second.
	nonRegular := filepath.Join(firstPath, "masked2")
	if err := os.Mkdir(nonRegular, 0777); err != nil {
		t.Fatal(err)
	}

	for _, conf := range configs(overlay) {
		t.Logf("Running test with conf: %+v", conf)
		for _, test := range []struct {
			path    string
			success bool
		}{
			{path: "true", success: true},
			{path: "bin/true", success: true},
			{path: "/bin/true", success: true},
			{path: "thisfiledoesntexit", success: false},
			{path: "bin/thisfiledoesntexit", success: false},
			{path: "/bin/thisfiledoesntexit", success: false},

			{path: "unmasked", success: true},
			{path: filepath.Join(firstPath, "unmasked"), success: false},
			{path: filepath.Join(secondPath, "unmasked"), success: true},

			{path: "masked1", success: true},
			{path: filepath.Join(firstPath, "masked1"), success: false},
			{path: filepath.Join(secondPath, "masked1"), success: true},

			{path: "masked2", success: true},
			{path: filepath.Join(firstPath, "masked2"), success: false},
			{path: filepath.Join(secondPath, "masked2"), success: true},
		} {
			spec := testutil.NewSpecWithArgs(test.path)
			spec.Process.Env = []string{
				fmt.Sprintf("PATH=%s:%s:%s", firstPath, secondPath, os.Getenv("PATH")),
			}

			rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
			if err != nil {
				t.Fatalf("exec: %s, error setting up container: %v", test.path, err)
			}

			args := Args{
				ID:        testutil.UniqueContainerID(),
				Spec:      spec,
				BundleDir: bundleDir,
				Attached:  true,
			}
			ws, err := Run(conf, args)

			os.RemoveAll(rootDir)
			os.RemoveAll(bundleDir)

			if test.success {
				if err != nil {
					t.Errorf("exec: %s, error running container: %v", test.path, err)
				}
				if ws.ExitStatus() != 0 {
					t.Errorf("exec: %s, got exit status %v want %v", test.path, ws.ExitStatus(), 0)
				}
			} else {
				if err == nil {
					t.Errorf("exec: %s, got: no error, want: error", test.path)
				}
			}
		}
	}
}

// TestAppExitStatus tests that we can retrieve the application exit status
// from the container.
func TestAppExitStatus(t *testing.T) {
	// First container will succeed.
	succSpec := testutil.NewSpecWithArgs("true")
	conf := testutil.TestConfig()
	rootDir, bundleDir, err := testutil.SetupContainer(succSpec, conf)
	if err != nil {
		t.Fatalf("error setting up container: %v", err)
	}
	defer os.RemoveAll(rootDir)
	defer os.RemoveAll(bundleDir)

	args := Args{
		ID:        testutil.UniqueContainerID(),
		Spec:      succSpec,
		BundleDir: bundleDir,
		Attached:  true,
	}
	ws, err := Run(conf, args)
	if err != nil {
		t.Fatalf("error running container: %v", err)
	}
	if ws.ExitStatus() != 0 {
		t.Errorf("got exit status %v want %v", ws.ExitStatus(), 0)
	}

	// Second container exits with non-zero status.
	wantStatus := 123
	errSpec := testutil.NewSpecWithArgs("bash", "-c", fmt.Sprintf("exit %d", wantStatus))

	rootDir2, bundleDir2, err := testutil.SetupContainer(errSpec, conf)
	if err != nil {
		t.Fatalf("error setting up container: %v", err)
	}
	defer os.RemoveAll(rootDir2)
	defer os.RemoveAll(bundleDir2)

	args2 := Args{
		ID:        testutil.UniqueContainerID(),
		Spec:      errSpec,
		BundleDir: bundleDir2,
		Attached:  true,
	}
	ws, err = Run(conf, args2)
	if err != nil {
		t.Fatalf("error running container: %v", err)
	}
	if ws.ExitStatus() != wantStatus {
		t.Errorf("got exit status %v want %v", ws.ExitStatus(), wantStatus)
	}
}

// TestExec verifies that a container can exec a new program.
func TestExec(t *testing.T) {
	for _, conf := range configs(overlay) {
		t.Logf("Running test with conf: %+v", conf)

		const uid = 343
		spec := testutil.NewSpecWithArgs("sleep", "100")

		rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
		if err != nil {
			t.Fatalf("error setting up container: %v", err)
		}
		defer os.RemoveAll(rootDir)
		defer os.RemoveAll(bundleDir)

		// Create and start the container.
		args := Args{
			ID:        testutil.UniqueContainerID(),
			Spec:      spec,
			BundleDir: bundleDir,
		}
		cont, err := New(conf, args)
		if err != nil {
			t.Fatalf("error creating container: %v", err)
		}
		defer cont.Destroy()
		if err := cont.Start(conf); err != nil {
			t.Fatalf("error starting container: %v", err)
		}

		// expectedPL lists the expected process state of the container.
		expectedPL := []*control.Process{
			{
				UID:  0,
				PID:  1,
				PPID: 0,
				C:    0,
				Cmd:  "sleep",
			},
			{
				UID:  uid,
				PID:  2,
				PPID: 0,
				C:    0,
				Cmd:  "sleep",
			},
		}

		// Verify that "sleep 100" is running.
		if err := waitForProcessList(cont, expectedPL[:1]); err != nil {
			t.Error(err)
		}

		execArgs := &control.ExecArgs{
			Filename:         "/bin/sleep",
			Argv:             []string{"/bin/sleep", "5"},
			WorkingDirectory: "/",
			KUID:             uid,
		}

		// Verify that "sleep 100" and "sleep 5" are running after exec.
		// First, start running exec (which blocks).
		status := make(chan error, 1)
		go func() {
			exitStatus, err := cont.executeSync(execArgs)
			if err != nil {
				log.Debugf("error executing: %v", err)
				status <- err
			} else if exitStatus != 0 {
				log.Debugf("bad status: %d", exitStatus)
				status <- fmt.Errorf("failed with exit status: %v", exitStatus)
			} else {
				status <- nil
			}
		}()

		if err := waitForProcessList(cont, expectedPL); err != nil {
			t.Fatal(err)
		}

		// Ensure that exec finished without error.
		select {
		case <-time.After(10 * time.Second):
			t.Fatalf("container timed out waiting for exec to finish.")
		case st := <-status:
			if st != nil {
				t.Errorf("container failed to exec %v: %v", execArgs, st)
			}
		}
	}
}

// TestKillPid verifies that we can signal individual exec'd processes.
func TestKillPid(t *testing.T) {
	for _, conf := range configs(overlay) {
		t.Logf("Running test with conf: %+v", conf)

		app, err := testutil.FindFile("runsc/container/test_app/test_app")
		if err != nil {
			t.Fatal("error finding test_app:", err)
		}

		const nProcs = 4
		spec := testutil.NewSpecWithArgs(app, "task-tree", "--depth", strconv.Itoa(nProcs-1), "--width=1", "--pause=true")
		rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
		if err != nil {
			t.Fatalf("error setting up container: %v", err)
		}
		defer os.RemoveAll(rootDir)
		defer os.RemoveAll(bundleDir)

		// Create and start the container.
		args := Args{
			ID:        testutil.UniqueContainerID(),
			Spec:      spec,
			BundleDir: bundleDir,
		}
		cont, err := New(conf, args)
		if err != nil {
			t.Fatalf("error creating container: %v", err)
		}
		defer cont.Destroy()
		if err := cont.Start(conf); err != nil {
			t.Fatalf("error starting container: %v", err)
		}

		// Verify that all processes are running.
		if err := waitForProcessCount(cont, nProcs); err != nil {
			t.Fatalf("timed out waiting for processes to start: %v", err)
		}

		// Kill the child process with the largest PID.
		procs, err := cont.Processes()
		if err != nil {
			t.Fatalf("failed to get process list: %v", err)
		}
		var pid int32
		for _, p := range procs {
			if pid < int32(p.PID) {
				pid = int32(p.PID)
			}
		}
		if err := cont.SignalProcess(syscall.SIGKILL, pid); err != nil {
			t.Fatalf("failed to signal process %d: %v", pid, err)
		}

		// Verify that one process is gone.
		if err := waitForProcessCount(cont, nProcs-1); err != nil {
			t.Fatal(err)
		}

		procs, err = cont.Processes()
		if err != nil {
			t.Fatalf("failed to get process list: %v", err)
		}
		for _, p := range procs {
			if pid == int32(p.PID) {
				t.Fatalf("pid %d is still alive, but it should have been killed", pid)
			}
		}
	}
}

// TestCheckpointRestore creates a container that continuously writes
// successive integers to a file. To test checkpoint and restore
// functionality, the container is checkpointed and the last number printed
// to the file is recorded. Then, it is restored in two new containers and
// the first number printed from these containers is checked. Both should be
// the next consecutive number after the last number from the checkpointed
// container.
func TestCheckpointRestore(t *testing.T) {
	// Skip overlay because test requires writing to host file.
	for _, conf := range configs(noOverlay...) {
		t.Logf("Running test with conf: %+v", conf)

		dir, err := ioutil.TempDir(testutil.TmpDir(), "checkpoint-test")
		if err != nil {
			t.Fatalf("ioutil.TempDir failed: %v", err)
		}
		if err := os.Chmod(dir, 0777); err != nil {
			t.Fatalf("error chmoding file: %q, %v", dir, err)
		}

		outputPath := filepath.Join(dir, "output")
		outputFile, err := createWriteableOutputFile(outputPath)
		if err != nil {
			t.Fatalf("error creating output file: %v", err)
		}
		defer outputFile.Close()

		script := fmt.Sprintf("for ((i=0; ;i++)); do echo $i >> %q; sleep 1; done", outputPath)
		spec := testutil.NewSpecWithArgs("bash", "-c", script)
		rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
		if err != nil {
			t.Fatalf("error setting up container: %v", err)
		}
		defer os.RemoveAll(rootDir)
		defer os.RemoveAll(bundleDir)

		// Create and start the container.
		args := Args{
			ID:        testutil.UniqueContainerID(),
			Spec:      spec,
			BundleDir: bundleDir,
		}
		cont, err := New(conf, args)
		if err != nil {
			t.Fatalf("error creating container: %v", err)
		}
		defer cont.Destroy()
		if err := cont.Start(conf); err != nil {
			t.Fatalf("error starting container: %v", err)
		}

		// Set the image path, which is where the checkpoint image will be saved.
		imagePath := filepath.Join(dir, "test-image-file")

		// Create the image file and open for writing.
		file, err := os.OpenFile(imagePath, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0644)
		if err != nil {
			t.Fatalf("error opening new file at imagePath: %v", err)
		}
		defer file.Close()

		// Wait until the application has run.
		if err := waitForFile(outputFile); err != nil {
			t.Fatalf("Failed to wait for output file: %v", err)
		}

		// Checkpoint running container; save state into new file.
		if err := cont.Checkpoint(file); err != nil {
			t.Fatalf("error checkpointing container to empty file: %v", err)
		}
		defer os.RemoveAll(imagePath)

		lastNum, err := readOutputNum(outputPath, -1)
		if err != nil {
			t.Fatalf("error with outputFile: %v", err)
		}

		// Delete and recreate file before restoring.
		if err := os.Remove(outputPath); err != nil {
			t.Fatalf("error removing file")
		}
		outputFile2, err := createWriteableOutputFile(outputPath)
		if err != nil {
			t.Fatalf("error creating output file: %v", err)
		}
		defer outputFile2.Close()

		// Restore into a new container.
		args2 := Args{
			ID:        testutil.UniqueContainerID(),
			Spec:      spec,
			BundleDir: bundleDir,
		}
		cont2, err := New(conf, args2)
		if err != nil {
			t.Fatalf("error creating container: %v", err)
		}
		defer cont2.Destroy()

		if err := cont2.Restore(spec, conf, imagePath); err != nil {
			t.Fatalf("error restoring container: %v", err)
		}

		// Wait until the application has run.
		if err := waitForFile(outputFile2); err != nil {
			t.Fatalf("Failed to wait for output file: %v", err)
		}

		firstNum, err := readOutputNum(outputPath, 0)
		if err != nil {
			t.Fatalf("error with outputFile: %v", err)
		}

		// Check that lastNum is one less than firstNum and that the container
		// picks up from where it left off.
		if lastNum+1 != firstNum {
			t.Errorf("error numbers not in order, previous: %d, next: %d", lastNum, firstNum)
		}
		cont2.Destroy()

		// Restore into another container!
		// Delete and recreate file before restoring.
		if err := os.Remove(outputPath); err != nil {
			t.Fatalf("error removing file")
		}
		outputFile3, err := createWriteableOutputFile(outputPath)
		if err != nil {
			t.Fatalf("error creating output file: %v", err)
		}
		defer outputFile3.Close()

		// Restore into a new container.
		args3 := Args{
			ID:        testutil.UniqueContainerID(),
			Spec:      spec,
			BundleDir: bundleDir,
		}
		cont3, err := New(conf, args3)
		if err != nil {
			t.Fatalf("error creating container: %v", err)
		}
		defer cont3.Destroy()

		if err := cont3.Restore(spec, conf, imagePath); err != nil {
			t.Fatalf("error restoring container: %v", err)
		}

		// Wait until the application has run.
		if err := waitForFile(outputFile3); err != nil {
			t.Fatalf("Failed to wait for output file: %v", err)
		}

		firstNum2, err := readOutputNum(outputPath, 0)
		if err != nil {
			t.Fatalf("error with outputFile: %v", err)
		}

		// Check that lastNum is one less than firstNum and that the container
		// picks up from where it left off.
		if lastNum+1 != firstNum2 {
			t.Errorf("error numbers not in order, previous: %d, next: %d", lastNum, firstNum2)
		}
		cont3.Destroy()
	}
}
|
|
|
|
|
|
|
|
// TestUnixDomainSockets checks that Checkpoint/Restore works in cases
// with filesystem Unix Domain Socket use.
func TestUnixDomainSockets(t *testing.T) {
	// Skip overlay because test requires writing to host file.
	for _, conf := range configs(noOverlay...) {
		t.Logf("Running test with conf: %+v", conf)

		// UDS path is limited to 108 chars for compatibility with older systems.
		// Use '/tmp' (instead of testutil.TmpDir) to ensure the size limit is
		// not exceeded. Assumes '/tmp' exists in the system.
		dir, err := ioutil.TempDir("/tmp", "uds-test")
		if err != nil {
			t.Fatalf("ioutil.TempDir failed: %v", err)
		}
		defer os.RemoveAll(dir)

		outputPath := filepath.Join(dir, "uds_output")
		outputFile, err := os.OpenFile(outputPath, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0666)
		if err != nil {
			t.Fatalf("error creating output file: %v", err)
		}
		defer outputFile.Close()

		app, err := testutil.FindFile("runsc/container/test_app/test_app")
		if err != nil {
			t.Fatal("error finding test_app:", err)
		}

		socketPath := filepath.Join(dir, "uds_socket")
		defer os.Remove(socketPath)

		spec := testutil.NewSpecWithArgs(app, "uds", "--file", outputPath, "--socket", socketPath)
		spec.Process.User = specs.User{
			UID: uint32(os.Getuid()),
			GID: uint32(os.Getgid()),
		}
		spec.Mounts = []specs.Mount{{
			Type:        "bind",
			Destination: dir,
			Source:      dir,
		}}

		rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
		if err != nil {
			t.Fatalf("error setting up container: %v", err)
		}
		defer os.RemoveAll(rootDir)
		defer os.RemoveAll(bundleDir)

		// Create and start the container.
		args := Args{
			ID:        testutil.UniqueContainerID(),
			Spec:      spec,
			BundleDir: bundleDir,
		}
		cont, err := New(conf, args)
		if err != nil {
			t.Fatalf("error creating container: %v", err)
		}
		defer cont.Destroy()
		if err := cont.Start(conf); err != nil {
			t.Fatalf("error starting container: %v", err)
		}

		// Set the image path, the location where the checkpoint image will be saved.
		imagePath := filepath.Join(dir, "test-image-file")

		// Create the image file and open for writing.
		file, err := os.OpenFile(imagePath, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0644)
		if err != nil {
			t.Fatalf("error opening new file at imagePath: %v", err)
		}
		defer file.Close()
		defer os.RemoveAll(imagePath)

		// Wait until the application has run.
		if err := waitForFile(outputFile); err != nil {
			t.Fatalf("Failed to wait for output file: %v", err)
		}

		// Checkpoint running container; save state into new file.
		if err := cont.Checkpoint(file); err != nil {
			t.Fatalf("error checkpointing container to empty file: %v", err)
		}

		// Read the last number written before the checkpoint.
		lastNum, err := readOutputNum(outputPath, -1)
		if err != nil {
			t.Fatalf("error with outputFile: %v", err)
		}

		// Delete and recreate file before restoring.
		if err := os.Remove(outputPath); err != nil {
			t.Fatalf("error removing file: %v", err)
		}
		outputFile2, err := os.OpenFile(outputPath, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0666)
		if err != nil {
			t.Fatalf("error creating output file: %v", err)
		}
		defer outputFile2.Close()

		// Restore into a new container.
		argsRestore := Args{
			ID:        testutil.UniqueContainerID(),
			Spec:      spec,
			BundleDir: bundleDir,
		}
		contRestore, err := New(conf, argsRestore)
		if err != nil {
			t.Fatalf("error creating container: %v", err)
		}
		defer contRestore.Destroy()

		if err := contRestore.Restore(spec, conf, imagePath); err != nil {
			t.Fatalf("error restoring container: %v", err)
		}

		// Wait until the application has run.
		if err := waitForFile(outputFile2); err != nil {
			t.Fatalf("Failed to wait for output file: %v", err)
		}

		// Read the first number written after the restore.
		firstNum, err := readOutputNum(outputPath, 0)
		if err != nil {
			t.Fatalf("error with outputFile: %v", err)
		}

		// Check that lastNum is one less than firstNum.
		if lastNum+1 != firstNum {
			t.Errorf("error numbers not consecutive, previous: %d, next: %d", lastNum, firstNum)
		}
		contRestore.Destroy()
	}
}

// TestPauseResume tests that we can successfully pause and resume a container.
// The container runs a sleep process and execs a bash script that loops while
// a lock file exists. The test pauses the container, removes the lock file,
// and checks that both processes still exist (the paused script cannot observe
// the removal). It then resumes the container and verifies that the script
// exits.
func TestPauseResume(t *testing.T) {
	for _, conf := range configs(noOverlay...) {
		t.Logf("Running test with conf: %+v", conf)

		const uid = 343
		spec := testutil.NewSpecWithArgs("sleep", "20")

		lock, err := ioutil.TempFile(testutil.TmpDir(), "lock")
		if err != nil {
			t.Fatalf("error creating lock file: %v", err)
		}
		defer lock.Close()

		rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
		if err != nil {
			t.Fatalf("error setting up container: %v", err)
		}
		defer os.RemoveAll(rootDir)
		defer os.RemoveAll(bundleDir)

		// Create and start the container.
		args := Args{
			ID:        testutil.UniqueContainerID(),
			Spec:      spec,
			BundleDir: bundleDir,
		}
		cont, err := New(conf, args)
		if err != nil {
			t.Fatalf("error creating container: %v", err)
		}
		defer cont.Destroy()
		if err := cont.Start(conf); err != nil {
			t.Fatalf("error starting container: %v", err)
		}

		// expectedPL lists the expected process state of the container.
		expectedPL := []*control.Process{
			{
				UID:  0,
				PID:  1,
				PPID: 0,
				C:    0,
				Cmd:  "sleep",
			},
			{
				UID:  uid,
				PID:  2,
				PPID: 0,
				C:    0,
				Cmd:  "bash",
			},
		}

		script := fmt.Sprintf("while [[ -f %q ]]; do sleep 0.1; done", lock.Name())
		execArgs := &control.ExecArgs{
			Filename:         "/bin/bash",
			Argv:             []string{"bash", "-c", script},
			WorkingDirectory: "/",
			KUID:             uid,
		}

		// First, start running exec.
		_, err = cont.Execute(execArgs)
		if err != nil {
			t.Fatalf("error executing: %v", err)
		}

		// Verify that "sleep" and "bash" are both running.
		if err := waitForProcessList(cont, expectedPL); err != nil {
			t.Fatal(err)
		}

		// Pause the running container.
		if err := cont.Pause(); err != nil {
			t.Errorf("error pausing container: %v", err)
		}
		if got, want := cont.Status, Paused; got != want {
			t.Errorf("container status got %v, want %v", got, want)
		}

		if err := os.Remove(lock.Name()); err != nil {
			t.Fatalf("os.Remove(lock) failed: %v", err)
		}
		// Script loops and sleeps for 100ms. Give it a bit of time to exit in
		// case pause didn't work.
		time.Sleep(200 * time.Millisecond)

		// Verify that the two processes still exist.
		if err := getAndCheckProcLists(cont, expectedPL); err != nil {
			t.Fatal(err)
		}

		// Resume the running container.
		if err := cont.Resume(); err != nil {
			t.Errorf("error resuming container: %v", err)
		}
		if got, want := cont.Status, Running; got != want {
			t.Errorf("container status got %v, want %v", got, want)
		}

		expectedPL2 := []*control.Process{
			{
				UID:  0,
				PID:  1,
				PPID: 0,
				C:    0,
				Cmd:  "sleep",
			},
		}

		// Verify that deleting the file triggered the process to exit.
		if err := waitForProcessList(cont, expectedPL2); err != nil {
			t.Fatal(err)
		}
	}
}

// TestPauseResumeStatus makes sure that the statuses are set correctly
// with calls to pause and resume and that pausing and resuming only
// occurs given the correct state.
func TestPauseResumeStatus(t *testing.T) {
	spec := testutil.NewSpecWithArgs("sleep", "20")
	conf := testutil.TestConfig()
	rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
	if err != nil {
		t.Fatalf("error setting up container: %v", err)
	}
	defer os.RemoveAll(rootDir)
	defer os.RemoveAll(bundleDir)

	// Create and start the container.
	args := Args{
		ID:        testutil.UniqueContainerID(),
		Spec:      spec,
		BundleDir: bundleDir,
	}
	cont, err := New(conf, args)
	if err != nil {
		t.Fatalf("error creating container: %v", err)
	}
	defer cont.Destroy()
	if err := cont.Start(conf); err != nil {
		t.Fatalf("error starting container: %v", err)
	}

	// Pause the running container.
	if err := cont.Pause(); err != nil {
		t.Errorf("error pausing container: %v", err)
	}
	if got, want := cont.Status, Paused; got != want {
		t.Errorf("container status got %v, want %v", got, want)
	}

	// Try to Pause again. Should cause error.
	if err := cont.Pause(); err == nil {
		t.Errorf("expected error pausing container that was already paused")
	}
	if got, want := cont.Status, Paused; got != want {
		t.Errorf("container status got %v, want %v", got, want)
	}

	// Resume the running container.
	if err := cont.Resume(); err != nil {
		t.Errorf("error resuming container: %v", err)
	}
	if got, want := cont.Status, Running; got != want {
		t.Errorf("container status got %v, want %v", got, want)
	}

	// Try to resume again. Should cause error.
	if err := cont.Resume(); err == nil {
		t.Errorf("expected error resuming container that was already running")
	}
	if got, want := cont.Status, Running; got != want {
		t.Errorf("container status got %v, want %v", got, want)
	}
}

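// The tests above read cont.Status directly after each Pause/Resume call. As a
// sketch of an alternative, a hypothetical helper like waitForStatus below
// could poll the saved state instead. It is not used by these tests and
// assumes the package's Status type, as the Paused/Running constants above
// suggest, and that Load (used elsewhere in this file) returns a fresh view of
// the container.
func waitForStatus(rootDir, cid string, want Status, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		// Reload the container to observe status changes made by other
		// instances of the container object.
		c, err := Load(rootDir, cid)
		if err != nil {
			return fmt.Errorf("error loading container %q: %v", cid, err)
		}
		if c.Status == want {
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("timed out waiting for status %v, last status: %v", want, c.Status)
		}
		time.Sleep(10 * time.Millisecond)
	}
}
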
// TestCapabilities verifies that:
// - Running exec as non-root UID and GID will result in an error (because the
//   executable file can't be read).
// - Running exec as non-root with CAP_DAC_OVERRIDE succeeds because it skips
//   this check.
func TestCapabilities(t *testing.T) {
	// Pick uid/gid different from ours.
	uid := auth.KUID(os.Getuid() + 1)
	gid := auth.KGID(os.Getgid() + 1)

	for _, conf := range configs(all...) {
		t.Logf("Running test with conf: %+v", conf)

		spec := testutil.NewSpecWithArgs("sleep", "100")
		rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
		if err != nil {
			t.Fatalf("error setting up container: %v", err)
		}
		defer os.RemoveAll(rootDir)
		defer os.RemoveAll(bundleDir)

		// Create and start the container.
		args := Args{
			ID:        testutil.UniqueContainerID(),
			Spec:      spec,
			BundleDir: bundleDir,
		}
		cont, err := New(conf, args)
		if err != nil {
			t.Fatalf("error creating container: %v", err)
		}
		defer cont.Destroy()
		if err := cont.Start(conf); err != nil {
			t.Fatalf("error starting container: %v", err)
		}

		// expectedPL lists the expected process state of the container.
		expectedPL := []*control.Process{
			{
				UID:  0,
				PID:  1,
				PPID: 0,
				C:    0,
				Cmd:  "sleep",
			},
			{
				UID:  uid,
				PID:  2,
				PPID: 0,
				C:    0,
				Cmd:  "exe",
			},
		}
		if err := waitForProcessList(cont, expectedPL[:1]); err != nil {
			t.Fatalf("Failed to wait for sleep to start, err: %v", err)
		}

		// Create an executable that can't be run with the specified UID:GID.
		// This shouldn't be callable within the container until we add the
		// CAP_DAC_OVERRIDE capability to skip the access check.
		exePath := filepath.Join(rootDir, "exe")
		if err := ioutil.WriteFile(exePath, []byte("#!/bin/sh\necho hello"), 0770); err != nil {
			t.Fatalf("couldn't create executable: %v", err)
		}
		defer os.Remove(exePath)

		// Need to traverse the intermediate directory.
		os.Chmod(rootDir, 0755)

		execArgs := &control.ExecArgs{
			Filename:         exePath,
			Argv:             []string{exePath},
			WorkingDirectory: "/",
			KUID:             uid,
			KGID:             gid,
			Capabilities:     &auth.TaskCapabilities{},
		}

		// "exe" should fail because we don't have the necessary permissions.
		if _, err := cont.executeSync(execArgs); err == nil {
			t.Fatalf("container executed without error, but an error was expected")
		}

		// Now we run with the capability enabled and should succeed.
		execArgs.Capabilities = &auth.TaskCapabilities{
			EffectiveCaps: auth.CapabilitySetOf(linux.CAP_DAC_OVERRIDE),
		}
		// "exe" should not fail this time.
		if _, err := cont.executeSync(execArgs); err != nil {
			t.Fatalf("container failed to exec %v: %v", execArgs, err)
		}
	}
}

// TestRunNonRoot checks that the sandbox can be configured when running as a
// non-privileged user.
func TestRunNonRoot(t *testing.T) {
	for _, conf := range configs(noOverlay...) {
		t.Logf("Running test with conf: %+v", conf)

		spec := testutil.NewSpecWithArgs("/bin/true")

		// Set a random user/group with no access to "blocked" dir.
		spec.Process.User.UID = 343
		spec.Process.User.GID = 2401
		spec.Process.Capabilities = nil

		// User running inside container can't list '$TMP/blocked' and would fail
		// to mount it.
		dir, err := ioutil.TempDir(testutil.TmpDir(), "blocked")
		if err != nil {
			t.Fatalf("ioutil.TempDir() failed: %v", err)
		}
		if err := os.Chmod(dir, 0700); err != nil {
			t.Fatalf("os.Chmod(%q) failed: %v", dir, err)
		}
		dir = path.Join(dir, "test")
		if err := os.Mkdir(dir, 0755); err != nil {
			t.Fatalf("os.Mkdir(%q) failed: %v", dir, err)
		}

		src, err := ioutil.TempDir(testutil.TmpDir(), "src")
		if err != nil {
			t.Fatalf("ioutil.TempDir() failed: %v", err)
		}

		spec.Mounts = append(spec.Mounts, specs.Mount{
			Destination: dir,
			Source:      src,
			Type:        "bind",
		})

		if err := run(spec, conf); err != nil {
			t.Fatalf("error running sandbox: %v", err)
		}
	}
}

// TestMountNewDir checks that runsc will create the destination directory if
// it doesn't exist.
func TestMountNewDir(t *testing.T) {
	for _, conf := range configs(overlay) {
		t.Logf("Running test with conf: %+v", conf)

		root, err := ioutil.TempDir(testutil.TmpDir(), "root")
		if err != nil {
			t.Fatal("ioutil.TempDir() failed:", err)
		}

		srcDir := path.Join(root, "src", "dir", "anotherdir")
		if err := os.MkdirAll(srcDir, 0755); err != nil {
			t.Fatalf("os.MkdirAll(%q) failed: %v", srcDir, err)
		}

		mountDir := path.Join(root, "dir", "anotherdir")

		spec := testutil.NewSpecWithArgs("/bin/ls", mountDir)
		spec.Mounts = append(spec.Mounts, specs.Mount{
			Destination: mountDir,
			Source:      srcDir,
			Type:        "bind",
		})

		if err := run(spec, conf); err != nil {
			t.Fatalf("error running sandbox: %v", err)
		}
	}
}

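// TestReadonlyRoot verifies that a write to a read-only root filesystem fails
// with EPERM.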
func TestReadonlyRoot(t *testing.T) {
	for _, conf := range configs(overlay) {
		t.Logf("Running test with conf: %+v", conf)

		spec := testutil.NewSpecWithArgs("/bin/touch", "/foo")
		spec.Root.Readonly = true
		rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
		if err != nil {
			t.Fatalf("error setting up container: %v", err)
		}
		defer os.RemoveAll(rootDir)
		defer os.RemoveAll(bundleDir)

		// Create, start and wait for the container.
		args := Args{
			ID:        testutil.UniqueContainerID(),
			Spec:      spec,
			BundleDir: bundleDir,
		}
		c, err := New(conf, args)
		if err != nil {
			t.Fatalf("error creating container: %v", err)
		}
		defer c.Destroy()
		if err := c.Start(conf); err != nil {
			t.Fatalf("error starting container: %v", err)
		}

		ws, err := c.Wait()
		if err != nil {
			t.Fatalf("error waiting on container: %v", err)
		}
		if !ws.Exited() || syscall.Errno(ws.ExitStatus()) != syscall.EPERM {
			t.Fatalf("container failed, waitStatus: %v", ws)
		}
	}
}

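// TestUIDMap checks that a container running in a user namespace with UID/GID
// mappings writes files on a bind mount as the mapped host user.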
func TestUIDMap(t *testing.T) {
	for _, conf := range configs(noOverlay...) {
		t.Logf("Running test with conf: %+v", conf)
		testDir, err := ioutil.TempDir(testutil.TmpDir(), "test-mount")
		if err != nil {
			t.Fatal(err)
		}
		defer os.RemoveAll(testDir)
		testFile := path.Join(testDir, "testfile")

		spec := testutil.NewSpecWithArgs("touch", "/tmp/testfile")
		uid := os.Getuid()
		gid := os.Getgid()
		spec.Linux = &specs.Linux{
			Namespaces: []specs.LinuxNamespace{
				{Type: specs.UserNamespace},
				{Type: specs.PIDNamespace},
				{Type: specs.MountNamespace},
			},
			UIDMappings: []specs.LinuxIDMapping{
				{
					ContainerID: 0,
					HostID:      uint32(uid),
					Size:        1,
				},
			},
			GIDMappings: []specs.LinuxIDMapping{
				{
					ContainerID: 0,
					HostID:      uint32(gid),
					Size:        1,
				},
			},
		}

		spec.Mounts = append(spec.Mounts, specs.Mount{
			Destination: "/tmp",
			Source:      testDir,
			Type:        "bind",
		})

		rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
		if err != nil {
			t.Fatalf("error setting up container: %v", err)
		}
		defer os.RemoveAll(rootDir)
		defer os.RemoveAll(bundleDir)

		// Create, start and wait for the container.
		args := Args{
			ID:        testutil.UniqueContainerID(),
			Spec:      spec,
			BundleDir: bundleDir,
		}
		c, err := New(conf, args)
		if err != nil {
			t.Fatalf("error creating container: %v", err)
		}
		defer c.Destroy()
		if err := c.Start(conf); err != nil {
			t.Fatalf("error starting container: %v", err)
		}

		ws, err := c.Wait()
		if err != nil {
			t.Fatalf("error waiting on container: %v", err)
		}
		if !ws.Exited() || ws.ExitStatus() != 0 {
			t.Fatalf("container failed, waitStatus: %v", ws)
		}
		st := syscall.Stat_t{}
		if err := syscall.Stat(testFile, &st); err != nil {
			t.Fatalf("error stat %q: %v", testFile, err)
		}

		if st.Uid != uint32(uid) || st.Gid != uint32(gid) {
			t.Fatalf("file got UID %d (want %d), GID %d (want %d)", st.Uid, uid, st.Gid, gid)
		}
	}
}

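// TestReadonlyMount verifies that a write to a bind mount with the "ro"
// option fails with EPERM even when the root filesystem is writable.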
func TestReadonlyMount(t *testing.T) {
	for _, conf := range configs(overlay) {
		t.Logf("Running test with conf: %+v", conf)

		dir, err := ioutil.TempDir(testutil.TmpDir(), "ro-mount")
		if err != nil {
			t.Fatalf("ioutil.TempDir() failed: %v", err)
		}
		spec := testutil.NewSpecWithArgs("/bin/touch", path.Join(dir, "file"))
		spec.Mounts = append(spec.Mounts, specs.Mount{
			Destination: dir,
			Source:      dir,
			Type:        "bind",
			Options:     []string{"ro"},
		})
		spec.Root.Readonly = false

		rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
		if err != nil {
			t.Fatalf("error setting up container: %v", err)
		}
		defer os.RemoveAll(rootDir)
		defer os.RemoveAll(bundleDir)

		// Create, start and wait for the container.
		args := Args{
			ID:        testutil.UniqueContainerID(),
			Spec:      spec,
			BundleDir: bundleDir,
		}
		c, err := New(conf, args)
		if err != nil {
			t.Fatalf("error creating container: %v", err)
		}
		defer c.Destroy()
		if err := c.Start(conf); err != nil {
			t.Fatalf("error starting container: %v", err)
		}

		ws, err := c.Wait()
		if err != nil {
			t.Fatalf("error waiting on container: %v", err)
		}
		if !ws.Exited() || syscall.Errno(ws.ExitStatus()) != syscall.EPERM {
			t.Fatalf("container failed, waitStatus: %v", ws)
		}
	}
}

// TestAbbreviatedIDs checks that runsc supports using abbreviated container
// IDs in place of full IDs.
func TestAbbreviatedIDs(t *testing.T) {
	rootDir, err := testutil.SetupRootDir()
	if err != nil {
		t.Fatalf("error creating root dir: %v", err)
	}
	defer os.RemoveAll(rootDir)

	conf := testutil.TestConfigWithRoot(rootDir)

	cids := []string{
		"foo-" + testutil.UniqueContainerID(),
		"bar-" + testutil.UniqueContainerID(),
		"baz-" + testutil.UniqueContainerID(),
	}
	for _, cid := range cids {
		spec := testutil.NewSpecWithArgs("sleep", "100")
		bundleDir, err := testutil.SetupBundleDir(spec)
		if err != nil {
			t.Fatalf("error setting up container: %v", err)
		}
		defer os.RemoveAll(bundleDir)

		// Create and start the container.
		args := Args{
			ID:        cid,
			Spec:      spec,
			BundleDir: bundleDir,
		}
		cont, err := New(conf, args)
		if err != nil {
			t.Fatalf("error creating container: %v", err)
		}
		defer cont.Destroy()
	}

	// These should all be unambiguous.
	unambiguous := map[string]string{
		"f":     cids[0],
		cids[0]: cids[0],
		"bar":   cids[1],
		cids[1]: cids[1],
		"baz":   cids[2],
		cids[2]: cids[2],
	}
	for shortid, longid := range unambiguous {
		if _, err := Load(rootDir, shortid); err != nil {
			t.Errorf("%q should resolve to %q: %v", shortid, longid, err)
		}
	}

	// These should be ambiguous.
	ambiguous := []string{
		"b",
		"ba",
	}
	for _, shortid := range ambiguous {
		if s, err := Load(rootDir, shortid); err == nil {
			t.Errorf("%q should be ambiguous, but resolved to %q", shortid, s.ID)
		}
	}
}

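// TestGoferExits checks that the gofer process exits on its own when the
// sandbox process is killed.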
func TestGoferExits(t *testing.T) {
	spec := testutil.NewSpecWithArgs("/bin/sleep", "10000")
	conf := testutil.TestConfig()
	rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
	if err != nil {
		t.Fatalf("error setting up container: %v", err)
	}
	defer os.RemoveAll(rootDir)
	defer os.RemoveAll(bundleDir)

	// Create and start the container.
	args := Args{
		ID:        testutil.UniqueContainerID(),
		Spec:      spec,
		BundleDir: bundleDir,
	}
	c, err := New(conf, args)
	if err != nil {
		t.Fatalf("error creating container: %v", err)
	}
	defer c.Destroy()
	if err := c.Start(conf); err != nil {
		t.Fatalf("error starting container: %v", err)
	}

	// Kill sandbox and expect gofer to exit on its own.
	sandboxProc, err := os.FindProcess(c.Sandbox.Pid)
	if err != nil {
		t.Fatalf("error finding sandbox process: %v", err)
	}
	if err := sandboxProc.Kill(); err != nil {
		t.Fatalf("error killing sandbox process: %v", err)
	}

	err = blockUntilWaitable(c.GoferPid)
	if err != nil && err != syscall.ECHILD {
		t.Errorf("error waiting for gofer to exit: %v", err)
	}
}

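// As a sketch of what blockUntilWaitable (defined elsewhere in this package)
// accomplishes above, a hypothetical helper could reap a direct child with
// syscall.Wait4, tolerating ECHILD the same way the test does. It assumes the
// PID is a direct child of the test process and is not used by these tests.
func waitForChildExit(pid int) error {
	var ws syscall.WaitStatus
	// Wait4 blocks until the child changes state; ECHILD means it was
	// already reaped, which is fine for our purposes.
	if _, err := syscall.Wait4(pid, &ws, 0, nil); err != nil && err != syscall.ECHILD {
		return fmt.Errorf("wait4(%d) failed: %v", pid, err)
	}
	return nil
}
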
// TestRootNotMount checks that the container works when the root path is not
// a mount point (here, the directory containing the test binary).
func TestRootNotMount(t *testing.T) {
	appSym, err := testutil.FindFile("runsc/container/test_app/test_app")
	if err != nil {
		t.Fatal("error finding test_app:", err)
	}

	app, err := filepath.EvalSymlinks(appSym)
	if err != nil {
		t.Fatalf("error resolving %q symlink: %v", appSym, err)
	}
	log.Infof("App path %q is a symlink to %q", appSym, app)

	static, err := testutil.IsStatic(app)
	if err != nil {
		t.Fatalf("error reading application binary: %v", err)
	}
	if !static {
		// Race builds produce dynamically linked binaries, and the shared
		// libraries cannot be mapped inside this minimal root, so the test
		// must be skipped.
		t.Skip()
	}

	root := filepath.Dir(app)
	exe := "/" + filepath.Base(app)
	log.Infof("Executing %q in %q", exe, root)

	spec := testutil.NewSpecWithArgs(exe, "help")
	spec.Root.Path = root
	spec.Root.Readonly = true
	spec.Mounts = nil

	conf := testutil.TestConfig()
	if err := run(spec, conf); err != nil {
		t.Fatalf("error running sandbox: %v", err)
	}
}

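// TestUserLog checks that messages about unsupported syscalls reach the user
// log file configured through Args.UserLog.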
func TestUserLog(t *testing.T) {
	app, err := testutil.FindFile("runsc/container/test_app/test_app")
	if err != nil {
		t.Fatal("error finding test_app:", err)
	}

	// sched_rr_get_interval (syscall 148) is not implemented in gVisor.
	spec := testutil.NewSpecWithArgs(app, "syscall", "--syscall=148")
	conf := testutil.TestConfig()
	rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
	if err != nil {
		t.Fatalf("error setting up container: %v", err)
	}
	defer os.RemoveAll(rootDir)
	defer os.RemoveAll(bundleDir)

	dir, err := ioutil.TempDir(testutil.TmpDir(), "user_log_test")
	if err != nil {
		t.Fatalf("error creating tmp dir: %v", err)
	}
	userLog := filepath.Join(dir, "user.log")

	// Create, start and wait for the container.
	args := Args{
		ID:        testutil.UniqueContainerID(),
		Spec:      spec,
		BundleDir: bundleDir,
		UserLog:   userLog,
		Attached:  true,
	}
	ws, err := Run(conf, args)
	if err != nil {
		t.Fatalf("error running container: %v", err)
	}
	if !ws.Exited() || ws.ExitStatus() != 0 {
		t.Fatalf("container failed, waitStatus: %v", ws)
	}

	out, err := ioutil.ReadFile(userLog)
	if err != nil {
		t.Fatalf("error opening user log file %q: %v", userLog, err)
	}
	if want := "Unsupported syscall: sched_rr_get_interval"; !strings.Contains(string(out), want) {
		t.Errorf("user log file doesn't contain %q, out: %s", want, string(out))
	}
}

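// TestWaitOnExitedSandbox checks that Wait returns the application's exit
// status, and keeps returning it on repeated calls after the sandbox exits.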
func TestWaitOnExitedSandbox(t *testing.T) {
	for _, conf := range configs(all...) {
		t.Logf("Running test with conf: %+v", conf)

		// Run a shell that sleeps for 1 second and then exits with a
		// non-zero code.
		const wantExit = 17
		cmd := fmt.Sprintf("sleep 1; exit %d", wantExit)
		spec := testutil.NewSpecWithArgs("/bin/sh", "-c", cmd)
		rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
		if err != nil {
			t.Fatalf("error setting up container: %v", err)
		}
		defer os.RemoveAll(rootDir)
		defer os.RemoveAll(bundleDir)

		// Create and Start the container.
		args := Args{
			ID:        testutil.UniqueContainerID(),
			Spec:      spec,
			BundleDir: bundleDir,
		}
		c, err := New(conf, args)
		if err != nil {
			t.Fatalf("error creating container: %v", err)
		}
		defer c.Destroy()
		if err := c.Start(conf); err != nil {
			t.Fatalf("error starting container: %v", err)
		}

		// Wait on the sandbox. This will make an RPC to the sandbox
		// and get the actual exit status of the application.
		ws, err := c.Wait()
		if err != nil {
			t.Fatalf("error waiting on container: %v", err)
		}
		if got := ws.ExitStatus(); got != wantExit {
			t.Errorf("got exit status %d, want %d", got, wantExit)
		}

		// Now the sandbox has exited, but the zombie sandbox process
		// still exists. Calling Wait() now will return the sandbox
		// exit status.
		ws, err = c.Wait()
		if err != nil {
			t.Fatalf("error waiting on container: %v", err)
		}
		if got := ws.ExitStatus(); got != wantExit {
			t.Errorf("got exit status %d, want %d", got, wantExit)
		}
	}
}

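// TestDestroyNotStarted checks that a container that was created but never
// started can still be destroyed.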
func TestDestroyNotStarted(t *testing.T) {
	spec := testutil.NewSpecWithArgs("/bin/sleep", "100")
	conf := testutil.TestConfig()
	rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
	if err != nil {
		t.Fatalf("error setting up container: %v", err)
	}
	defer os.RemoveAll(rootDir)
	defer os.RemoveAll(bundleDir)

	// Create the container and check that it can be destroyed.
	args := Args{
		ID:        testutil.UniqueContainerID(),
		Spec:      spec,
		BundleDir: bundleDir,
	}
	c, err := New(conf, args)
	if err != nil {
		t.Fatalf("error creating container: %v", err)
	}
	if err := c.Destroy(); err != nil {
		t.Fatalf("deleting non-started container failed: %v", err)
	}
}

// TestDestroyStarting attempts to force a race between start and destroy.
func TestDestroyStarting(t *testing.T) {
	for i := 0; i < 10; i++ {
		spec := testutil.NewSpecWithArgs("/bin/sleep", "100")
		conf := testutil.TestConfig()
		rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
		if err != nil {
			t.Fatalf("error setting up container: %v", err)
		}
		defer os.RemoveAll(rootDir)
		defer os.RemoveAll(bundleDir)

		// Create the container, but don't start it yet.
		args := Args{
			ID:        testutil.UniqueContainerID(),
			Spec:      spec,
			BundleDir: bundleDir,
		}
		c, err := New(conf, args)
		if err != nil {
			t.Fatalf("error creating container: %v", err)
		}

		// Container is not thread safe, so load another instance to run in
		// concurrently.
		startCont, err := Load(rootDir, args.ID)
		if err != nil {
			t.Fatalf("error loading container: %v", err)
		}
		wg := sync.WaitGroup{}
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Ignore failures, start can fail if destroy runs first.
			startCont.Start(conf)
		}()

		wg.Add(1)
		go func() {
			defer wg.Done()
			if err := c.Destroy(); err != nil {
				t.Errorf("destroying container failed: %v", err)
			}
		}()
		wg.Wait()
	}
}

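// TestCreateWorkingDir checks that the configured working directory is
// created inside the container when it doesn't exist.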
func TestCreateWorkingDir(t *testing.T) {
	for _, conf := range configs(overlay) {
		t.Logf("Running test with conf: %+v", conf)

		tmpDir, err := ioutil.TempDir(testutil.TmpDir(), "cwd-create")
		if err != nil {
			t.Fatalf("ioutil.TempDir() failed: %v", err)
		}
		dir := path.Join(tmpDir, "new/working/dir")

		// touch will fail if the directory doesn't exist.
		spec := testutil.NewSpecWithArgs("/bin/touch", path.Join(dir, "file"))
		spec.Process.Cwd = dir
		spec.Root.Readonly = true

		if err := run(spec, conf); err != nil {
			t.Fatalf("Error running container: %v", err)
		}
	}
}

// TestMountPropagation verifies that mount propagates to slave but not to
// private mounts.
func TestMountPropagation(t *testing.T) {
	// Setup dir structure:
	// - src: is mounted as shared and is used as source for both private and
	//   slave mounts
	// - dir: will be bind mounted inside src and should propagate to slave
	tmpDir, err := ioutil.TempDir(testutil.TmpDir(), "mount")
	if err != nil {
		t.Fatalf("ioutil.TempDir() failed: %v", err)
	}
	src := filepath.Join(tmpDir, "src")
	srcMnt := filepath.Join(src, "mnt")
	dir := filepath.Join(tmpDir, "dir")
	for _, path := range []string{src, srcMnt, dir} {
		if err := os.MkdirAll(path, 0777); err != nil {
			t.Fatalf("MkdirAll(%q): %v", path, err)
		}
	}
	dirFile := filepath.Join(dir, "file")
	f, err := os.Create(dirFile)
	if err != nil {
		t.Fatalf("os.Create(%q): %v", dirFile, err)
	}
	f.Close()

	// Setup src as a shared mount.
	if err := syscall.Mount(src, src, "bind", syscall.MS_BIND, ""); err != nil {
		t.Fatalf("mount(%q, %q, MS_BIND): %v", src, src, err)
	}
	if err := syscall.Mount("", src, "", syscall.MS_SHARED, ""); err != nil {
		t.Fatalf("mount(%q, MS_SHARED): %v", src, err)
	}

	spec := testutil.NewSpecWithArgs("sleep", "1000")

	priv := filepath.Join(tmpDir, "priv")
	slave := filepath.Join(tmpDir, "slave")
	spec.Mounts = []specs.Mount{
		{
			Source:      src,
			Destination: priv,
			Type:        "bind",
			Options:     []string{"private"},
		},
		{
			Source:      src,
			Destination: slave,
			Type:        "bind",
			Options:     []string{"slave"},
		},
	}

	conf := testutil.TestConfig()
	rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
	if err != nil {
		t.Fatalf("error setting up container: %v", err)
	}
	defer os.RemoveAll(rootDir)
	defer os.RemoveAll(bundleDir)

	args := Args{
		ID:        testutil.UniqueContainerID(),
		Spec:      spec,
		BundleDir: bundleDir,
	}
	cont, err := New(conf, args)
	if err != nil {
		t.Fatalf("creating container: %v", err)
	}
	defer cont.Destroy()

	if err := cont.Start(conf); err != nil {
		t.Fatalf("starting container: %v", err)
	}

	// After the container is started, mount dir inside source and check what
	// happens to both destinations.
	if err := syscall.Mount(dir, srcMnt, "bind", syscall.MS_BIND, ""); err != nil {
		t.Fatalf("mount(%q, %q, MS_BIND): %v", dir, srcMnt, err)
	}

	// Check that mount didn't propagate to private mount.
	privFile := filepath.Join(priv, "mnt", "file")
	execArgs := &control.ExecArgs{
		Filename: "/usr/bin/test",
		Argv:     []string{"test", "!", "-f", privFile},
	}
	if ws, err := cont.executeSync(execArgs); err != nil || ws != 0 {
		t.Fatalf("exec: test ! -f %q, ws: %v, err: %v", privFile, ws, err)
	}

	// Check that mount propagated to slave mount.
	slaveFile := filepath.Join(slave, "mnt", "file")
	execArgs = &control.ExecArgs{
		Filename: "/usr/bin/test",
		Argv:     []string{"test", "-f", slaveFile},
	}
	if ws, err := cont.executeSync(execArgs); err != nil || ws != 0 {
		t.Fatalf("exec: test -f %q, ws: %v, err: %v", slaveFile, ws, err)
	}
}

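// TestMountSymlink checks that a mount whose destination is a symlink is
// mounted at the symlink's target.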
func TestMountSymlink(t *testing.T) {
	for _, conf := range configs(overlay) {
		t.Logf("Running test with conf: %+v", conf)

		dir, err := ioutil.TempDir(testutil.TmpDir(), "mount-symlink")
		if err != nil {
			t.Fatalf("ioutil.TempDir() failed: %v", err)
		}

		source := path.Join(dir, "source")
		target := path.Join(dir, "target")
		for _, path := range []string{source, target} {
			if err := os.MkdirAll(path, 0777); err != nil {
				t.Fatalf("os.MkdirAll(%q): %v", path, err)
			}
		}
		f, err := os.Create(path.Join(source, "file"))
		if err != nil {
			t.Fatalf("os.Create(): %v", err)
		}
		f.Close()

		link := path.Join(dir, "link")
		if err := os.Symlink(target, link); err != nil {
			t.Fatalf("os.Symlink(%q, %q): %v", target, link, err)
		}

		spec := testutil.NewSpecWithArgs("/bin/sleep", "1000")

		// Mount to a symlink to ensure the mount code will follow it and mount
		// at the symlink target.
		spec.Mounts = append(spec.Mounts, specs.Mount{
			Type:        "bind",
			Destination: link,
			Source:      source,
		})

		rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
		if err != nil {
			t.Fatalf("error setting up container: %v", err)
		}
		defer os.RemoveAll(rootDir)
		defer os.RemoveAll(bundleDir)

		args := Args{
			ID:        testutil.UniqueContainerID(),
			Spec:      spec,
			BundleDir: bundleDir,
		}
		cont, err := New(conf, args)
		if err != nil {
			t.Fatalf("creating container: %v", err)
		}
		defer cont.Destroy()

		if err := cont.Start(conf); err != nil {
			t.Fatalf("starting container: %v", err)
		}

		// Check that symlink was resolved and mount was created where the
		// symlink is pointing to.
		file := path.Join(target, "file")
		execArgs := &control.ExecArgs{
			Filename: "/usr/bin/test",
			Argv:     []string{"test", "-f", file},
		}
		if ws, err := cont.executeSync(execArgs); err != nil || ws != 0 {
			t.Fatalf("exec: test -f %q, ws: %v, err: %v", file, ws, err)
		}
	}
}

// executeSync synchronously executes a new process.
func (cont *Container) executeSync(args *control.ExecArgs) (syscall.WaitStatus, error) {
	pid, err := cont.Execute(args)
	if err != nil {
		return 0, fmt.Errorf("error executing: %v", err)
	}
	ws, err := cont.WaitPID(pid)
	if err != nil {
		return 0, fmt.Errorf("error waiting: %v", err)
	}
	return ws, nil
}

func TestMain(m *testing.M) {
	log.SetLevel(log.Debug)
	flag.Parse()
	if err := testutil.ConfigureExePath(); err != nil {
		panic(err.Error())
	}
	// Re-execute the test as root inside a new user namespace if it doesn't
	// have the capabilities needed to create a sandbox.
	specutils.MaybeRunAsRoot()
	os.Exit(m.Run())
}