Port Startup and Density Benchmarks.

PiperOrigin-RevId: 325497346
Zach Koopmans 2020-08-07 13:28:11 -07:00 committed by gVisor bot
parent 10c13bccaf
commit a7bd0a7012
12 changed files with 546 additions and 11 deletions
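At a high level, the new density benchmarks follow Go's standard testing.B pattern: read MemAvailable from /proc/meminfo, start b.N containers, read it again, and report the per-container delta in bytes. A minimal sketch of that pattern, where readMemAvailable and startContainer are hypothetical stand-ins for the tools.Meminfo and dockerutil calls in the diffs below:

package sketch

import "testing"

// readMemAvailable and startContainer are hypothetical stand-ins for the
// tools.Meminfo and dockerutil calls used in the real tests.
func readMemAvailable() float64 { return 0 } // kB, as parsed from /proc/meminfo

func startContainer() {}

// BenchmarkDensityPattern sketches the measurement loop used by the
// BenchmarkSize* tests in this commit.
func BenchmarkDensityPattern(b *testing.B) {
	before := readMemAvailable()
	for i := 0; i < b.N; i++ {
		startContainer()
	}
	after := readMemAvailable()
	// Average bytes consumed per container (readings are in kB).
	b.ReportMetric(1024*(before-after)/float64(b.N), "average_container_size_bytes")
}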

View File

@@ -0,0 +1 @@
FROM alpine:latest

View File

@@ -0,0 +1,3 @@
FROM ubuntu:bionic
RUN apt-get update && apt-get install -y wget

View File

@@ -5,14 +5,20 @@ package(licenses = ["notice"])
go_library(
name = "base",
testonly = 1,
srcs = ["base.go"],
srcs = [
"base.go",
],
deps = ["//test/benchmarks/harness"],
)
go_test(
name = "base_test",
size = "small",
srcs = ["sysbench_test.go"],
size = "large",
srcs = [
"size_test.go",
"startup_test.go",
"sysbench_test.go",
],
library = ":base",
tags = [
# Requires docker and runsc to be configured before test runs.
@@ -21,6 +27,7 @@ go_test(
],
deps = [
"//pkg/test/dockerutil",
"//test/benchmarks/harness",
"//test/benchmarks/tools",
],
)

View File

@@ -22,10 +22,10 @@ import (
"gvisor.dev/gvisor/test/benchmarks/harness"
)
var h harness.Harness
var testHarness harness.Harness
// TestMain is the main method for package base.
func TestMain(m *testing.M) {
h.Init()
testHarness.Init()
os.Exit(m.Run())
}

View File

@@ -0,0 +1,220 @@
// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package base
import (
"context"
"testing"
"time"
"gvisor.dev/gvisor/pkg/test/dockerutil"
"gvisor.dev/gvisor/test/benchmarks/harness"
"gvisor.dev/gvisor/test/benchmarks/tools"
)
// BenchmarkSizeEmpty creates N empty containers and reads memory usage from
// /proc/meminfo.
func BenchmarkSizeEmpty(b *testing.B) {
machine, err := testHarness.GetMachine()
if err != nil {
b.Fatalf("failed to get machine: %v", err)
}
defer machine.CleanUp()
meminfo := tools.Meminfo{}
ctx := context.Background()
containers := make([]*dockerutil.Container, 0, b.N)
// DropCaches before the test.
harness.DropCaches(machine)
// Check available memory on 'machine'.
cmd, args := meminfo.MakeCmd()
before, err := machine.RunCommand(cmd, args...)
if err != nil {
b.Fatalf("failed to get meminfo: %v", err)
}
// Make N containers.
for i := 0; i < b.N; i++ {
container := machine.GetContainer(ctx, b)
containers = append(containers, container)
if err := container.Spawn(ctx, dockerutil.RunOpts{
Image: "benchmarks/alpine",
}, "sh", "-c", "echo Hello && sleep 1000"); err != nil {
cleanUpContainers(ctx, containers)
b.Fatalf("failed to run container: %v", err)
}
if _, err := container.WaitForOutputSubmatch(ctx, "Hello", 5*time.Second); err != nil {
cleanUpContainers(ctx, containers)
b.Fatalf("failed to read container output: %v", err)
}
}
// Drop caches again before second measurement.
harness.DropCaches(machine)
// Check available memory after containers are up.
after, err := machine.RunCommand(cmd, args...)
cleanUpContainers(ctx, containers)
if err != nil {
b.Fatalf("failed to get meminfo: %v", err)
}
meminfo.Report(b, before, after)
}
// BenchmarkSizeNginx starts N containers running Nginx, waits until they're
// serving, and measures memory usage from /proc/meminfo.
func BenchmarkSizeNginx(b *testing.B) {
machine, err := testHarness.GetMachine()
if err != nil {
b.Fatalf("failed to get machine with: %v", err)
}
defer machine.CleanUp()
// DropCaches for the first measurement.
harness.DropCaches(machine)
// Measure MemAvailable before creating containers.
meminfo := tools.Meminfo{}
cmd, args := meminfo.MakeCmd()
before, err := machine.RunCommand(cmd, args...)
if err != nil {
b.Fatalf("failed to run meminfo command: %v", err)
}
// Make N Nginx containers.
ctx := context.Background()
runOpts := dockerutil.RunOpts{
Image: "benchmarks/nginx",
}
const port = 80
servers := startServers(ctx, b,
serverArgs{
machine: machine,
port: port,
runOpts: runOpts,
})
defer cleanUpContainers(ctx, servers)
// DropCaches after servers are created.
harness.DropCaches(machine)
// Take after measurement.
after, err := machine.RunCommand(cmd, args...)
if err != nil {
b.Fatalf("failed to run meminfo command: %v", err)
}
meminfo.Report(b, before, after)
}
// BenchmarkSizeNode starts N containers running a Node app, waits until
// they're serving, and measures memory usage from /proc/meminfo.
func BenchmarkSizeNode(b *testing.B) {
machine, err := testHarness.GetMachine()
if err != nil {
b.Fatalf("failed to get machine with: %v", err)
}
defer machine.CleanUp()
// Make a redis instance for Node to connect to.
ctx := context.Background()
redis, redisIP := redisInstance(ctx, b, machine)
defer redis.CleanUp(ctx)
// DropCaches after redis is created.
harness.DropCaches(machine)
// Take before measurement.
meminfo := tools.Meminfo{}
cmd, args := meminfo.MakeCmd()
before, err := machine.RunCommand(cmd, args...)
if err != nil {
b.Fatalf("failed to run meminfo commend: %v", err)
}
// Create N Node servers.
runOpts := dockerutil.RunOpts{
Image: "benchmarks/node",
WorkDir: "/usr/src/app",
Links: []string{redis.MakeLink("redis")},
}
nodeCmd := []string{"node", "index.js", redisIP.String()}
const port = 8080
servers := startServers(ctx, b,
serverArgs{
machine: machine,
port: port,
runOpts: runOpts,
cmd: nodeCmd,
})
defer cleanUpContainers(ctx, servers)
// DropCaches after servers are created.
harness.DropCaches(machine)
// Take after measurement.
cmd, args = meminfo.MakeCmd()
after, err := machine.RunCommand(cmd, args...)
if err != nil {
b.Fatalf("failed to run meminfo command: %v", err)
}
meminfo.Report(b, before, after)
}
// serverArgs wraps args for startServers and runServerWorkload.
type serverArgs struct {
machine harness.Machine
port int
runOpts dockerutil.RunOpts
cmd []string
}
// startServers starts b.N containers defined by 'runOpts' and 'cmd' and uses
// 'machine' to check that each is up.
func startServers(ctx context.Context, b *testing.B, args serverArgs) []*dockerutil.Container {
b.Helper()
servers := make([]*dockerutil.Container, 0, b.N)
// Create N servers and wait until each of them is serving.
for i := 0; i < b.N; i++ {
server := args.machine.GetContainer(ctx, b)
servers = append(servers, server)
if err := server.Spawn(ctx, args.runOpts, args.cmd...); err != nil {
cleanUpContainers(ctx, servers)
b.Fatalf("failed to spawn node instance: %v", err)
}
// Get the container IP.
servingIP, err := server.FindIP(ctx, false)
if err != nil {
cleanUpContainers(ctx, servers)
b.Fatalf("failed to get ip from server: %v", err)
}
// Wait until the server is up.
if err := harness.WaitUntilServing(ctx, args.machine, servingIP, args.port); err != nil {
cleanUpContainers(ctx, servers)
b.Fatalf("failed to wait for serving")
}
}
return servers
}
// cleanUpContainers cleans up a slice of containers.
func cleanUpContainers(ctx context.Context, containers []*dockerutil.Container) {
for _, c := range containers {
if c != nil {
c.CleanUp(ctx)
}
}
}

View File

@@ -0,0 +1,156 @@
// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package base
import (
"context"
"fmt"
"net"
"testing"
"time"
"gvisor.dev/gvisor/pkg/test/dockerutil"
"gvisor.dev/gvisor/test/benchmarks/harness"
)
// BenchmarkStartupEmpty times the startup of an empty container.
func BenchmarkStartupEmpty(b *testing.B) {
machine, err := testHarness.GetMachine()
if err != nil {
b.Fatalf("failed to get machine: %v", err)
}
defer machine.CleanUp()
ctx := context.Background()
for i := 0; i < b.N; i++ {
container := machine.GetContainer(ctx, b)
defer container.CleanUp(ctx)
if _, err := container.Run(ctx, dockerutil.RunOpts{
Image: "benchmarks/alpine",
}, "true"); err != nil {
b.Fatalf("failed to run container: %v", err)
}
}
}
// BenchmarkStartupNginx times startup for an Nginx instance.
// Time is measured from start until the first request is served.
func BenchmarkStartupNginx(b *testing.B) {
// The machine to hold the Nginx server.
machine, err := testHarness.GetMachine()
if err != nil {
b.Fatalf("failed to get machine with: %v", err)
}
defer machine.CleanUp()
ctx := context.Background()
runOpts := dockerutil.RunOpts{
Image: "benchmarks/nginx",
}
runServerWorkload(ctx, b,
serverArgs{
machine: machine,
runOpts: runOpts,
port: 80,
})
}
// BenchmarkStartupNode times startup for a Node application instance.
// Time is measured from start until the first request is served.
// Note that the Node app connects to a Redis instance before serving.
func BenchmarkStartupNode(b *testing.B) {
machine, err := testHarness.GetMachine()
if err != nil {
b.Fatalf("failed to get machine with: %v", err)
}
defer machine.CleanUp()
ctx := context.Background()
redis, redisIP := redisInstance(ctx, b, machine)
defer redis.CleanUp(ctx)
runOpts := dockerutil.RunOpts{
Image: "benchmarks/node",
WorkDir: "/usr/src/app",
Links: []string{redis.MakeLink("redis")},
}
cmd := []string{"node", "index.js", redisIP.String()}
runServerWorkload(ctx, b,
serverArgs{
machine: machine,
port: 8080,
runOpts: runOpts,
cmd: cmd,
})
}
// redisInstance returns a Redis container and its reachable IP.
func redisInstance(ctx context.Context, b *testing.B, machine harness.Machine) (*dockerutil.Container, net.IP) {
b.Helper()
// Spawn a redis instance for the app to use.
redis := machine.GetNativeContainer(ctx, b)
if err := redis.Spawn(ctx, dockerutil.RunOpts{
Image: "benchmarks/redis",
}); err != nil {
redis.CleanUp(ctx)
b.Fatalf("failed to spwan redis instance: %v", err)
}
if out, err := redis.WaitForOutput(ctx, "Ready to accept connections", 3*time.Second); err != nil {
redis.CleanUp(ctx)
b.Fatalf("failed to start redis server: %v %s", err, out)
}
redisIP, err := redis.FindIP(ctx, false)
if err != nil {
redis.CleanUp(ctx)
b.Fatalf("failed to get IP from redis instance: %v", err)
}
return redis, redisIP
}
// runServerWorkload runs a server workload defined by 'args.runOpts' and
// 'args.cmd'. 'args.machine' is used both to run the server and to check
// that it is serving.
func runServerWorkload(ctx context.Context, b *testing.B, args serverArgs) {
b.Helper()
b.ResetTimer()
for i := 0; i < b.N; i++ {
if err := func() error {
server := args.machine.GetContainer(ctx, b)
defer func() {
b.StopTimer()
// Clean up servers as we go so that the benchmark can run indefinitely.
server.CleanUp(ctx)
b.StartTimer()
}()
if err := server.Spawn(ctx, args.runOpts, args.cmd...); err != nil {
return fmt.Errorf("failed to spawn node instance: %v", err)
}
servingIP, err := server.FindIP(ctx, false)
if err != nil {
return fmt.Errorf("failed to get ip from server: %v", err)
}
// Wait until the client sees the server as up.
if err := harness.WaitUntilServing(ctx, args.machine, servingIP, args.port); err != nil {
return fmt.Errorf("failed to wait for serving: %v", err)
}
return nil
}(); err != nil {
b.Fatal(err)
}
}
}
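A note on the timing above: the StopTimer/StartTimer bracket in runServerWorkload keeps per-iteration container teardown out of the measured startup time. A minimal sketch of the pattern, with hypothetical startServer and cleanUpServer stand-ins for the spawn and CleanUp calls:

package sketch

import "testing"

// startServer and cleanUpServer are hypothetical stand-ins for the spawn,
// wait-until-serving, and CleanUp calls in runServerWorkload.
func startServer()   {}
func cleanUpServer() {}

func BenchmarkStartupPattern(b *testing.B) {
	for i := 0; i < b.N; i++ {
		startServer() // measured: spawn plus wait until serving
		b.StopTimer()
		cleanUpServer() // not measured, so the loop can run indefinitely
		b.StartTimer()
	}
}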

View File

@@ -64,7 +64,7 @@ func BenchmarkSysbench(b *testing.B) {
},
}
machine, err := h.GetMachine()
machine, err := testHarness.GetMachine()
if err != nil {
b.Fatalf("failed to get machine: %v", err)
}

View File

@@ -23,23 +23,25 @@ import (
"gvisor.dev/gvisor/pkg/test/testutil"
)
// TODO(gvisor.dev/issue/3535): move to own package or move methods to harness struct.
// WaitUntilServing grabs a container from `machine` and waits for a server at
// IP:port.
func WaitUntilServing(ctx context.Context, machine Machine, server net.IP, port int) error {
var logger testutil.DefaultLogger = "netcat"
var logger testutil.DefaultLogger = "util"
netcat := machine.GetNativeContainer(ctx, logger)
defer netcat.CleanUp(ctx)
cmd := fmt.Sprintf("while ! nc -zv %s %d; do true; done", server, port)
cmd := fmt.Sprintf("while ! wget -q --spider http://%s:%d; do true; done", server, port)
_, err := netcat.Run(ctx, dockerutil.RunOpts{
Image: "packetdrill",
Image: "benchmarks/util",
}, "sh", "-c", cmd)
return err
}
// DropCaches drops caches on the provided machine. Requires root.
func DropCaches(machine Machine) error {
if out, err := machine.RunCommand("/bin/sh", "-c", "sync | sysctl vm.drop_caches=3"); err != nil {
if out, err := machine.RunCommand("/bin/sh", "-c", "sync && sysctl vm.drop_caches=3"); err != nil {
return fmt.Errorf("failed to drop caches: %v logs: %s", err, out)
}
return nil
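The updated WaitUntilServing polls the server over HTTP with wget from a benchmarks/util container rather than with netcat. For context, a rough in-process analogue of the same readiness check, assuming a plain TCP dial is an acceptable substitute for the containerized wget loop:

package sketch

import (
	"context"
	"fmt"
	"net"
	"time"
)

// waitUntilServing is a hedged, in-process analogue of the harness's
// WaitUntilServing: poll until a TCP connect to ip:port succeeds. The
// real helper instead loops wget inside a benchmarks/util container.
func waitUntilServing(ctx context.Context, ip net.IP, port int) error {
	addr := fmt.Sprintf("%s:%d", ip, port)
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}
		conn, err := net.DialTimeout("tcp", addr, 100*time.Millisecond)
		if err == nil {
			conn.Close()
			return nil
		}
		time.Sleep(50 * time.Millisecond)
	}
}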

View File

@@ -25,7 +25,7 @@ import (
// BenchmarkNginxConcurrency iterates the concurrency argument and tests
// how well the runtime under test handles requests in parallel.
// TODO(zkoopmans): Update with different doc sizes like Httpd.
// TODO(gvisor.dev/issue/3536): Update with different doc sizes like Httpd.
func BenchmarkNginxConcurrency(b *testing.B) {
// Grab a machine for the client and server.
clientMachine, err := h.GetMachine()

View File

@@ -9,6 +9,7 @@ go_library(
"fio.go",
"hey.go",
"iperf.go",
"meminfo.go",
"redis.go",
"sysbench.go",
"tools.go",
@@ -24,6 +25,7 @@ go_test(
"fio_test.go",
"hey_test.go",
"iperf_test.go",
"meminfo_test.go",
"redis_test.go",
"sysbench_test.go",
],

View File

@@ -0,0 +1,60 @@
// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tools
import (
"fmt"
"regexp"
"strconv"
"testing"
)
// Meminfo wraps measurements of MemAvailable using /proc/meminfo.
type Meminfo struct {
}
// MakeCmd returns a command for checking meminfo.
func (*Meminfo) MakeCmd() (string, []string) {
return "cat", []string{"/proc/meminfo"}
}
// Report takes two reads of meminfo, parses them, and reports the average
// MemAvailable difference per container (b.N), converted to bytes.
func (*Meminfo) Report(b *testing.B, before, after string) {
b.Helper()
beforeVal, err := parseMemAvailable(before)
if err != nil {
b.Fatalf("could not parse before value %s: %v", before, err)
}
afterVal, err := parseMemAvailable(after)
if err != nil {
b.Fatalf("could not parse before value %s: %v", before, err)
}
val := 1024 * ((beforeVal - afterVal) / float64(b.N))
b.ReportMetric(val, "average_container_size_bytes")
}
var memInfoRE = regexp.MustCompile(`MemAvailable:\s*(\d+)\skB\n`)
// parseMemAvailable grabs the MemAvailable number from /proc/meminfo.
func parseMemAvailable(data string) (float64, error) {
match := memInfoRE.FindStringSubmatch(data)
if len(match) < 2 {
return 0, fmt.Errorf("couldn't find MemAvailable in %s", data)
}
return strconv.ParseFloat(match[1], 64)
}
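A worked example of Report's arithmetic, using hypothetical readings:

package sketch

// Hypothetical readings to illustrate Report's arithmetic.
const (
	beforeKB = 9319948.0 // MemAvailable before starting containers
	afterKB  = 9115948.0 // MemAvailable with b.N = 100 containers up
)

// 1024 * ((9319948 - 9115948) / 100) = 2088960 bytes, roughly 2 MiB
// reported per container.
var avgContainerSizeBytes = 1024 * ((beforeKB - afterKB) / 100)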

View File

@@ -0,0 +1,84 @@
// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tools
import (
"testing"
)
// TestMeminfo checks the Meminfo parser on sample output.
func TestMeminfo(t *testing.T) {
sampleData := `
MemTotal: 16337408 kB
MemFree: 3742696 kB
MemAvailable: 9319948 kB
Buffers: 1433884 kB
Cached: 4607036 kB
SwapCached: 45284 kB
Active: 8288376 kB
Inactive: 2685928 kB
Active(anon): 4724912 kB
Inactive(anon): 1047940 kB
Active(file): 3563464 kB
Inactive(file): 1637988 kB
Unevictable: 326940 kB
Mlocked: 48 kB
SwapTotal: 33292284 kB
SwapFree: 32865736 kB
Dirty: 708 kB
Writeback: 0 kB
AnonPages: 4304204 kB
Mapped: 975424 kB
Shmem: 910292 kB
KReclaimable: 744532 kB
Slab: 1058448 kB
SReclaimable: 744532 kB
SUnreclaim: 313916 kB
KernelStack: 25188 kB
PageTables: 65300 kB
NFS_Unstable: 0 kB
Bounce: 0 kB
WritebackTmp: 0 kB
CommitLimit: 41460988 kB
Committed_AS: 22859492 kB
VmallocTotal: 34359738367 kB
VmallocUsed: 63088 kB
VmallocChunk: 0 kB
Percpu: 9248 kB
HardwareCorrupted: 0 kB
AnonHugePages: 786432 kB
ShmemHugePages: 0 kB
ShmemPmdMapped: 0 kB
FileHugePages: 0 kB
FilePmdMapped: 0 kB
HugePages_Total: 0
HugePages_Free: 0
HugePages_Rsvd: 0
HugePages_Surp: 0
Hugepagesize: 2048 kB
Hugetlb: 0 kB
DirectMap4k: 5408532 kB
DirectMap2M: 11241472 kB
DirectMap1G: 1048576 kB
`
want := 9319948.0
got, err := parseMemAvailable(sampleData)
if err != nil {
t.Fatalf("parseMemAvailable failed: %v", err)
}
if got != want {
t.Fatalf("parseMemAvailable got %f, want %f", got, want)
}
}