// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tools
import (
"fmt"
"net"
"regexp"
"strconv"
"testing"
)
const length = 64 * 1024 // Length in bytes of each iperf buffer (--length).
// Iperf is for the client side of `iperf`.
type Iperf struct {
	Num int // Amount of data to send, in KB.
}
// MakeCmd returns an iperf client command.
func (i *Iperf) MakeCmd(ip net.IP, port int) []string {
return []string{
"iperf",
"--format", "K", // Output in KBytes.
"--realtime", // Measured in realtime.
"--num", fmt.Sprintf("%dK", i.Num), // Number of bytes to send in KB.
"--length", fmt.Sprintf("%d", length),
"--client", ip.String(),
"--port", fmt.Sprintf("%d", port),
}
}
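
// For illustration, hypothetical values Num=1024, ip=10.0.0.2, and port=5001
// would assemble the command:
//
//	iperf --format K --realtime --num 1024K --length 65536 --client 10.0.0.2 --port 5001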
// Report parses output from iperf client and reports metrics.
func (i *Iperf) Report(b *testing.B, output string) {
b.Helper()
// Parse bandwidth and report it.
bW, err := i.bandwidth(output)
if err != nil {
b.Fatalf("failed to parse bandwitdth from %s: %v", output, err)
}
	b.SetBytes(length) // Report bytes/sec relative to b.N; iperf's own measurement is reported below.
ReportCustomMetric(b, bW*1024, "bandwidth" /*metric name*/, "bytes_per_second" /*unit*/)
}
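
// A minimal usage sketch, assuming an iperf server is already reachable.
// serverIP and serverPort are hypothetical, and the os/exec invocation stands
// in for whatever harness actually runs the client:
//
//	func BenchmarkIperf(b *testing.B) {
//		i := &Iperf{Num: b.N} // Scale the amount of data sent with b.N (in KB).
//		cmd := i.MakeCmd(serverIP, serverPort)
//		out, err := exec.Command(cmd[0], cmd[1:]...).CombinedOutput()
//		if err != nil {
//			b.Fatalf("iperf client failed: %v, output: %s", err, out)
//		}
//		i.Report(b, string(out))
//	}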
// bandwidth parses the Bandwidth number from an iperf report line such as
// (representative client output):
//
//	[  3]  0.0-10.0 sec  1024 KBytes  102.4 KBytes/sec
func (i *Iperf) bandwidth(data string) (float64, error) {
re := regexp.MustCompile(`\[\s*\d+\][^\n]+\s+(\d+\.?\d*)\s+KBytes/sec`)
match := re.FindStringSubmatch(data)
	if len(match) < 2 { // Need the full match plus the bandwidth capture group.
		return 0, fmt.Errorf("failed to get bandwidth: %s", data)
}
return strconv.ParseFloat(match[1], 64)
}
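
// A quick parsing sanity check, sketched for a hypothetical _test.go file; the
// sample line mirrors the format the regexp above expects:
//
//	func TestIperfBandwidth(t *testing.T) {
//		i := &Iperf{}
//		got, err := i.bandwidth("[  3]  0.0-10.0 sec  1024 KBytes  102.4 KBytes/sec")
//		if err != nil || got != 102.4 {
//			t.Fatalf("got (%v, %v), want (102.4, nil)", got, err)
//		}
//	}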