Stop profiling when the sentry exits

Also removes `--profile-goroutine` because it's equivalent
to `debug --stacks`.
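
For context on the first change: Go's CPU profiler and execution tracer buffer their output and only flush it when explicitly stopped, so a sandbox exiting mid-profile would otherwise leave a truncated or empty output file. The sketch below is plain standard-library Go, not gVisor code, and the output paths are made up for illustration.

```go
package main

import (
	"os"
	"runtime/pprof"
	"runtime/trace"
)

func main() {
	// Illustrative output paths; any writable files would do.
	cpuOut, err := os.Create("cpu.pprof")
	if err != nil {
		panic(err)
	}
	defer cpuOut.Close()

	traceOut, err := os.Create("trace.out")
	if err != nil {
		panic(err)
	}
	defer traceOut.Close()

	if err := pprof.StartCPUProfile(cpuOut); err != nil {
		panic(err)
	}
	if err := trace.Start(traceOut); err != nil {
		panic(err)
	}

	doWork() // stand-in for the sandbox's real work

	// Stopping flushes the buffered data; exiting without these calls
	// loses the profile, which is what stopping on sentry exit avoids.
	pprof.StopCPUProfile()
	trace.Stop()
}

func doWork() {}
```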

PiperOrigin-RevId: 325061502
Fabricio Voznika, 2020-08-05 11:28:15 -07:00 (committed by gVisor bot)
parent d0127b23f2
commit 190b1e6bd4
7 changed files with 71 additions and 99 deletions
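
As for the removed `--profile-goroutine` flag mentioned above: a goroutine profile rendered as text carries the same information as the stack dump produced by `debug --stacks`. A minimal standard-library sketch of that equivalence (not the sentry's control interface):

```go
package main

import (
	"os"
	"runtime/pprof"
)

func main() {
	// debug=2 renders every goroutine as a full stack trace, in the same
	// format Go prints for an unrecovered panic; debug=0 would emit the
	// gzip-compressed pprof proto instead.
	if err := pprof.Lookup("goroutine").WriteTo(os.Stdout, 2); err != nil {
		panic(err)
	}
}
```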

@@ -49,6 +49,9 @@ type ProfileOpts struct {
// - dump out the stack trace of current go routines.
// sentryctl -pid <pid> pprof-goroutine
type Profile struct {
// Kernel is the kernel under profile. It's immutable.
Kernel *kernel.Kernel
// mu protects the fields below.
mu sync.Mutex
@@ -57,9 +60,6 @@ type Profile struct {
// traceFile is the current execution trace output file.
traceFile *fd.FD
// Kernel is the kernel under profile.
Kernel *kernel.Kernel
}
// StartCPUProfile is an RPC stub which starts recording the CPU profile in a

@@ -49,17 +49,16 @@ type Profile interface {
// should have --profile set as an option in /etc/docker/daemon.json in
// order for profiling to work with Pprof.
type Pprof struct {
BasePath string // path to put profiles
BlockProfile bool
CPUProfile bool
GoRoutineProfile bool
HeapProfile bool
MutexProfile bool
Duration time.Duration // duration to run profiler e.g. '10s' or '1m'.
shouldRun bool
cmd *exec.Cmd
stdout io.ReadCloser
stderr io.ReadCloser
BasePath string // path to put profiles
BlockProfile bool
CPUProfile bool
HeapProfile bool
MutexProfile bool
Duration time.Duration // duration to run profiler e.g. '10s' or '1m'.
shouldRun bool
cmd *exec.Cmd
stdout io.ReadCloser
stderr io.ReadCloser
}
// MakePprofFromFlags makes a Pprof profile from flags.
@@ -68,13 +67,12 @@ func MakePprofFromFlags(c *Container) *Pprof {
return nil
}
return &Pprof{
BasePath: filepath.Join(*pprofBaseDir, c.runtime, c.Name),
BlockProfile: *pprofBlock,
CPUProfile: *pprofCPU,
GoRoutineProfile: *pprofGo,
HeapProfile: *pprofHeap,
MutexProfile: *pprofMutex,
Duration: *duration,
BasePath: filepath.Join(*pprofBaseDir, c.runtime, c.Name),
BlockProfile: *pprofBlock,
CPUProfile: *pprofCPU,
HeapProfile: *pprofHeap,
MutexProfile: *pprofMutex,
Duration: *duration,
}
}
@@ -138,9 +136,6 @@ func (p *Pprof) makeProfileArgs(c *Container) []string {
if p.CPUProfile {
ret = append(ret, fmt.Sprintf("--profile-cpu=%s", filepath.Join(p.BasePath, "cpu.pprof")))
}
if p.GoRoutineProfile {
ret = append(ret, fmt.Sprintf("--profile-goroutine=%s", filepath.Join(p.BasePath, "go.pprof")))
}
if p.HeapProfile {
ret = append(ret, fmt.Sprintf("--profile-heap=%s", filepath.Join(p.BasePath, "heap.pprof")))
}

@@ -51,13 +51,12 @@ func TestPprof(t *testing.T) {
{
name: "All",
pprof: Pprof{
BasePath: basePath,
BlockProfile: true,
CPUProfile: true,
GoRoutineProfile: true,
HeapProfile: true,
MutexProfile: true,
Duration: 2 * time.Second,
BasePath: basePath,
BlockProfile: true,
CPUProfile: true,
HeapProfile: true,
MutexProfile: true,
Duration: 2 * time.Second,
},
expectedFiles: []string{block, cpu, goprofle, heap, mutex},
},

@@ -101,14 +101,13 @@ const (
// Profiling related commands (see pprof.go for more details).
const (
StartCPUProfile = "Profile.StartCPUProfile"
StopCPUProfile = "Profile.StopCPUProfile"
HeapProfile = "Profile.HeapProfile"
GoroutineProfile = "Profile.GoroutineProfile"
BlockProfile = "Profile.BlockProfile"
MutexProfile = "Profile.MutexProfile"
StartTrace = "Profile.StartTrace"
StopTrace = "Profile.StopTrace"
StartCPUProfile = "Profile.StartCPUProfile"
StopCPUProfile = "Profile.StopCPUProfile"
HeapProfile = "Profile.HeapProfile"
BlockProfile = "Profile.BlockProfile"
MutexProfile = "Profile.MutexProfile"
StartTrace = "Profile.StartTrace"
StopTrace = "Profile.StopTrace"
)
// Logging related commands (see logging.go for more details).
@@ -129,42 +128,52 @@ type controller struct {
// manager holds the containerManager methods.
manager *containerManager
// pprof holds the profile instance if enabled. It may be nil.
pprof *control.Profile
}
// newController creates a new controller. The caller must call
// controller.srv.StartServing() to start the controller.
func newController(fd int, l *Loader) (*controller, error) {
srv, err := server.CreateFromFD(fd)
ctrl := &controller{}
var err error
ctrl.srv, err = server.CreateFromFD(fd)
if err != nil {
return nil, err
}
manager := &containerManager{
ctrl.manager = &containerManager{
startChan: make(chan struct{}),
startResultChan: make(chan error),
l: l,
}
srv.Register(manager)
ctrl.srv.Register(ctrl.manager)
if eps, ok := l.k.RootNetworkNamespace().Stack().(*netstack.Stack); ok {
net := &Network{
Stack: eps.Stack,
}
srv.Register(net)
ctrl.srv.Register(net)
}
srv.Register(&debug{})
srv.Register(&control.Logging{})
ctrl.srv.Register(&debug{})
ctrl.srv.Register(&control.Logging{})
if l.root.conf.ProfileEnable {
srv.Register(&control.Profile{
Kernel: l.k,
})
ctrl.pprof = &control.Profile{Kernel: l.k}
ctrl.srv.Register(ctrl.pprof)
}
return &controller{
srv: srv,
manager: manager,
}, nil
return ctrl, nil
}
func (c *controller) stop() {
if c.pprof != nil {
// These are noop if there is nothing being profiled.
_ = c.pprof.StopCPUProfile(nil, nil)
_ = c.pprof.StopTrace(nil, nil)
}
}
// containerManager manages sandbox containers.

@@ -1008,6 +1008,9 @@ func (l *Loader) WaitExit() kernel.ExitStatus {
// Wait for container.
l.k.WaitExited()
// Cleanup
l.ctrl.stop()
return l.k.GlobalInit().ExitStatus()
}

@@ -32,20 +32,19 @@ import (
// Debug implements subcommands.Command for the "debug" command.
type Debug struct {
pid int
stacks bool
signal int
profileHeap string
profileCPU string
profileGoroutine string
profileBlock string
profileMutex string
trace string
strace string
logLevel string
logPackets string
duration time.Duration
ps bool
pid int
stacks bool
signal int
profileHeap string
profileCPU string
profileBlock string
profileMutex string
trace string
strace string
logLevel string
logPackets string
duration time.Duration
ps bool
}
// Name implements subcommands.Command.
@@ -69,7 +68,6 @@ func (d *Debug) SetFlags(f *flag.FlagSet) {
f.BoolVar(&d.stacks, "stacks", false, "if true, dumps all sandbox stacks to the log")
f.StringVar(&d.profileHeap, "profile-heap", "", "writes heap profile to the given file.")
f.StringVar(&d.profileCPU, "profile-cpu", "", "writes CPU profile to the given file.")
f.StringVar(&d.profileGoroutine, "profile-goroutine", "", "writes goroutine profile to the given file.")
f.StringVar(&d.profileBlock, "profile-block", "", "writes block profile to the given file.")
f.StringVar(&d.profileMutex, "profile-mutex", "", "writes mutex profile to the given file.")
f.DurationVar(&d.duration, "duration", time.Second, "amount of time to wait for CPU and trace profiles")
@@ -153,18 +151,6 @@ func (d *Debug) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})
}
log.Infof("Heap profile written to %q", d.profileHeap)
}
if d.profileGoroutine != "" {
f, err := os.Create(d.profileGoroutine)
if err != nil {
return Errorf(err.Error())
}
defer f.Close()
if err := c.Sandbox.GoroutineProfile(f); err != nil {
return Errorf(err.Error())
}
log.Infof("Goroutine profile written to %q", d.profileGoroutine)
}
if d.profileBlock != "" {
f, err := os.Create(d.profileBlock)
if err != nil {

@@ -1012,26 +1012,6 @@ func (s *Sandbox) StopCPUProfile() error {
return nil
}
// GoroutineProfile writes a goroutine profile to the given file.
func (s *Sandbox) GoroutineProfile(f *os.File) error {
log.Debugf("Goroutine profile %q", s.ID)
conn, err := s.sandboxConnect()
if err != nil {
return err
}
defer conn.Close()
opts := control.ProfileOpts{
FilePayload: urpc.FilePayload{
Files: []*os.File{f},
},
}
if err := conn.Call(boot.GoroutineProfile, &opts, nil); err != nil {
return fmt.Errorf("getting sandbox %q goroutine profile: %v", s.ID, err)
}
return nil
}
// BlockProfile writes a block profile to the given file.
func (s *Sandbox) BlockProfile(f *os.File) error {
log.Debugf("Block profile %q", s.ID)