Fix data race in fs.Async.

Replaces the WaitGroup with an RWMutex. Calls to Async hold the mutex for
reading, while AsyncBarrier takes the lock for writing. This ensures that all
executing Async work finishes before AsyncBarrier returns.
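
For reference, a minimal standalone sketch of this pattern (the package layout
and main function are illustrative only, not part of the change): Async takes
the read lock before spawning the worker goroutine, so a concurrent
AsyncBarrier cannot return while that work is still pending.

package main

import (
	"fmt"
	"sync"
)

// workMu synchronizes pending asynchronous work: readers are in-flight
// Async calls, the writer is AsyncBarrier.
var workMu sync.RWMutex

// Async runs f on its own goroutine. The read lock is acquired before the
// goroutine starts, so AsyncBarrier cannot slip in between and miss it.
func Async(f func()) {
	workMu.RLock()
	go func() {
		defer workMu.RUnlock() // Release even if f panics.
		f()
	}()
}

// AsyncBarrier returns once every outstanding Async call has released its
// read lock, i.e. once all queued work has finished.
func AsyncBarrier() {
	workMu.Lock()
	workMu.Unlock()
}

func main() {
	for i := 0; i < 3; i++ {
		i := i
		Async(func() { fmt.Println("async work", i) })
	}
	AsyncBarrier()
	fmt.Println("all async work done") // Printed after the three lines above.
}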

Also pushes the Async() call from Inode.Release into
gofer/InodeOperations.Release(). This removes a recursive Async call, which
should not have been allowed in the first place. The gofer Release call is the
slow one (since it may make RPCs to the gofer), so putting the Async call there
makes sense.
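
A rough sketch of the new shape, reusing the Async helper from the sketch
above (the types here are stand-ins, not the real gvisor ones): the inode
destructor releases synchronously, and only the gofer's RPC-bound cleanup is
deferred, so Async is never entered from inside another Async callback.
Nesting the calls would mean re-acquiring workMu's read lock from a goroutine
that already holds it, which can deadlock once AsyncBarrier is blocked waiting
for the write lock.

// Stand-in types for illustration only; Async is the helper sketched above.

type inodeOperations interface {
	Release()
}

// goferInodeOps models gofer/InodeOperations: releasing its file state may
// issue RPCs to the gofer, so only that part is deferred via Async.
type goferInodeOps struct{}

func (*goferInodeOps) Release() {
	// Fast, in-memory cleanup stays synchronous here.
	Async(func() {
		// Slow cleanup: RPCs to the gofer, releasing remote state, etc.
	})
}

// inode models fs.Inode: destroy no longer wraps Release in Async, so a
// Release implementation is free to call Async itself without nesting.
type inode struct {
	ops inodeOperations
}

func (i *inode) destroy() {
	i.ops.Release()
	// DecRef the mount source, etc.
}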

PiperOrigin-RevId: 223093067
Change-Id: I116da7b20fce5ebab8d99c2ab0f27db7c89d890e
Nicolas Lacasse 2018-11-27 18:16:18 -08:00 committed by Shentubot
parent 5bd02b224f
commit 573622fdca
3 changed files with 24 additions and 21 deletions


@@ -60,10 +60,11 @@ import (
 )

 var (
-    // work is a sync.WaitGroup that can be used to queue asynchronous
-    // operations via Do. Callers can use Barrier to ensure no operations
-    // are outstanding.
-    work sync.WaitGroup
+    // workMu is used to synchronize pending asynchronous work. Async work
+    // runs with the lock held for reading. AsyncBarrier will take the lock
+    // for writing, thus ensuring that all Async work completes before
+    // AsyncBarrier returns.
+    workMu sync.RWMutex

     // asyncError is used to store up to one asynchronous execution error.
     asyncError = make(chan error, 1)
@@ -71,14 +72,17 @@ var (
 // AsyncBarrier waits for all outstanding asynchronous work to complete.
 func AsyncBarrier() {
-    work.Wait()
+    workMu.Lock()
+    workMu.Unlock()
 }

 // Async executes a function asynchronously.
+//
+// Async must not be called recursively.
 func Async(f func()) {
-    work.Add(1)
-    go func() { // S/R-SAFE: Barrier must be called.
-        defer work.Done() // Ensure Done in case of panic.
+    workMu.RLock()
+    go func() { // S/R-SAFE: AsyncBarrier must be called.
+        defer workMu.RUnlock() // Ensure RUnlock in case of panic.
         f()
     }()
 }
@@ -89,7 +93,7 @@ func Async(f func()) {
 func AsyncErrorBarrier() error {
     wait := make(chan struct{}, 1)
     go func() { // S/R-SAFE: Does not touch persistent state.
-        work.Wait()
+        AsyncBarrier()
         wait <- struct{}{}
     }()
     select {


@@ -333,8 +333,14 @@ func (i *inodeOperations) session() *session {
 // Release implements fs.InodeOperations.Release.
 func (i *inodeOperations) Release(ctx context.Context) {
-    i.fileState.Release(ctx)
     i.cachingInodeOps.Release()
+
+    // Releasing the fileState may make RPCs to the gofer. There is
+    // no need to wait for those to return, so we can do this
+    // asynchronously.
+    fs.Async(func() {
+        i.fileState.Release(ctx)
+    })
 }

 // Mappable implements fs.InodeOperations.Mappable.


@@ -110,20 +110,13 @@ func (i *Inode) destroy() {
     // wouldn't be in the destructor.
     i.Watches.targetDestroyed()

-    // Overlay resources should be released synchronously, since they may
-    // trigger more Inode.destroy calls which must themselves be handled
-    // synchronously, like the WriteOut call above.
     if i.overlay != nil {
         i.overlay.release()
-        i.MountSource.DecRef()
-        return
+    } else {
+        i.InodeOperations.Release(ctx)
     }

-    // Regular (non-overlay) resources may be released asynchronously.
-    Async(func() {
-        i.InodeOperations.Release(ctx)
-        i.MountSource.DecRef()
-    })
+    i.MountSource.DecRef()
 }

 // Mappable calls i.InodeOperations.Mappable.