Merge release-20201109.0-89-g3454d5721 (automated)
This commit is contained in:
commit
a8329e99c5
|
@ -1,6 +0,0 @@
|
|||
// automatically generated by stateify.
|
||||
|
||||
// +build go1.12
|
||||
// +build !go1.17
|
||||
|
||||
package goid
|
|
@ -117,7 +117,7 @@ type inodeFileState struct {
|
|||
// loading is acquired when the inodeFileState begins an asynchronous
|
||||
// load. It releases when the load is complete. Callers that require all
|
||||
// state to be available should call waitForLoad() to ensure that.
|
||||
loading sync.Mutex `state:".(struct{})"`
|
||||
loading sync.CrossGoroutineMutex `state:".(struct{})"`
|
||||
|
||||
// savedUAttr is only allocated during S/R. It points to the save-time
|
||||
// unstable attributes and is used to validate restore-time ones.
|
||||
|
|
|
@ -0,0 +1,18 @@
|
|||
// Copyright 2020 The gVisor Authors.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !checklocks
|
||||
|
||||
package sync
|
||||
|
||||
import (
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// noteLock is a no-op when the "checklocks" build tag is not set (see the
// `// +build !checklocks` constraint above): lock tracking is compiled out.
func noteLock(l unsafe.Pointer) {
}
|
||||
|
||||
// noteUnlock is a no-op when the "checklocks" build tag is not set: lock
// tracking is compiled out.
func noteUnlock(l unsafe.Pointer) {
}
|
|
@ -0,0 +1,108 @@
|
|||
// Copyright 2020 The gVisor Authors.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build checklocks
|
||||
|
||||
package sync
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"unsafe"
|
||||
|
||||
"gvisor.dev/gvisor/pkg/goid"
|
||||
)
|
||||
|
||||
// gLocks contains metadata about the locks held by a goroutine.
type gLocks struct {
	// locksHeld records the address of each lock currently held by this
	// goroutine, in acquisition order (noteLock appends, noteUnlock
	// removes).
	locksHeld []unsafe.Pointer
}
|
||||
|
||||
// locksHeld maps goroutine IDs to the locks that goroutine holds:
// map[goid int64]*gLocks.
//
// Each key may only be written by the G with the goid it refers to.
//
// Note that entries are not evicted when a G exits, causing unbounded growth
// with new G creation / destruction. If this proves problematic, entries could
// be evicted when no locks are held at the expense of more allocations when
// taking top-level locks.
var locksHeld sync.Map
|
||||
|
||||
func getGLocks() *gLocks {
|
||||
id := goid.Get()
|
||||
|
||||
var locks *gLocks
|
||||
if l, ok := locksHeld.Load(id); ok {
|
||||
locks = l.(*gLocks)
|
||||
} else {
|
||||
locks = &gLocks{
|
||||
// Initialize space for a few locks.
|
||||
locksHeld: make([]unsafe.Pointer, 0, 8),
|
||||
}
|
||||
locksHeld.Store(id, locks)
|
||||
}
|
||||
|
||||
return locks
|
||||
}
|
||||
|
||||
func noteLock(l unsafe.Pointer) {
|
||||
locks := getGLocks()
|
||||
|
||||
for _, lock := range locks.locksHeld {
|
||||
if lock == l {
|
||||
panic(fmt.Sprintf("Deadlock on goroutine %d! Double lock of %p: %+v", goid.Get(), l, locks))
|
||||
}
|
||||
}
|
||||
|
||||
// Commit only after checking for panic conditions so that this lock
|
||||
// isn't on the list if the above panic is recovered.
|
||||
locks.locksHeld = append(locks.locksHeld, l)
|
||||
}
|
||||
|
||||
func noteUnlock(l unsafe.Pointer) {
|
||||
locks := getGLocks()
|
||||
|
||||
if len(locks.locksHeld) == 0 {
|
||||
panic(fmt.Sprintf("Unlock of %p on goroutine %d without any locks held! All locks:\n%s", l, goid.Get(), dumpLocks()))
|
||||
}
|
||||
|
||||
// Search backwards since callers are most likely to unlock in LIFO order.
|
||||
length := len(locks.locksHeld)
|
||||
for i := length - 1; i >= 0; i-- {
|
||||
if l == locks.locksHeld[i] {
|
||||
copy(locks.locksHeld[i:length-1], locks.locksHeld[i+1:length])
|
||||
// Clear last entry to ensure addr can be GC'd.
|
||||
locks.locksHeld[length-1] = nil
|
||||
locks.locksHeld = locks.locksHeld[:length-1]
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
panic(fmt.Sprintf("Unlock of %p on goroutine %d without matching lock! All locks:\n%s", l, goid.Get(), dumpLocks()))
|
||||
}
|
||||
|
||||
func dumpLocks() string {
|
||||
var s strings.Builder
|
||||
locksHeld.Range(func(key, value interface{}) bool {
|
||||
goid := key.(int64)
|
||||
locks := value.(*gLocks)
|
||||
|
||||
// N.B. accessing gLocks of another G is fundamentally racy.
|
||||
|
||||
fmt.Fprintf(&s, "goroutine %d:\n", goid)
|
||||
if len(locks.locksHeld) == 0 {
|
||||
fmt.Fprintf(&s, "\t<none>\n")
|
||||
}
|
||||
for _, lock := range locks.locksHeld {
|
||||
fmt.Fprintf(&s, "\t%p\n", lock)
|
||||
}
|
||||
fmt.Fprintf(&s, "\n")
|
||||
|
||||
return true
|
||||
})
|
||||
|
||||
return s.String()
|
||||
}
|
|
@ -17,8 +17,9 @@ import (
|
|||
"unsafe"
|
||||
)
|
||||
|
||||
// Mutex is a try lock.
|
||||
type Mutex struct {
|
||||
// CrossGoroutineMutex is equivalent to Mutex, but it need not be unlocked by
|
||||
// the same goroutine that locked the mutex.
|
||||
type CrossGoroutineMutex struct {
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
|
@ -27,7 +28,7 @@ type syncMutex struct {
|
|||
sema uint32
|
||||
}
|
||||
|
||||
func (m *Mutex) state() *int32 {
|
||||
func (m *CrossGoroutineMutex) state() *int32 {
|
||||
return &(*syncMutex)(unsafe.Pointer(&m.Mutex)).state
|
||||
}
|
||||
|
||||
|
@ -36,9 +37,9 @@ const (
|
|||
mutexLocked = 1
|
||||
)
|
||||
|
||||
// TryLock tries to aquire the mutex. It returns true if it succeeds and false
|
||||
// TryLock tries to acquire the mutex. It returns true if it succeeds and false
|
||||
// otherwise. TryLock does not block.
|
||||
func (m *Mutex) TryLock() bool {
|
||||
func (m *CrossGoroutineMutex) TryLock() bool {
|
||||
if atomic.CompareAndSwapInt32(m.state(), mutexUnlocked, mutexLocked) {
|
||||
if RaceEnabled {
|
||||
RaceAcquire(unsafe.Pointer(&m.Mutex))
|
||||
|
@ -47,3 +48,43 @@ func (m *Mutex) TryLock() bool {
|
|||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Mutex is a mutual exclusion lock. The zero value for a Mutex is an unlocked
// mutex.
//
// A Mutex must not be copied after first use.
//
// A Mutex must be unlocked by the same goroutine that locked it. This
// invariant is enforced with the 'checklocks' build tag.
type Mutex struct {
	// m is the underlying lock; Mutex adds goroutine-ownership tracking
	// (noteLock/noteUnlock) on top of it.
	m CrossGoroutineMutex
}
|
||||
|
||||
// Lock locks m. If the lock is already in use, the calling goroutine blocks
// until the mutex is available.
func (m *Mutex) Lock() {
	// Record the acquisition first so that, under the 'checklocks' build
	// tag, a self-deadlock is reported rather than hanging below.
	noteLock(unsafe.Pointer(m))
	m.m.Lock()
}
|
||||
|
||||
// Unlock unlocks m.
//
// Preconditions:
// * m is locked.
// * m was locked by this goroutine.
func (m *Mutex) Unlock() {
	// Drop the ownership record before releasing the underlying lock;
	// noteUnlock panics (under 'checklocks') if this goroutine does not
	// hold m.
	noteUnlock(unsafe.Pointer(m))
	m.m.Unlock()
}
|
||||
|
||||
// TryLock tries to acquire the mutex. It returns true if it succeeds and false
|
||||
// otherwise. TryLock does not block.
|
||||
func (m *Mutex) TryLock() bool {
|
||||
// Note lock first to enforce proper locking even if unsuccessful.
|
||||
noteLock(unsafe.Pointer(m))
|
||||
locked := m.m.TryLock()
|
||||
if !locked {
|
||||
noteUnlock(unsafe.Pointer(m))
|
||||
}
|
||||
return locked
|
||||
}
|
||||
|
|
|
@ -32,7 +32,12 @@ func runtimeSemrelease(s *uint32, handoff bool, skipframes int)
|
|||
// RWMutex is identical to sync.RWMutex, but adds the DowngradeLock,
|
||||
// TryLock and TryRLock methods.
|
||||
type RWMutex struct {
|
||||
w Mutex // held if there are pending writers
|
||||
// w is held if there are pending writers
|
||||
//
|
||||
// We use CrossGoroutineMutex rather than Mutex because the lock
|
||||
// annotation instrumentation in Mutex will trigger false positives in
|
||||
// the race detector when called inside of RaceDisable.
|
||||
w CrossGoroutineMutex
|
||||
writerSem uint32 // semaphore for writers to wait for completing readers
|
||||
readerSem uint32 // semaphore for readers to wait for completing writers
|
||||
readerCount int32 // number of pending readers
|
||||
|
|
|
@ -422,7 +422,10 @@ type endpoint struct {
|
|||
|
||||
// mu protects all endpoint fields unless documented otherwise. mu must
|
||||
// be acquired before interacting with the endpoint fields.
|
||||
mu sync.Mutex `state:"nosave"`
|
||||
//
|
||||
// During handshake, mu is locked by the protocol listen goroutine and
|
||||
// released by the handshake completion goroutine.
|
||||
mu sync.CrossGoroutineMutex `state:"nosave"`
|
||||
ownedByUser uint32
|
||||
|
||||
// state must be read/set using the EndpointState()/setEndpointState()
|
||||
|
|
Loading…
Reference in New Issue