Rename DowngradableRWMutex to RWMutex.

Also renames TMutex to Mutex.

These custom mutexes aren't any worse than the standard library versions (same
code), so having both seems redundant.

PiperOrigin-RevId: 290873587
Ian Gudger 2020-01-21 19:23:26 -08:00 committed by gVisor bot
parent d0e75f2bef
commit 6a59e7f510
7 changed files with 49 additions and 55 deletions
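
For context only (not part of the change itself), a minimal sketch of how callers use the renamed types; the import path is assumed to be gvisor.dev/gvisor/pkg/sync:

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/sync" // assumed import path for the renamed package
)

func main() {
	// Mutex (formerly TMutex) is the standard sync.Mutex plus a
	// non-blocking TryLock.
	var mu sync.Mutex
	if mu.TryLock() {
		fmt.Println("acquired without blocking")
		mu.Unlock()
	}

	// RWMutex (formerly DowngradableRWMutex) is the standard sync.RWMutex
	// plus TryLock, TryRLock, and DowngradeLock.
	var rw sync.RWMutex
	rw.Lock()          // take the write lock
	rw.DowngradeLock() // atomically trade it for a read lock
	rw.RUnlock()
}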


@ -198,7 +198,7 @@ type overlayEntry struct {
upper *Inode
// dirCacheMu protects dirCache.
dirCacheMu sync.DowngradableRWMutex `state:"nosave"`
dirCacheMu sync.RWMutex `state:"nosave"`
// dirCache is cache of DentAttrs from upper and lower Inodes.
dirCache *SortedDentryMap


@ -80,7 +80,7 @@ type MemoryManager struct {
users int32
// mappingMu is analogous to Linux's struct mm_struct::mmap_sem.
mappingMu sync.DowngradableRWMutex `state:"nosave"`
mappingMu sync.RWMutex `state:"nosave"`
// vmas stores virtual memory areas. Since vmas are stored by value,
// clients should usually use vmaIterator.ValuePtr() instead of
@ -123,7 +123,7 @@ type MemoryManager struct {
// activeMu is loosely analogous to Linux's struct
// mm_struct::page_table_lock.
activeMu sync.DowngradableRWMutex `state:"nosave"`
activeMu sync.RWMutex `state:"nosave"`
// pmas stores platform mapping areas used to implement vmas. Since pmas
// are stored by value, clients should usually use pmaIterator.ValuePtr()


@ -11,12 +11,6 @@ import (
// Aliases of standard library types.
type (
// Mutex is an alias of sync.Mutex.
Mutex = sync.Mutex
// RWMutex is an alias of sync.RWMutex.
RWMutex = sync.RWMutex
// Cond is an alias of sync.Cond.
Cond = sync.Cond


@ -18,7 +18,7 @@ import (
"testing"
)
func parallelReader(m *DowngradableRWMutex, clocked, cunlock, cdone chan bool) {
func parallelReader(m *RWMutex, clocked, cunlock, cdone chan bool) {
m.RLock()
clocked <- true
<-cunlock
@ -28,7 +28,7 @@ func parallelReader(m *DowngradableRWMutex, clocked, cunlock, cdone chan bool) {
func doTestParallelReaders(numReaders, gomaxprocs int) {
runtime.GOMAXPROCS(gomaxprocs)
var m DowngradableRWMutex
var m RWMutex
clocked := make(chan bool)
cunlock := make(chan bool)
cdone := make(chan bool)
@ -55,7 +55,7 @@ func TestParallelReaders(t *testing.T) {
doTestParallelReaders(4, 2)
}
func reader(rwm *DowngradableRWMutex, numIterations int, activity *int32, cdone chan bool) {
func reader(rwm *RWMutex, numIterations int, activity *int32, cdone chan bool) {
for i := 0; i < numIterations; i++ {
rwm.RLock()
n := atomic.AddInt32(activity, 1)
@ -70,7 +70,7 @@ func reader(rwm *DowngradableRWMutex, numIterations int, activity *int32, cdone
cdone <- true
}
func writer(rwm *DowngradableRWMutex, numIterations int, activity *int32, cdone chan bool) {
func writer(rwm *RWMutex, numIterations int, activity *int32, cdone chan bool) {
for i := 0; i < numIterations; i++ {
rwm.Lock()
n := atomic.AddInt32(activity, 10000)
@ -85,7 +85,7 @@ func writer(rwm *DowngradableRWMutex, numIterations int, activity *int32, cdone
cdone <- true
}
func downgradingWriter(rwm *DowngradableRWMutex, numIterations int, activity *int32, cdone chan bool) {
func downgradingWriter(rwm *RWMutex, numIterations int, activity *int32, cdone chan bool) {
for i := 0; i < numIterations; i++ {
rwm.Lock()
n := atomic.AddInt32(activity, 10000)
@ -112,7 +112,7 @@ func HammerDowngradableRWMutex(gomaxprocs, numReaders, numIterations int) {
runtime.GOMAXPROCS(gomaxprocs)
// Number of active readers + 10000 * number of active writers.
var activity int32
var rwm DowngradableRWMutex
var rwm RWMutex
cdone := make(chan bool)
go writer(&rwm, numIterations, &activity, cdone)
go downgradingWriter(&rwm, numIterations, &activity, cdone)
@ -150,56 +150,56 @@ func TestDowngradableRWMutex(t *testing.T) {
}
func TestRWDoubleTryLock(t *testing.T) {
var m DowngradableRWMutex
if !m.TryLock() {
var rwm RWMutex
if !rwm.TryLock() {
t.Fatal("failed to aquire lock")
}
if m.TryLock() {
if rwm.TryLock() {
t.Fatal("unexpectedly succeeded in aquiring locked mutex")
}
}
func TestRWTryLockAfterLock(t *testing.T) {
var m DowngradableRWMutex
m.Lock()
if m.TryLock() {
var rwm RWMutex
rwm.Lock()
if rwm.TryLock() {
t.Fatal("unexpectedly succeeded in aquiring locked mutex")
}
}
func TestRWTryLockUnlock(t *testing.T) {
var m DowngradableRWMutex
if !m.TryLock() {
var rwm RWMutex
if !rwm.TryLock() {
t.Fatal("failed to aquire lock")
}
m.Unlock()
if !m.TryLock() {
rwm.Unlock()
if !rwm.TryLock() {
t.Fatal("failed to aquire lock after unlock")
}
}
func TestTryRLockAfterLock(t *testing.T) {
var m DowngradableRWMutex
m.Lock()
if m.TryRLock() {
var rwm RWMutex
rwm.Lock()
if rwm.TryRLock() {
t.Fatal("unexpectedly succeeded in aquiring locked mutex")
}
}
func TestTryLockAfterRLock(t *testing.T) {
var m DowngradableRWMutex
m.RLock()
if m.TryLock() {
var rwm RWMutex
rwm.RLock()
if rwm.TryLock() {
t.Fatal("unexpectedly succeeded in aquiring locked mutex")
}
}
func TestDoubleTryRLock(t *testing.T) {
var m DowngradableRWMutex
if !m.TryRLock() {
var rwm RWMutex
if !rwm.TryRLock() {
t.Fatal("failed to aquire lock")
}
if !m.TryRLock() {
if !rwm.TryRLock() {
t.Fatal("failed to read aquire read locked lock")
}
}


@ -29,10 +29,10 @@ func runtimeSemacquire(s *uint32)
//go:linkname runtimeSemrelease sync.runtime_Semrelease
func runtimeSemrelease(s *uint32, handoff bool, skipframes int)
// DowngradableRWMutex is identical to sync.RWMutex, but adds the DowngradeLock,
// RWMutex is identical to sync.RWMutex, but adds the DowngradeLock,
// TryLock and TryRLock methods.
type DowngradableRWMutex struct {
w TMutex // held if there are pending writers
type RWMutex struct {
w Mutex // held if there are pending writers
writerSem uint32 // semaphore for writers to wait for completing readers
readerSem uint32 // semaphore for readers to wait for completing writers
readerCount int32 // number of pending readers
@ -43,7 +43,7 @@ const rwmutexMaxReaders = 1 << 30
// TryRLock locks rw for reading. It returns true if it succeeds and false
// otherwise. It does not block.
func (rw *DowngradableRWMutex) TryRLock() bool {
func (rw *RWMutex) TryRLock() bool {
if RaceEnabled {
RaceDisable()
}
@ -67,7 +67,7 @@ func (rw *DowngradableRWMutex) TryRLock() bool {
}
// RLock locks rw for reading.
func (rw *DowngradableRWMutex) RLock() {
func (rw *RWMutex) RLock() {
if RaceEnabled {
RaceDisable()
}
@ -82,14 +82,14 @@ func (rw *DowngradableRWMutex) RLock() {
}
// RUnlock undoes a single RLock call.
func (rw *DowngradableRWMutex) RUnlock() {
func (rw *RWMutex) RUnlock() {
if RaceEnabled {
RaceReleaseMerge(unsafe.Pointer(&rw.writerSem))
RaceDisable()
}
if r := atomic.AddInt32(&rw.readerCount, -1); r < 0 {
if r+1 == 0 || r+1 == -rwmutexMaxReaders {
panic("RUnlock of unlocked DowngradableRWMutex")
panic("RUnlock of unlocked RWMutex")
}
// A writer is pending.
if atomic.AddInt32(&rw.readerWait, -1) == 0 {
@ -104,7 +104,7 @@ func (rw *DowngradableRWMutex) RUnlock() {
// TryLock locks rw for writing. It returns true if it succeeds and false
// otherwise. It does not block.
func (rw *DowngradableRWMutex) TryLock() bool {
func (rw *RWMutex) TryLock() bool {
if RaceEnabled {
RaceDisable()
}
@ -131,7 +131,7 @@ func (rw *DowngradableRWMutex) TryLock() bool {
}
// Lock locks rw for writing.
func (rw *DowngradableRWMutex) Lock() {
func (rw *RWMutex) Lock() {
if RaceEnabled {
RaceDisable()
}
@ -150,7 +150,7 @@ func (rw *DowngradableRWMutex) Lock() {
}
// Unlock unlocks rw for writing.
func (rw *DowngradableRWMutex) Unlock() {
func (rw *RWMutex) Unlock() {
if RaceEnabled {
RaceRelease(unsafe.Pointer(&rw.writerSem))
RaceRelease(unsafe.Pointer(&rw.readerSem))
@ -159,7 +159,7 @@ func (rw *DowngradableRWMutex) Unlock() {
// Announce to readers there is no active writer.
r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders)
if r >= rwmutexMaxReaders {
panic("Unlock of unlocked DowngradableRWMutex")
panic("Unlock of unlocked RWMutex")
}
// Unblock blocked readers, if any.
for i := 0; i < int(r); i++ {
@ -173,7 +173,7 @@ func (rw *DowngradableRWMutex) Unlock() {
}
// DowngradeLock atomically unlocks rw for writing and locks it for reading.
func (rw *DowngradableRWMutex) DowngradeLock() {
func (rw *RWMutex) DowngradeLock() {
if RaceEnabled {
RaceRelease(unsafe.Pointer(&rw.readerSem))
RaceDisable()
@ -181,7 +181,7 @@ func (rw *DowngradableRWMutex) DowngradeLock() {
// Announce to readers there is no active writer and one additional reader.
r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders+1)
if r >= rwmutexMaxReaders+1 {
panic("DowngradeLock of unlocked DowngradableRWMutex")
panic("DowngradeLock of unlocked RWMutex")
}
// Unblock blocked readers, if any. Note that this loop starts as 1 since r
// includes this goroutine.
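
As an illustration only (not part of this diff), a hypothetical caller might use DowngradeLock to publish an update and then keep reading without letting another writer in between; this sketch assumes the same pkg/sync import as above:

// updateThenScan is a hypothetical helper: it mutates the map under the
// write lock, then downgrades to a read lock before scanning, so no
// writer can slip in between the update and the scan.
func updateThenScan(rw *sync.RWMutex, counts map[string]int, key string) int {
	rw.Lock()
	counts[key]++      // mutation needs the write lock
	rw.DowngradeLock() // atomically convert the write lock to a read lock
	defer rw.RUnlock()

	total := 0
	for _, v := range counts { // other readers may run concurrently here
		total += v
	}
	return total
}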


@ -30,7 +30,7 @@ func TestStructSize(t *testing.T) {
//
// The correctness of this package relies on these remaining in sync.
func TestFieldValues(t *testing.T) {
var m TMutex
var m Mutex
m.Lock()
if got := *m.state(); got != mutexLocked {
t.Errorf("got locked sync.Mutex.state = %d, want = %d", got, mutexLocked)
@ -42,7 +42,7 @@ func TestFieldValues(t *testing.T) {
}
func TestDoubleTryLock(t *testing.T) {
var m TMutex
var m Mutex
if !m.TryLock() {
t.Fatal("failed to aquire lock")
}
@ -52,7 +52,7 @@ func TestDoubleTryLock(t *testing.T) {
}
func TestTryLockAfterLock(t *testing.T) {
var m TMutex
var m Mutex
m.Lock()
if m.TryLock() {
t.Fatal("unexpectedly succeeded in aquiring locked mutex")
@ -60,7 +60,7 @@ func TestTryLockAfterLock(t *testing.T) {
}
func TestTryLockUnlock(t *testing.T) {
var m TMutex
var m Mutex
if !m.TryLock() {
t.Fatal("failed to aquire lock")
}


@ -17,8 +17,8 @@ import (
"unsafe"
)
// TMutex is a try lock.
type TMutex struct {
// Mutex is a try lock.
type Mutex struct {
sync.Mutex
}
@ -27,7 +27,7 @@ type syncMutex struct {
sema uint32
}
func (m *TMutex) state() *int32 {
func (m *Mutex) state() *int32 {
return &(*syncMutex)(unsafe.Pointer(&m.Mutex)).state
}
@ -38,7 +38,7 @@ const (
// TryLock tries to aquire the mutex. It returns true if it succeeds and false
// otherwise. TryLock does not block.
func (m *TMutex) TryLock() bool {
func (m *Mutex) TryLock() bool {
if atomic.CompareAndSwapInt32(m.state(), mutexUnlocked, mutexLocked) {
if RaceEnabled {
RaceAcquire(unsafe.Pointer(&m.Mutex))