netstack: make listening tcp socket close state setting and cleanup atomic.

Otherwise the socket saving logic might unexpectedly find workers still
running for closed sockets.

PiperOrigin-RevId: 210018905
Change-Id: I443a04d355613f5f9983252cc6863bff6e0eda3a
Zhaozhong Ni 2018-08-23 16:13:22 -07:00 committed by Shentubot
parent ba8f6ba8c8
commit e855e9cebc
1 changed file with 3 additions and 5 deletions

@@ -363,11 +363,6 @@ func (e *endpoint) protocolListenLoop(rcvWnd seqnum.Size) *tcpip.Error {
 		e.mu.Lock()
 		e.state = stateClosed
 
-		// Notify waiters that the endpoint is shutdown.
-		e.mu.Unlock()
-		e.waiterQueue.Notify(waiter.EventIn | waiter.EventOut)
-		e.mu.Lock()
-
 		// Do cleanup if needed.
 		e.completeWorkerLocked()
 
@@ -375,6 +370,9 @@ func (e *endpoint) protocolListenLoop(rcvWnd seqnum.Size) *tcpip.Error {
 			close(e.drainDone)
 		}
 		e.mu.Unlock()
+
+		// Notify waiters that the endpoint is shutdown.
+		e.waiterQueue.Notify(waiter.EventIn | waiter.EventOut)
 	}()
 
 	e.mu.Lock()
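
For reference, a minimal standalone Go sketch of the locking pattern this change
enforces: the closed-state transition and the worker cleanup happen in one
critical section, and waiters are notified only after the lock is released. The
endpoint, stateClosed, drainDone, and Notify names mirror the diff above;
everything else (the waiterQueue stub, the workerDone flag, main) is
illustrative scaffolding under assumption, not gVisor code.

// Sketch only: mirrors the post-change ordering in protocolListenLoop's
// deferred cleanup, with gVisor's types replaced by stand-ins.
package main

import (
	"fmt"
	"sync"
)

const stateClosed = 2 // stand-in for the diff's stateClosed value

type waiterQueue struct{}

// Notify is a stub for waiterQueue.Notify(waiter.EventIn | waiter.EventOut).
func (q *waiterQueue) Notify() {
	fmt.Println("waiters notified: endpoint is shut down")
}

type endpoint struct {
	mu          sync.Mutex
	state       int
	workerDone  bool          // stand-in for completeWorkerLocked's bookkeeping
	drainDone   chan struct{} // closed to signal that draining finished
	waiterQueue *waiterQueue
}

func (e *endpoint) listenLoopCleanup() {
	e.mu.Lock()
	e.state = stateClosed

	// Cleanup runs in the same critical section as the state change,
	// so a saver holding e.mu can never observe stateClosed while the
	// worker is still marked as running.
	e.workerDone = true

	if e.drainDone != nil {
		close(e.drainDone)
	}
	e.mu.Unlock()

	// Notify only after unlocking, so woken waiters can acquire e.mu
	// immediately instead of contending with this goroutine.
	e.waiterQueue.Notify()
}

func main() {
	e := &endpoint{waiterQueue: &waiterQueue{}}
	e.listenLoopCleanup()
	fmt.Println("state:", e.state, "worker done:", e.workerDone)
}

Before this change, the lock was dropped between the state write and
completeWorkerLocked(), opening a window in which the save logic could observe
a closed socket whose listen worker had not yet finished.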