package mutexes

import (
	"sync"
	"sync/atomic"
	"unsafe"
)

const (
	// possible lock types.
	lockTypeRead  = uint8(1) << 0
	lockTypeWrite = uint8(1) << 1

	// frequency of pool GC cycles
	// per no. of unlocks, i.e. one
	// GC every 'gcfreq' unlocks.
	gcfreq = 1024
)

// MutexMap is a structure that allows read / write locking
// per key, performing as you'd expect a map[string]*sync.RWMutex
// to perform, without you needing to worry about deadlocks
// between competing read / write locks and the map's own mutex.
// It uses memory pooling for the internal "mutex" (ish) types
// and performs self-eviction of keys.
//
// Under the hood this is achieved using a single mutex for the
// map, state tracking for individual keys, and some sync.Cond{}-like
// structures for sleeping / awaking awaiting goroutines.
type MutexMap struct {
	mapmu  sync.Mutex
	mumap  map[string]*rwmutex
	mupool rwmutexPool
	count  uint32
}

// checkInit ensures MutexMap is initialized
// (UNSAFE: mapmu must already be held).
func (mm *MutexMap) checkInit() {
	if mm.mumap == nil {
		mm.mumap = make(map[string]*rwmutex)
	}
}

// Lock acquires a write lock on key in map, returning an unlock function.
func (mm *MutexMap) Lock(key string) func() {
	return mm.lock(key, lockTypeWrite)
}

// RLock acquires a read lock on key in map, returning a runlock function.
func (mm *MutexMap) RLock(key string) func() {
	return mm.lock(key, lockTypeRead)
}
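
// A minimal usage sketch (the keys below are hypothetical; a zero
// value MutexMap is ready to use, since lock() calls checkInit):
//
//	var mm MutexMap
//
//	// Exclusive write lock on one key; other keys are unaffected.
//	unlock := mm.Lock("user:1")
//	defer unlock()
//
//	// Concurrent read locks on another key are fine.
//	runlock1 := mm.RLock("user:2")
//	runlock2 := mm.RLock("user:2")
//	runlock1()
//	runlock2()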

// lock is the underlying locking routine: it looks up (or pools a new)
// rwmutex for key, attempts the given lock type on it, and sleeps /
// retries until acquired, returning the matching unlock callback.
func (mm *MutexMap) lock(key string, lt uint8) func() {
	// Perform first map lock
	// and check initialization
	// OUTSIDE the main loop.
	mm.mapmu.Lock()
	mm.checkInit()

	for {
		// Check map for mu.
		mu := mm.mumap[key]

		if mu == nil {
			// Allocate new mutex.
			mu = mm.mupool.Acquire()
			mm.mumap[key] = mu
		}

		if !mu.Lock(lt) {
			// Wait for this mutex to unlock,
			// relocking the map mutex before
			// we loop around and retry.
			mu.WaitRelock(&mm.mapmu)
			continue
		}

		// Done with map.
		mm.mapmu.Unlock()

		// Return mutex unlock function.
		return func() { mm.unlock(key, mu) }
	}
}
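
// Concretely, a hypothetical interleaving across two goroutines:
//
//	unlock := mm.Lock("k") // goroutine A write-locks "k".
//	go func() {
//		runlock := mm.RLock("k") // goroutine B: mu.Lock fails, so it
//		defer runlock()          // sleeps in WaitRelock until...
//	}()
//	unlock() // ...A unlocks, broadcasting to B, which retries the loop.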

// unlock releases a single lock previously taken on key's mutex,
// evicting the mutex and releasing it back to the pool once it is
// fully unlocked with no waiters, and GC'ing the pool periodically.
func (mm *MutexMap) unlock(key string, mu *rwmutex) {
	// Get map lock.
	mm.mapmu.Lock()

	// Unlock mutex.
	if mu.Unlock() {
		// Mutex fully unlocked
		// with zero waiters. Self
		// evict and release it.
		delete(mm.mumap, key)
		mm.mupool.Release(mu)
	}

	if mm.count++; mm.count%gcfreq == 0 {
		// Every 'gcfreq' unlocks perform
		// a garbage collection to keep
		// us squeaky clean :]
		mm.mupool.GC()
	}

	// Done with map.
	mm.mapmu.Unlock()
}

// rwmutexPool is a very simple memory pool of rwmutexes.
type rwmutexPool struct {
	current []*rwmutex
	victim  []*rwmutex
}

// Acquire returns a rwmutex from the pool (or allocates a new one).
func (p *rwmutexPool) Acquire() *rwmutex {
	// First try the current queue.
	if l := len(p.current) - 1; l >= 0 {
		mu := p.current[l]
		p.current = p.current[:l]
		return mu
	}

	// Next try the victim queue.
	if l := len(p.victim) - 1; l >= 0 {
		mu := p.victim[l]
		p.victim = p.victim[:l]
		return mu
	}

	// Lastly, alloc new.
	mu := new(rwmutex)
	return mu
}

// Release places a rwmutex back in the pool.
func (p *rwmutexPool) Release(mu *rwmutex) {
	p.current = append(p.current, mu)
}

// GC will clear out unused entries from the rwmutexPool:
// the previous victim queue is dropped, and the current
// queue is demoted to become the new victim.
func (p *rwmutexPool) GC() {
	current := p.current
	p.current = nil
	p.victim = current
}
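
// A brief sketch of the pool lifecycle under these three methods:
//
//	var p rwmutexPool
//	mu := p.Acquire() // empty pool: allocates a fresh rwmutex
//	p.Release(mu)     // mu now sits in p.current
//	p.GC()            // p.current is demoted to p.victim
//	_ = p.Acquire()   // mu is reused from the victim queue
//	p.GC()            // entries not reused since the last GC are dropped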

// rwmutex represents a RW mutex when used correctly within
// a MutexMap. It should ONLY be accessed when protected by
// the outer map lock, except for the 'notifyList' which is
// a runtime internal structure borrowed from sync.Cond{}.
//
// This functions very similarly to a sync.Cond{}, but with
// lock state tracking, and returning on 'Broadcast()' whether
// any goroutines were actually awoken. It also has a less
// confusing API than sync.Cond{} with the outer locking
// mechanism we use, otherwise all Cond{}.L would reference
// the same outer map mutex.
type rwmutex struct {
	n notifyList // 'trigger' mechanism
	l int32      // no. locks
	t uint8      // lock type
}

// Lock will lock the mutex for the given lock type, in the
// sense that it will update the internal state tracker
// accordingly. Return value is true on successful lock.
func (mu *rwmutex) Lock(lt uint8) bool {
	switch mu.t {
	case lockTypeRead:
		// already read locked,
		// only permit more reads.
		if lt != lockTypeRead {
			return false
		}

	case lockTypeWrite:
		// already write locked,
		// no other locks allowed.
		return false

	default:
		// Fully unlocked,
		// set incoming type.
		mu.t = lt
	}

	// Update lock count.
	mu.l++

	return true
}
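
// To illustrate the state machine above (calls shown in sequence on
// one key's rwmutex, always made under the outer map lock):
//
//	mu := new(rwmutex)
//	mu.Lock(lockTypeRead)  // true:  unlocked -> read locked
//	mu.Lock(lockTypeRead)  // true:  readers may share
//	mu.Lock(lockTypeWrite) // false: writers excluded while read locked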

// Unlock will unlock the mutex, in the sense that it
// will update the internal state tracker accordingly.
// On totally unlocked state, it will awaken all
// sleeping goroutines waiting on this mutex. The return
// value is true only when fully unlocked with zero
// waiters, i.e. when the mutex is safe to evict.
func (mu *rwmutex) Unlock() bool {
	switch mu.l--; {
	case mu.l > 0 && mu.t == lockTypeWrite:
		panic("BUG: multiple writer locks")

	case mu.l < 0:
		panic("BUG: negative lock count")

	case mu.l == 0:
		// Fully unlocked.
		mu.t = 0

		// Awake all blocked goroutines and check
		// for change in the last notified ticket.
		before := atomic.LoadUint32(&mu.n.notify)
		runtime_notifyListNotifyAll(&mu.n)
		after := atomic.LoadUint32(&mu.n.notify)

		// If the ticket changed, this indicates
		// AT LEAST one goroutine was awoken.
		//
		// (before != after) => (waiters > 0)
		// (before == after) => (waiters = 0)
		return (before == after)

	default:
		// i.e. mutex still
		// locked by others.
		return false
	}
}

// WaitRelock expects a mutex to be passed in, already in the
// locked state. It increments the notifyList waiter count before
// unlocking the outer mutex and blocking on notifyList wait.
// On awake it will decrement the wait count and relock the outer mutex.
func (mu *rwmutex) WaitRelock(outer *sync.Mutex) {
	// Add ourselves to list while still
	// under protection of outer map lock.
	t := runtime_notifyListAdd(&mu.n)

	// Finished with
	// outer map lock.
	outer.Unlock()

	// Block until awoken by another
	// goroutine within mu.Unlock().
	runtime_notifyListWait(&mu.n, t)

	// Relock!
	outer.Lock()
}
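
// For comparison, with the public sync API this step would be a
// per-rwmutex sync.Cond whose L is the outer map mutex (sketch only;
// the real code skips sync.Cond so Unlock can report whether any
// waiters were actually awoken):
//
//	cond := sync.NewCond(outer) // outer already held
//	cond.Wait()                 // unlocks outer, sleeps, relocks outer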

// notifyList mirrors the runtime-internal notifyList structure
// used by sync.Cond{} (see runtime/sema.go). Unused fields are
// left un-named for safety.
type notifyList struct {
	_      uint32         // wait   uint32
	notify uint32         // notify uint32
	_      uintptr        // lock   mutex
	_      unsafe.Pointer // head   *sudog
	_      unsafe.Pointer // tail   *sudog
}

// See runtime/sema.go for documentation.
//
//go:linkname runtime_notifyListAdd sync.runtime_notifyListAdd
func runtime_notifyListAdd(l *notifyList) uint32

// See runtime/sema.go for documentation.
//
//go:linkname runtime_notifyListWait sync.runtime_notifyListWait
func runtime_notifyListWait(l *notifyList, t uint32)

// See runtime/sema.go for documentation.
//
//go:linkname runtime_notifyListNotifyAll sync.runtime_notifyListNotifyAll
func runtime_notifyListNotifyAll(l *notifyList)

// Ensure that sync and runtime agree on size of notifyList.
//
//go:linkname runtime_notifyListCheck sync.runtime_notifyListCheck
func runtime_notifyListCheck(size uintptr)

func init() {
	var n notifyList
	runtime_notifyListCheck(unsafe.Sizeof(n))
}