implement SharedShardCache
robaho committed Nov 25, 2018
1 parent 9a4a962 commit aed8afc
Showing 2 changed files with 40 additions and 3 deletions.
36 changes: 36 additions & 0 deletions maps.go
@@ -2,6 +2,7 @@ package go_concurrency

import (
"sync"
"sync/atomic"
)

func nextPowerOf2(v int) int {
@@ -104,6 +105,41 @@ func (m *ShardCache) Put(key int, value int) {
    m.maps[key%10][key] = value
}

const SharedShardMask = 16 - 1

// SharedShardCache splits keys across 16 shards; key&SharedShardMask selects the
// shard. Each shard holds an immutable map[int]int in an atomic.Value: Get reads
// it lock-free, while Put copies the shard's map under the shard's mutex and
// atomically swaps in the new copy.
type SharedShardCache struct {
    maps  [16]atomic.Value
    locks [16]sync.Mutex
}

func NewSharedShardCache() *SharedShardCache {
    m := SharedShardCache{}
    for i := 0; i < 16; i++ {
        m.maps[i].Store(make(map[int]int))
    }
    return &m
}

func (m *SharedShardCache) Get(key int) int {
    // Load the shard's map in place; copying the atomic.Value first could race
    // with a concurrent Store.
    m0 := m.maps[key&SharedShardMask].Load().(map[int]int)
    return m0[key]
}

func (m *SharedShardCache) Put(key int, value int) {
    // Take the shard's mutex by address; copying a sync.Mutex would break the locking.
    lock := &m.locks[key&SharedShardMask]
    lock.Lock()
    defer lock.Unlock()
    m0 := m.maps[key&SharedShardMask].Load().(map[int]int)
    // Copy-on-write: build a new map and atomically store it. This could be
    // optimized: if the key already exists in the map, the value could be updated
    // in the current map and the same map re-stored to enforce the memory fence.
    m1 := make(map[int]int)
    for k, v := range m0 {
        m1[k] = v
    }
    m1[key] = value
    m.maps[key&SharedShardMask].Store(m1)
}

type UnsharedCache map[int]int

func NewUnsharedCache() *UnsharedCache {
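For illustration only, and not part of the commit: a minimal in-package sketch of how the new SharedShardCache might be exercised from several goroutines (the helper name is made up for the example). Readers stay lock-free while writers serialize per shard.

package go_concurrency

import "sync"

// exampleSharedShardCache is a hypothetical helper: a few goroutines interleave
// reads and writes against a single shared cache on disjoint key ranges.
func exampleSharedShardCache() {
    c := NewSharedShardCache()
    var wg sync.WaitGroup
    for g := 0; g < 4; g++ {
        wg.Add(1)
        go func(base int) {
            defer wg.Done()
            for i := 0; i < 1000; i++ {
                c.Put(base+i, i)  // copy-on-write under the shard's mutex
                _ = c.Get(base+i) // lock-free atomic load
            }
        }(g * 1000)
    }
    wg.Wait()
}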
7 changes: 4 additions & 3 deletions maps_test.go
@@ -9,14 +9,15 @@ import (
"time"
)

-const NGOS = 2 // number of concurrent goroutines for read/load tests
+const NGOS = 8 // number of concurrent goroutines for read/load tests
const Mask = (1024 * 1024) - 1

var um = go_concurrency.NewUnsharedCache()
var lm = go_concurrency.NewLockCache()
var sm = go_concurrency.NewSyncCache()
var cm = go_concurrency.NewChannelCache()
var sc = go_concurrency.NewShardCache()
var ssc = go_concurrency.NewSharedShardCache()
var im = go_concurrency.NewIntMap(256000) // so there are 4x collisions
var im2 = go_concurrency.NewIntMap(1000000) // so there are no collisions
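Assuming NewIntMap rounds its requested size up to the next power of two (nextPowerOf2 in maps.go), the sizing comments work out as follows: 256000 rounds up to 2^18 = 262144 buckets while the benchmark keys span Mask+1 = 2^20 = 1048576 values, roughly 4 keys per bucket; 1000000 rounds up to 2^20 = 1048576 buckets, one key per bucket.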

@@ -80,8 +81,8 @@ func BenchmarkMain(m *testing.B) {
    m.ResetTimer()

    impls := []go_concurrency.Cache{um, lm, sm, cm, sc, ssc, im, im2}
-    names := []string{"unshared", "lock", "sync", "channel", "shard", "intmap", "intmap2"}
-    multi := []bool{false, true, true, true, false, true, true}
+    names := []string{"unshared", "lock", "sync", "channel", "shard", "shareshard", "intmap", "intmap2"}
+    multi := []bool{false, true, true, true, false, true, true, true}

    for i := 0; i < len(impls); i++ {
        impl := impls[i]
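The loop body is truncated above. As a rough sketch only (the benchmark body itself is not shown in this diff), a table-driven loop like this typically runs one sub-benchmark per implementation and skips concurrent writes when the corresponding multi flag is false. The sub-benchmark names, the use of RunParallel, and the math/rand import are illustrative assumptions; the sketch only assumes the go_concurrency.Cache interface exposes the Get(int) int and Put(int, int) methods that the cache types in maps.go implement.

for i := 0; i < len(impls); i++ {
    impl, name, safe := impls[i], names[i], multi[i]
    m.Run(name+".get", func(b *testing.B) {
        b.RunParallel(func(pb *testing.PB) {
            r := rand.New(rand.NewSource(time.Now().UnixNano()))
            for pb.Next() {
                _ = impl.Get(r.Int() & Mask)
            }
        })
    })
    if safe { // only implementations that tolerate concurrent writers
        m.Run(name+".put", func(b *testing.B) {
            b.RunParallel(func(pb *testing.PB) {
                r := rand.New(rand.NewSource(time.Now().UnixNano()))
                for pb.Next() {
                    k := r.Int() & Mask
                    impl.Put(k, k)
                }
            })
        })
    }
}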
