@@ -13,8 +13,8 @@ import (
 // Store wraps an in-memory cache around an underlying types.KVStore.
 type Store struct {
 	mtx       sync.RWMutex
-	cache     *sync.Map
-	deleted   *sync.Map
+	cache     map[string]*types.CValue
+	deleted   map[string]struct{}
 	parent    types.KVStore
 	storeKey  types.StoreKey
 	cacheSize int
@@ -25,8 +25,8 @@ var _ types.CacheKVStore = (*Store)(nil)
 // NewStore creates a new Store object
 func NewStore(parent types.KVStore, storeKey types.StoreKey, cacheSize int) *Store {
 	return &Store{
-		cache:     &sync.Map{},
-		deleted:   &sync.Map{},
+		cache:     make(map[string]*types.CValue),
+		deleted:   make(map[string]struct{}),
 		parent:    parent,
 		storeKey:  storeKey,
 		cacheSize: cacheSize,
@@ -44,8 +44,11 @@ func (store *Store) GetStoreType() types.StoreType {

 // getFromCache queries the write-through cache for a value by key.
 func (store *Store) getFromCache(key []byte) []byte {
-	if cv, ok := store.cache.Load(UnsafeBytesToStr(key)); ok {
-		return cv.(*types.CValue).Value()
+	store.mtx.RLock()
+	cv, ok := store.cache[UnsafeBytesToStr(key)]
+	store.mtx.RUnlock()
+	if ok {
+		return cv.Value()
 	}
 	return store.parent.Get(key)
 }
@@ -84,12 +87,11 @@ func (store *Store) Write() {
 	// Not the best, but probably not a bottleneck depending.
 	keys := []string{}

-	store.cache.Range(func(key, value any) bool {
-		if value.(*types.CValue).Dirty() {
-			keys = append(keys, key.(string))
+	for key, value := range store.cache {
+		if value.Dirty() {
+			keys = append(keys, key)
 		}
-		return true
-	})
+	}
 	sort.Strings(keys)
 	// TODO: Consider allowing usage of Batch, which would allow the write to
 	// at least happen atomically.
@@ -103,10 +105,10 @@ func (store *Store) Write() {
 			continue
 		}

-		cacheValue, ok := store.cache.Load(key)
-		if ok && cacheValue.(*types.CValue).Value() != nil {
+		cacheValue, ok := store.cache[key]
+		if ok && cacheValue.Value() != nil {
 			// It already exists in the parent, hence delete it.
-			store.parent.Set([]byte(key), cacheValue.(*types.CValue).Value())
+			store.parent.Set([]byte(key), cacheValue.Value())
 		}
 	}

@@ -115,14 +117,11 @@ func (store *Store) Write() {
 	// writes immediately visible until Commit(). By keeping the cache populated
 	// with clean entries, subsequent reads will still hit the cache instead of
 	// falling through to the parent which can't read uncommitted data.
-	store.cache.Range(func(key, value any) bool {
-		cv := value.(*types.CValue)
-		// Replace with a clean (non-dirty) version of the same value
-		store.cache.Store(key, types.NewCValue(cv.Value(), false))
-		return true
-	})
+	for key, cv := range store.cache {
+		store.cache[key] = types.NewCValue(cv.Value(), false)
+	}
 	// Clear the deleted map since those deletes have been sent to parent
-	store.deleted = &sync.Map{}
+	store.deleted = make(map[string]struct{})
 }

 // CacheWrap implements CacheWrapper.
@@ -142,18 +141,20 @@ func (store *Store) VersionExists(version int64) bool {
 // Only entrypoint to mutate store.cache.
 func (store *Store) setCacheValue(key, value []byte, deleted bool, dirty bool) {
 	types.AssertValidKey(key)
+	store.mtx.Lock()
+	defer store.mtx.Unlock()

 	keyStr := UnsafeBytesToStr(key)
-	store.cache.Store(keyStr, types.NewCValue(value, dirty))
+	store.cache[keyStr] = types.NewCValue(value, dirty)
 	if deleted {
-		store.deleted.Store(keyStr, struct{}{})
+		store.deleted[keyStr] = struct{}{}
 	} else {
-		store.deleted.Delete(keyStr)
+		delete(store.deleted, keyStr)
 	}
 }

 func (store *Store) isDeleted(key string) bool {
-	_, ok := store.deleted.Load(key)
+	_, ok := store.deleted[key]
 	return ok
 }

@@ -169,24 +170,26 @@ func (store *Store) DeleteAll(start, end []byte) error {
 }

 func (store *Store) GetAllKeyStrsInRange(start, end []byte) (res []string) {
+	store.mtx.RLock()
+	defer store.mtx.RUnlock()
+
 	keyStrs := map[string]struct{}{}
 	for _, pk := range store.parent.GetAllKeyStrsInRange(start, end) {
 		keyStrs[pk] = struct{}{}
 	}
-	store.cache.Range(func(key, value any) bool {
-		kbz := []byte(key.(string))
+	for key, value := range store.cache {
+		kbz := []byte(key)
 		if bytes.Compare(kbz, start) < 0 || bytes.Compare(kbz, end) >= 0 {
 			// we don't want to break out of the iteration since cache isn't sorted
-			return true
+			continue
 		}
-		cv := value.(*types.CValue)
+		cv := value
 		if cv.Value() == nil {
-			delete(keyStrs, key.(string))
+			delete(keyStrs, key)
 		} else {
-			keyStrs[key.(string)] = struct{}{}
+			keyStrs[key] = struct{}{}
 		}
-		return true
-	})
+	}
 	for k := range keyStrs {
 		res = append(res, k)
 	}
0 commit comments