@@ -85,11 +85,17 @@ type FilterMaps struct {
 	// fields written by the indexer and read by matcher backend. Indexer can
 	// read them without a lock and write them under indexLock write lock.
 	// Matcher backend can read them under indexLock read lock.
-	indexLock           sync.RWMutex
-	indexedRange        filterMapsRange
-	cleanedEpochsBefore uint32     // all unindexed data cleaned before this point
-	indexedView         *ChainView // always consistent with the log index
-	hasTempRange        bool
+	indexLock    sync.RWMutex
+	indexedRange filterMapsRange
+	indexedView  *ChainView // always consistent with the log index
+	hasTempRange bool
+
+	// cleanedEpochsBefore indicates that all unindexed data before this point
+	// has been cleaned.
+	//
+	// This field is only accessed and modified within tryUnindexTail, so no
+	// explicit locking is required.
+	cleanedEpochsBefore uint32
 
 	// also accessed by indexer and matcher backend but no locking needed.
 	filterMapCache  *lru.Cache[uint32, filterMap]
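The struct comment above pins down the locking discipline: the indexer is the only writer of these fields and takes the indexLock write lock to mutate them (it may read them without a lock), while the matcher backend must hold the read lock to read them. A minimal, self-contained sketch of that pattern, with placeholder type and field names rather than the real FilterMaps code:

package main

import (
	"fmt"
	"sync"
)

// index stands in for the FilterMaps fields guarded by indexLock; the names and
// types here are simplified placeholders.
type index struct {
	mu   sync.RWMutex
	view string // stands in for indexedView
}

// setView models the indexer side: writes happen under the write lock.
func (i *index) setView(v string) {
	i.mu.Lock()
	defer i.mu.Unlock()
	i.view = v
}

// currentView models the matcher backend side: reads happen under the read lock.
func (i *index) currentView() string {
	i.mu.RLock()
	defer i.mu.RUnlock()
	return i.view
}

func main() {
	var idx index
	idx.setView("chain view at head #123")
	fmt.Println(idx.currentView())
}

The relocated cleanedEpochsBefore field deliberately sits outside this scheme: per the new comment, it is only touched from tryUnindexTail, so it needs no locking at all.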
@@ -248,15 +254,16 @@ func NewFilterMaps(db ethdb.KeyValueStore, initView *ChainView, historyCutoff, f
 		},
 		// deleting last unindexed epoch might have been interrupted by shutdown
 		cleanedEpochsBefore: max(rs.MapsFirst>>params.logMapsPerEpoch, 1) - 1,
-		historyCutoff:       historyCutoff,
-		finalBlock:          finalBlock,
-		matcherSyncCh:       make(chan *FilterMapsMatcherBackend),
-		matchers:            make(map[*FilterMapsMatcherBackend]struct{}),
-		filterMapCache:      lru.NewCache[uint32, filterMap](cachedFilterMaps),
-		lastBlockCache:      lru.NewCache[uint32, lastBlockOfMap](cachedLastBlocks),
-		lvPointerCache:      lru.NewCache[uint64, uint64](cachedLvPointers),
-		baseRowsCache:       lru.NewCache[uint64, [][]uint32](cachedBaseRows),
-		renderSnapshots:     lru.NewCache[uint64, *renderedMap](cachedRenderSnapshots),
+
+		historyCutoff:   historyCutoff,
+		finalBlock:      finalBlock,
+		matcherSyncCh:   make(chan *FilterMapsMatcherBackend),
+		matchers:        make(map[*FilterMapsMatcherBackend]struct{}),
+		filterMapCache:  lru.NewCache[uint32, filterMap](cachedFilterMaps),
+		lastBlockCache:  lru.NewCache[uint32, lastBlockOfMap](cachedLastBlocks),
+		lvPointerCache:  lru.NewCache[uint64, uint64](cachedLvPointers),
+		baseRowsCache:   lru.NewCache[uint64, [][]uint32](cachedBaseRows),
+		renderSnapshots: lru.NewCache[uint64, *renderedMap](cachedRenderSnapshots),
 	}
 
 	// Set initial indexer target.
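As the comment notes, deleting the last unindexed epoch might have been interrupted by shutdown, so cleanedEpochsBefore starts one epoch before the tail epoch rather than at it. A worked example of the arithmetic; the epoch-size constant below is an assumed illustrative value, not the real params.logMapsPerEpoch:

package main

import "fmt"

// Assumed for illustration: 2^6 = 64 filter maps per epoch. The real value lives
// in the params package. Requires Go 1.21+ for the built-in max.
const logMapsPerEpoch = 6

// initialCleanedEpochsBefore mirrors the initializer above: take the epoch that
// contains the first retained map (but never below 1), then back off by one
// epoch, since that epoch may only have been partially deleted before shutdown.
func initialCleanedEpochsBefore(mapsFirst uint32) uint32 {
	return max(mapsFirst>>logMapsPerEpoch, 1) - 1
}

func main() {
	fmt.Println(initialCleanedEpochsBefore(0))   // tail still at map 0  -> 0
	fmt.Println(initialCleanedEpochsBefore(130)) // first map in epoch 2 -> 1
}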
@@ -444,6 +451,7 @@ func (f *FilterMaps) safeDeleteWithLogs(deleteFn func(db ethdb.KeyValueStore, ha
 
 // setRange updates the indexed chain view and covered range and also adds the
 // changes to the given batch.
+//
 // Note that this function assumes that the index write lock is being held.
 func (f *FilterMaps) setRange(batch ethdb.KeyValueWriter, newView *ChainView, newRange filterMapsRange, isTempRange bool) {
 	f.indexedView = newView
@@ -477,6 +485,7 @@ func (f *FilterMaps) setRange(batch ethdb.KeyValueWriter, newView *ChainView, ne
 // Note that this function assumes that the log index structure is consistent
 // with the canonical chain at the point where the given log value index points.
 // If this is not the case then an invalid result or an error may be returned.
+//
 // Note that this function assumes that the indexer read lock is being held when
 // called from outside the indexerLoop goroutine.
 func (f *FilterMaps) getLogByLvIndex(lvIndex uint64) (*types.Log, error) {
@@ -655,6 +664,7 @@ func (f *FilterMaps) mapRowIndex(mapIndex, rowIndex uint32) uint64 {
 // getBlockLvPointer returns the starting log value index where the log values
 // generated by the given block are located. If blockNumber is beyond the current
 // head then the first unoccupied log value index is returned.
+//
 // Note that this function assumes that the indexer read lock is being held when
 // called from outside the indexerLoop goroutine.
 func (f *FilterMaps) getBlockLvPointer(blockNumber uint64) (uint64, error) {
@@ -762,7 +772,7 @@ func (f *FilterMaps) deleteTailEpoch(epoch uint32) (bool, error) {
 		return false, errors.New("invalid tail epoch number")
 	}
 	// remove index data
-	if err := f.safeDeleteWithLogs(func(db ethdb.KeyValueStore, hashScheme bool, stopCb func(bool) bool) error {
+	deleteFn := func(db ethdb.KeyValueStore, hashScheme bool, stopCb func(bool) bool) error {
 		first := f.mapRowIndex(firstMap, 0)
 		count := f.mapRowIndex(firstMap+f.mapsPerEpoch, 0) - first
 		if err := rawdb.DeleteFilterMapRows(f.db, common.NewRange(first, count), hashScheme, stopCb); err != nil {
@@ -786,10 +796,13 @@ func (f *FilterMaps) deleteTailEpoch(epoch uint32) (bool, error) {
 			f.lvPointerCache.Remove(blockNumber)
 		}
 		return nil
-	}, fmt.Sprintf("Deleting tail epoch #%d", epoch), func() bool {
+	}
+	action := fmt.Sprintf("Deleting tail epoch #%d", epoch)
+	stopFn := func() bool {
 		f.processEvents()
 		return f.stop || !f.targetHeadIndexed()
-	}); err == nil {
+	}
+	if err := f.safeDeleteWithLogs(deleteFn, action, stopFn); err == nil {
 		// everything removed; mark as cleaned and report success
 		if f.cleanedEpochsBefore == epoch {
 			f.cleanedEpochsBefore = epoch + 1
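Naming the callbacks deleteFn and stopFn (plus the action string) keeps the safeDeleteWithLogs call on a single readable line without changing behavior. The stand-in below sketches the resulting call shape; its signature is inferred from this call site and the truncated hunk header further up, not copied from the source:

package main

import "fmt"

// keyValueStore is a placeholder for ethdb.KeyValueStore so the sketch compiles
// on its own.
type keyValueStore interface{}

// safeDeleteWithLogs here is an inferred stand-in, not the real helper: it logs
// the action name, then runs deleteFn, forwarding stopFn through the stopCb
// callback so the deletion can yield early.
func safeDeleteWithLogs(deleteFn func(db keyValueStore, hashScheme bool, stopCb func(bool) bool) error, action string, stopFn func() bool) error {
	fmt.Println(action)
	return deleteFn(nil, false, func(bool) bool { return stopFn() })
}

func main() {
	deleteFn := func(db keyValueStore, hashScheme bool, stopCb func(bool) bool) error {
		// a real implementation would delete rows here, polling stopCb as it goes
		_ = stopCb(false)
		return nil
	}
	action := "Deleting tail epoch #0"
	stopFn := func() bool { return false }
	if err := safeDeleteWithLogs(deleteFn, action, stopFn); err == nil {
		fmt.Println("tail epoch removed")
	}
}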
@@ -808,6 +821,9 @@ func (f *FilterMaps) deleteTailEpoch(epoch uint32) (bool, error) {
 }
 
 // exportCheckpoints exports epoch checkpoints in the format used by checkpoints.go.
+//
+// Note: acquiring the indexLock read lock is unnecessary here, as this function
+// is always called within the indexLoop.
 func (f *FilterMaps) exportCheckpoints() {
 	finalLvPtr, err := f.getBlockLvPointer(f.finalBlock + 1)
 	if err != nil {