@@ -36,8 +36,9 @@ import (
 // transition, typically corresponding to a block execution. It can also represent
 // the combined trie node set from several aggregated state transitions.
 type nodeSet struct {
-	size  uint64                                    // aggregated size of the trie node
-	nodes map[common.Hash]map[string]*trienode.Node // node set, mapped by owner and path
+	size         uint64                                    // aggregated size of the trie node
+	accountNodes map[string]*trienode.Node                 // account trie nodes, mapped by path
+	storageNodes map[common.Hash]map[string]*trienode.Node // storage trie nodes, mapped by owner and path
 }
 
 // newNodeSet constructs the set with the provided dirty trie nodes.
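For orientation, the convention the new fields encode is that a zero owner hash identifies the single account trie, while any non-zero owner names the account whose storage trie a node belongs to. A minimal standalone sketch of that convention (the `isAccountTrie` helper is hypothetical, not part of this change):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// isAccountTrie mirrors the owner convention assumed throughout the diff:
// the zero hash denotes the account trie, any non-zero owner identifies
// the account whose storage trie the node belongs to.
func isAccountTrie(owner common.Hash) bool {
	return owner == (common.Hash{})
}

func main() {
	fmt.Println(isAccountTrie(common.Hash{}))            // true  -> accountNodes
	fmt.Println(isAccountTrie(common.HexToHash("0x01"))) // false -> storageNodes[owner]
}
```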
@@ -46,21 +47,30 @@ func newNodeSet(nodes map[common.Hash]map[string]*trienode.Node) *nodeSet {
 	if nodes == nil {
 		nodes = make(map[common.Hash]map[string]*trienode.Node)
 	}
-	s := &nodeSet{nodes: nodes}
+	s := &nodeSet{
+		accountNodes: make(map[string]*trienode.Node),
+		storageNodes: make(map[common.Hash]map[string]*trienode.Node),
+	}
+	for owner, subset := range nodes {
+		if owner == (common.Hash{}) {
+			s.accountNodes = subset
+		} else {
+			s.storageNodes[owner] = subset
+		}
+	}
 	s.computeSize()
 	return s
 }
 
 // computeSize calculates the database size of the held trie nodes.
 func (s *nodeSet) computeSize() {
 	var size uint64
-	for owner, subset := range s.nodes {
-		var prefix int
-		if owner != (common.Hash{}) {
-			prefix = common.HashLength // owner (32 bytes) for storage trie nodes
-		}
+	for path, n := range s.accountNodes {
+		size += uint64(len(n.Blob) + len(path))
+	}
+	for _, subset := range s.storageNodes {
 		for path, n := range subset {
-			size += uint64(prefix + len(n.Blob) + len(path))
+			size += uint64(common.HashLength + len(n.Blob) + len(path))
 		}
 	}
 	s.size = size
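As a sanity check on the accounting above, a self-contained sketch with made-up node sizes (`hashLength` stands in for `common.HashLength`; the path and blob lengths are invented):

```go
package main

import "fmt"

const hashLength = 32 // stand-in for common.HashLength

func main() {
	path, blob := 3, 100 // hypothetical path and blob lengths in bytes

	// An account trie node is charged only its path and blob...
	account := path + blob
	// ...while a storage trie node also carries its 32-byte owner hash.
	storage := hashLength + path + blob

	fmt.Println(account, storage) // 103 135
}
```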
@@ -79,15 +89,18 @@ func (s *nodeSet) updateSize(delta int64) {
 
 // node retrieves the trie node with node path and its trie identifier.
 func (s *nodeSet) node(owner common.Hash, path []byte) (*trienode.Node, bool) {
-	subset, ok := s.nodes[owner]
-	if !ok {
-		return nil, false
+	// Account trie node
+	if owner == (common.Hash{}) {
+		n, ok := s.accountNodes[string(path)]
+		return n, ok
 	}
-	n, ok := subset[string(path)]
+	// Storage trie node
+	subset, ok := s.storageNodes[owner]
 	if !ok {
 		return nil, false
 	}
-	return n, true
+	n, ok := subset[string(path)]
+	return n, ok
 }
 
 // merge integrates the provided dirty nodes into the set. The provided nodeset
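Before the merge hunk, a sketch of how the split lookup routes, written as it might appear in a test inside this package (the set contents and the test itself are hypothetical):

```go
func TestNodeRouting(t *testing.T) {
	set := newNodeSet(nil)
	blob := []byte{0xca, 0xfe}
	set.accountNodes["\x01"] = trienode.New(crypto.Keccak256Hash(blob), blob)

	// Zero owner: served from accountNodes.
	if _, ok := set.node(common.Hash{}, []byte{0x01}); !ok {
		t.Fatal("expected account node")
	}
	// Non-zero owner: served from storageNodes, which is empty here.
	if _, ok := set.node(common.HexToHash("0x01"), []byte{0x01}); ok {
		t.Fatal("unexpected storage node")
	}
}
```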
@@ -97,35 +110,44 @@ func (s *nodeSet) merge(set *nodeSet) {
 		delta     int64   // size difference resulting from node merging
 		overwrite counter // counter of nodes being overwritten
 	)
-	for owner, subset := range set.nodes {
-		var prefix int
-		if owner != (common.Hash{}) {
-			prefix = common.HashLength
+
+	// Merge account nodes
+	for path, n := range set.accountNodes {
+		if orig, exist := s.accountNodes[path]; !exist {
+			delta += int64(len(n.Blob) + len(path))
+		} else {
+			delta += int64(len(n.Blob) - len(orig.Blob))
+			overwrite.add(len(orig.Blob) + len(path))
 		}
-		current, exist := s.nodes[owner]
+		s.accountNodes[path] = n
+	}
+
+	// Merge storage nodes
+	for owner, subset := range set.storageNodes {
+		current, exist := s.storageNodes[owner]
 		if !exist {
 			for path, n := range subset {
-				delta += int64(prefix + len(n.Blob) + len(path))
+				delta += int64(common.HashLength + len(n.Blob) + len(path))
 			}
 			// Perform a shallow copy of the map for the subset instead of claiming it
 			// directly from the provided nodeset to avoid potential concurrent map
 			// read/write issues. The nodes belonging to the original diff layer remain
 			// accessible even after merging. Therefore, ownership of the nodes map
 			// should still belong to the original layer, and any modifications to it
 			// should be prevented.
-			s.nodes[owner] = maps.Clone(subset)
+			s.storageNodes[owner] = maps.Clone(subset)
 			continue
 		}
 		for path, n := range subset {
 			if orig, exist := current[path]; !exist {
-				delta += int64(prefix + len(n.Blob) + len(path))
+				delta += int64(common.HashLength + len(n.Blob) + len(path))
 			} else {
 				delta += int64(len(n.Blob) - len(orig.Blob))
-				overwrite.add(prefix + len(orig.Blob) + len(path))
+				overwrite.add(common.HashLength + len(orig.Blob) + len(path))
 			}
 			current[path] = n
 		}
-		s.nodes[owner] = current
+		s.storageNodes[owner] = current
 	}
 	overwrite.report(gcTrieNodeMeter, gcTrieNodeBytesMeter)
 	s.updateSize(delta)
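The shallow-copy comment is the subtle part of this hunk: `maps.Clone` copies only the map structure, so the clone and the source keep pointing at the same `*trienode.Node` values. A standalone illustration of that behavior:

```go
package main

import (
	"fmt"
	"maps"
)

func main() {
	orig := map[string]*int{"a": new(int)}
	clone := maps.Clone(orig)

	clone["b"] = new(int) // structural change: invisible to orig
	*clone["a"] = 42      // pointed-to value: shared with orig

	fmt.Println(len(orig), *orig["a"]) // 1 42
}
```

Sharing the node pointers is safe because trie nodes are not mutated after creation; cloning the map is what keeps writes into the merged layer from reaching the diff layer's subset.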
@@ -136,34 +158,38 @@ func (s *nodeSet) merge(set *nodeSet) {
 func (s *nodeSet) revertTo(db ethdb.KeyValueReader, nodes map[common.Hash]map[string]*trienode.Node) {
 	var delta int64
 	for owner, subset := range nodes {
-		current, ok := s.nodes[owner]
-		if !ok {
-			panic(fmt.Sprintf("non-existent subset (%x)", owner))
-		}
-		for path, n := range subset {
-			orig, ok := current[path]
-			if !ok {
-				// There is a special case in merkle tree that one child is removed
-				// from a fullNode which only has two children, and then a new child
-				// with different position is immediately inserted into the fullNode.
-				// In this case, the clean child of the fullNode will also be marked
-				// as dirty because of node collapse and expansion. In case of database
-				// rollback, don't panic if this "clean" node occurs which is not
-				// present in buffer.
-				var blob []byte
-				if owner == (common.Hash{}) {
-					blob = rawdb.ReadAccountTrieNode(db, []byte(path))
-				} else {
-					blob = rawdb.ReadStorageTrieNode(db, owner, []byte(path))
+		if owner == (common.Hash{}) {
+			// Account trie nodes
+			for path, n := range subset {
+				orig, ok := s.accountNodes[path]
+				if !ok {
+					blob := rawdb.ReadAccountTrieNode(db, []byte(path))
+					if bytes.Equal(blob, n.Blob) {
+						continue
+					}
+					panic(fmt.Sprintf("non-existent account node (%v) blob: %v", path, crypto.Keccak256Hash(n.Blob).Hex()))
 				}
-				// Ignore the clean node in the case described above.
-				if bytes.Equal(blob, n.Blob) {
-					continue
+				s.accountNodes[path] = n
+				delta += int64(len(n.Blob)) - int64(len(orig.Blob))
+			}
+		} else {
+			// Storage trie nodes
+			current, ok := s.storageNodes[owner]
+			if !ok {
+				panic(fmt.Sprintf("non-existent subset (%x)", owner))
+			}
+			for path, n := range subset {
+				orig, ok := current[path]
+				if !ok {
+					blob := rawdb.ReadStorageTrieNode(db, owner, []byte(path))
+					if bytes.Equal(blob, n.Blob) {
+						continue
+					}
+					panic(fmt.Sprintf("non-existent storage node (%x %v) blob: %v", owner, path, crypto.Keccak256Hash(n.Blob).Hex()))
 				}
-				panic(fmt.Sprintf("non-existent node (%x %v) blob: %v", owner, path, crypto.Keccak256Hash(n.Blob).Hex()))
+				current[path] = n
+				delta += int64(len(n.Blob)) - int64(len(orig.Blob))
 			}
-			current[path] = n
-			delta += int64(len(n.Blob)) - int64(len(orig.Blob))
 		}
 	}
 	s.updateSize(delta)
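Both branches keep the escape hatch that the deleted comment block described: a path missing from the buffer may be a clean node that was only marked dirty by fullNode collapse and expansion, so the revert consults the on-disk blob before panicking. A hypothetical distillation of that shared shape (`revertOne` and `readBlob` are stand-ins, not code from this change):

```go
package main

import (
	"bytes"
	"fmt"
)

// revertOne condenses the per-node revert logic shared by the account and
// storage branches above.
func revertOne(buffered map[string][]byte, readBlob func(path []byte) []byte, path string, prev []byte) int64 {
	orig, ok := buffered[path]
	if !ok {
		// Not buffered: tolerate it only if the database already holds the
		// exact blob being reverted to, i.e. the node was clean all along.
		if bytes.Equal(readBlob([]byte(path)), prev) {
			return 0
		}
		panic("non-existent node")
	}
	buffered[path] = prev
	return int64(len(prev)) - int64(len(orig))
}

func main() {
	buffered := map[string][]byte{"\x01": {0xbe, 0xef, 0xed}}
	disk := func(path []byte) []byte { return nil }

	delta := revertOne(buffered, disk, "\x01", []byte{0xca, 0xfe})
	fmt.Println(delta) // -1: the reverted blob is one byte shorter
}
```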
@@ -184,8 +210,21 @@ type journalNodes struct {
 
 // encode serializes the content of trie nodes into the provided writer.
 func (s *nodeSet) encode(w io.Writer) error {
-	nodes := make([]journalNodes, 0, len(s.nodes))
-	for owner, subset := range s.nodes {
+	nodes := make([]journalNodes, 0, len(s.storageNodes)+1)
+
+	// Encode account nodes
+	if len(s.accountNodes) > 0 {
+		entry := journalNodes{Owner: common.Hash{}}
+		for path, node := range s.accountNodes {
+			entry.Nodes = append(entry.Nodes, journalNode{
+				Path: []byte(path),
+				Blob: node.Blob,
+			})
+		}
+		nodes = append(nodes, entry)
+	}
+	// Encode storage nodes
+	for owner, subset := range s.storageNodes {
 		entry := journalNodes{Owner: owner}
 		for path, node := range subset {
 			entry.Nodes = append(entry.Nodes, journalNode{
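The journal format itself is unchanged: account nodes simply travel as one more `journalNodes` entry keyed by the zero owner, so the on-disk shape stays compatible with the single-map representation. A self-contained sketch of a two-entry journal (local stand-in types, illustrative values):

```go
package main

import "fmt"

// Local stand-ins for the journalNodes/journalNode types in this file.
type hash [32]byte

type journalNode struct {
	Path []byte
	Blob []byte // nil journals a deletion
}

type journalNodes struct {
	Owner hash
	Nodes []journalNode
}

func main() {
	entries := []journalNodes{
		{Owner: hash{}, Nodes: []journalNode{{Path: []byte{0x01}, Blob: []byte{0xca, 0xfe}}}}, // account trie
		{Owner: hash{0x02}, Nodes: []journalNode{{Path: []byte{0x03}, Blob: nil}}},            // one storage trie
	}
	fmt.Println(len(entries)) // 2: zero-owner entry first, then per-owner entries
}
```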
@@ -204,43 +243,61 @@ func (s *nodeSet) decode(r *rlp.Stream) error {
 	if err := r.Decode(&encoded); err != nil {
 		return fmt.Errorf("load nodes: %v", err)
 	}
-	nodes := make(map[common.Hash]map[string]*trienode.Node)
+	s.accountNodes = make(map[string]*trienode.Node)
+	s.storageNodes = make(map[common.Hash]map[string]*trienode.Node)
+
 	for _, entry := range encoded {
-		subset := make(map[string]*trienode.Node)
-		for _, n := range entry.Nodes {
-			if len(n.Blob) > 0 {
-				subset[string(n.Path)] = trienode.New(crypto.Keccak256Hash(n.Blob), n.Blob)
-			} else {
-				subset[string(n.Path)] = trienode.NewDeleted()
+		if entry.Owner == (common.Hash{}) {
+			// Account nodes
+			for _, n := range entry.Nodes {
+				if len(n.Blob) > 0 {
+					s.accountNodes[string(n.Path)] = trienode.New(crypto.Keccak256Hash(n.Blob), n.Blob)
+				} else {
+					s.accountNodes[string(n.Path)] = trienode.NewDeleted()
+				}
+			}
+		} else {
+			// Storage nodes
+			subset := make(map[string]*trienode.Node)
+			for _, n := range entry.Nodes {
+				if len(n.Blob) > 0 {
+					subset[string(n.Path)] = trienode.New(crypto.Keccak256Hash(n.Blob), n.Blob)
+				} else {
+					subset[string(n.Path)] = trienode.NewDeleted()
+				}
 			}
+			s.storageNodes[entry.Owner] = subset
 		}
-		nodes[entry.Owner] = subset
 	}
-	s.nodes = nodes
 	s.computeSize()
 	return nil
 }
 
 // write flushes nodes into the provided database batch as a whole.
 func (s *nodeSet) write(batch ethdb.Batch, clean *fastcache.Cache) int {
-	return writeNodes(batch, s.nodes, clean)
+	nodes := make(map[common.Hash]map[string]*trienode.Node)
+	if len(s.accountNodes) > 0 {
+		nodes[common.Hash{}] = s.accountNodes
+	}
+	for owner, subset := range s.storageNodes {
+		nodes[owner] = subset
+	}
+	return writeNodes(batch, nodes, clean)
 }
 
 // reset clears all cached trie node data.
 func (s *nodeSet) reset() {
-	s.nodes = make(map[common.Hash]map[string]*trienode.Node)
+	s.accountNodes = make(map[string]*trienode.Node)
+	s.storageNodes = make(map[common.Hash]map[string]*trienode.Node)
 	s.size = 0
 }
 
 // dbsize returns the approximate size of db write.
 func (s *nodeSet) dbsize() int {
 	var m int
-	for owner, nodes := range s.nodes {
-		if owner == (common.Hash{}) {
-			m += len(nodes) * len(rawdb.TrieNodeAccountPrefix) // database key prefix
-		} else {
-			m += len(nodes) * (len(rawdb.TrieNodeStoragePrefix)) // database key prefix
-		}
+	m += len(s.accountNodes) * len(rawdb.TrieNodeAccountPrefix) // database key prefix
+	for _, nodes := range s.storageNodes {
+		m += len(nodes) * (len(rawdb.TrieNodeStoragePrefix)) // database key prefix
 	}
 	return m + int(s.size)
 }
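For scale, a back-of-the-envelope version of `dbsize` with invented counts. In geth both key prefixes are a single byte (`TrieNodeAccountPrefix` is "A", `TrieNodeStoragePrefix` is "O"), and `s.size` already covers owner, path and blob bytes:

```go
package main

import "fmt"

func main() {
	// Illustrative figures for one flush.
	const (
		accountPrefixLen = 1 // len(rawdb.TrieNodeAccountPrefix), "A"
		storagePrefixLen = 1 // len(rawdb.TrieNodeStoragePrefix), "O"
	)
	accountNodes, storageNodes := 100, 250 // hypothetical node counts
	size := 40_000                         // hypothetical s.size in bytes

	dbsize := accountNodes*accountPrefixLen + storageNodes*storagePrefixLen + size
	fmt.Println(dbsize) // 40350
}
```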