@@ -1,6 +1,7 @@
 package history
 
 import (
+	"fmt"
 	"sync"
 	"sync/atomic"
 
@@ -43,6 +44,6 @@
 	timerSequenceNumber int64
 	rangeSize           uint
 	closeCh             chan<- int
-	isClosed            int32
+	isClosed            bool
 	logger              bark.Logger
 	metricsClient       metrics.Client
@@ -132,6 +133,7 @@ func (s *shardContextImpl) CreateWorkflowExecution(request *persistence.CreateWo
 		if err != nil {
 			return nil, err
 		}
+		s.logger.Debugf("Assigning transfer task ID: %v", id)
 		task.SetTaskID(id)
 		transferMaxReadLevel = id
 	}
@@ -155,7 +157,8 @@ Create_Loop:
 						s.closeShard()
 					}
 				}
-			case *shared.InternalServiceError, *persistence.TimeoutError:
+			case *shared.WorkflowExecutionAlreadyStartedError:
+			default:
 				{
 					// We have no idea if the write failed or will eventually make it to
 					// persistence. Increment RangeID to guarantee that subsequent reads
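
Aside, not part of the diff: after this change only errors with a known outcome get their own case, and everything else falls through to default, where the range is bumped. A minimal sketch of that classification, reusing the error types shown in this diff plus a hypothetical bumpRangeID callback (assumes the history package's existing imports):

// classifyCreateError mirrors the shape of the switch in Create_Loop and is
// meant to be called only when the write returned an error; bumpRangeID is a
// hypothetical stand-in for the range-increment logic in the default branch.
func classifyCreateError(err error, bumpRangeID func()) {
	switch err.(type) {
	case *persistence.ShardOwnershipLostError:
		// ownership moved; the surrounding code retries or closes the shard
	case *shared.WorkflowExecutionAlreadyStartedError:
		// definitive outcome, no need to touch the range
	default:
		// write outcome unknown: bump RangeID so subsequent reads either see
		// the write or know for certain that it failed
		bumpRangeID()
	}
}
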
@@ -191,6 +194,7 @@ func (s *shardContextImpl) UpdateWorkflowExecution(request *persistence.UpdateWo
 		if err != nil {
 			return err
 		}
+		s.logger.Debugf("Assigning transfer task ID: %v", id)
 		task.SetTaskID(id)
 		transferMaxReadLevel = id
 	}
@@ -201,6 +205,7 @@ func (s *shardContextImpl) UpdateWorkflowExecution(request *persistence.UpdateWo
 		if err != nil {
 			return err
 		}
+		s.logger.Debugf("Assigning transfer task ID: %v", id)
 		task.SetTaskID(id)
 		transferMaxReadLevel = id
 	}
@@ -225,7 +230,8 @@ Update_Loop:
 						s.closeShard()
 					}
 				}
-			case *shared.InternalServiceError, *persistence.TimeoutError:
+			case *persistence.ConditionFailedError:
+			default:
 				{
 					// We have no idea if the write failed or will eventually make it to
 					// persistence. Increment RangeID to guarantee that subsequent reads
@@ -277,11 +283,15 @@ func (s *shardContextImpl) getRangeID() int64 {
 }
 
 func (s *shardContextImpl) closeShard() {
-	if !atomic.CompareAndSwapInt32(&s.isClosed, 0, 1) {
+	if s.isClosed {
 		return
 	}
 
-	s.rangeID = -1 // fails any writes that may start after this point.
+	s.isClosed = true
+
+	// fails any writes that may start after this point.
+	s.shardInfo.RangeID = -1
+	atomic.StoreInt64(&s.rangeID, s.shardInfo.RangeID)
 
 	if s.closeCh != nil {
 		// This is the channel passed in by shard controller to monitor if a shard needs to be unloaded
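
Aside, not part of the diff: the CompareAndSwapInt32 guard is replaced by a plain bool, which presumably relies on closeShard being serialized elsewhere (an assumption, not visible in this hunk), while the cached rangeID is still read concurrently and is therefore updated with atomic.StoreInt64. A minimal sketch of that store/load pairing, with hypothetical names and assuming sync/atomic is imported:

// invalidateRange publishes the poisoned range; loadRange is what a concurrent
// reader would use. After invalidateRange, any write built from loadRange
// carries RangeID -1, which the persistence layer's conditional check rejects.
var rangeID int64

func invalidateRange() { atomic.StoreInt64(&rangeID, -1) }
func loadRange() int64 { return atomic.LoadInt64(&rangeID) }
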
@@ -320,6 +330,8 @@ func (s *shardContextImpl) renewRangeLocked(isStealing bool) error {
 		ShardInfo:       updatedShardInfo,
 		PreviousRangeID: s.shardInfo.RangeID})
 	if err != nil {
+		logPersistantStoreErrorEvent(s.logger, tagValueStoreOperationUpdateShard, err,
+			fmt.Sprintf("{RangeID: %v}", s.shardInfo.RangeID))
 		// Shard is stolen, trigger history engine shutdown
 		if _, ok := err.(*persistence.ShardOwnershipLostError); ok {
 			s.closeShard()
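
Aside, not part of the diff: renewRangeLocked persists the new range through a conditional update keyed on the previously known RangeID, so a shard stolen by another host surfaces as ShardOwnershipLostError and this context unloads itself. A rough sketch of that call shape; the field names match the hunk, while shardManager and UpdateShardRequest are assumptions about names outside it:

// The update succeeds only while this host still owns the shard at
// PreviousRangeID; on ownership loss the shard closes so the controller
// can unload it.
err := s.shardManager.UpdateShard(&persistence.UpdateShardRequest{
	ShardInfo:       updatedShardInfo,
	PreviousRangeID: s.shardInfo.RangeID,
})
if err != nil {
	if _, ok := err.(*persistence.ShardOwnershipLostError); ok {
		s.closeShard()
	}
	return err
}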