@@ -40,9 +40,10 @@ static void PlatformWorkerThread(void* data) {
     worker_data->platform_workers_ready->Signal(lock);
   }
 
-  while (std::unique_ptr<Task> task = pending_worker_tasks->BlockingPop()) {
+  while (std::unique_ptr<Task> task =
+             pending_worker_tasks->Lock().BlockingPop()) {
     task->Run();
-    pending_worker_tasks->NotifyOfCompletion();
+    pending_worker_tasks->Lock().NotifyOfCompletion();
   }
 }
 
@@ -73,13 +74,15 @@ class WorkerThreadsTaskRunner::DelayedTaskScheduler {
   }
 
   void PostDelayedTask(std::unique_ptr<Task> task, double delay_in_seconds) {
-    tasks_.Push(std::make_unique<ScheduleTask>(this, std::move(task),
-                                               delay_in_seconds));
+    auto locked = tasks_.Lock();
+    locked.Push(std::make_unique<ScheduleTask>(
+        this, std::move(task), delay_in_seconds));
     uv_async_send(&flush_tasks_);
   }
 
   void Stop() {
-    tasks_.Push(std::make_unique<StopTask>(this));
+    auto locked = tasks_.Lock();
+    locked.Push(std::make_unique<StopTask>(this));
     uv_async_send(&flush_tasks_);
   }
 
@@ -100,8 +103,19 @@ class WorkerThreadsTaskRunner::DelayedTaskScheduler {
   static void FlushTasks(uv_async_t* flush_tasks) {
     DelayedTaskScheduler* scheduler =
         ContainerOf(&DelayedTaskScheduler::loop_, flush_tasks->loop);
-    while (std::unique_ptr<Task> task = scheduler->tasks_.Pop())
+
+    std::vector<std::unique_ptr<Task>> tasks_to_run;
+    {
+      auto locked = scheduler->tasks_.Lock();
+      std::unique_ptr<Task> task;
+      while ((task = locked.Pop())) {
+        tasks_to_run.push_back(std::move(task));
+      }
+    }
+
+    for (auto& task : tasks_to_run) {
       task->Run();
+    }
   }
 
   class StopTask : public Task {
@@ -149,7 +163,8 @@ class WorkerThreadsTaskRunner::DelayedTaskScheduler {
   static void RunTask(uv_timer_t* timer) {
     DelayedTaskScheduler* scheduler =
         ContainerOf(&DelayedTaskScheduler::loop_, timer->loop);
-    scheduler->pending_worker_tasks_->Push(scheduler->TakeTimerTask(timer));
+    scheduler->pending_worker_tasks_->Lock().Push(
+        scheduler->TakeTimerTask(timer));
   }
 
   std::unique_ptr<Task> TakeTimerTask(uv_timer_t* timer) {
@@ -203,7 +218,7 @@ WorkerThreadsTaskRunner::WorkerThreadsTaskRunner(int thread_pool_size) {
 }
 
 void WorkerThreadsTaskRunner::PostTask(std::unique_ptr<Task> task) {
-  pending_worker_tasks_.Push(std::move(task));
+  pending_worker_tasks_.Lock().Push(std::move(task));
 }
 
 void WorkerThreadsTaskRunner::PostDelayedTask(std::unique_ptr<Task> task,
@@ -212,11 +227,11 @@ void WorkerThreadsTaskRunner::PostDelayedTask(std::unique_ptr<Task> task,
 }
 
 void WorkerThreadsTaskRunner::BlockingDrain() {
-  pending_worker_tasks_.BlockingDrain();
+  pending_worker_tasks_.Lock().BlockingDrain();
 }
 
 void WorkerThreadsTaskRunner::Shutdown() {
-  pending_worker_tasks_.Stop();
+  pending_worker_tasks_.Lock().Stop();
   delayed_task_scheduler_->Stop();
   for (size_t i = 0; i < threads_.size(); i++) {
     CHECK_EQ(0, uv_thread_join(threads_[i].get()));
@@ -253,29 +268,23 @@ void PerIsolatePlatformData::PostIdleTaskImpl(
 
 void PerIsolatePlatformData::PostTaskImpl(std::unique_ptr<Task> task,
                                           const v8::SourceLocation& location) {
-  if (flush_tasks_ == nullptr) {
-    // V8 may post tasks during Isolate disposal. In that case, the only
-    // sensible path forward is to discard the task.
-    return;
-  }
-  foreground_tasks_.Push(std::move(task));
+  auto locked = foreground_tasks_.Lock();
+  if (flush_tasks_ == nullptr) return;
+  locked.Push(std::move(task));
   uv_async_send(flush_tasks_);
 }
 
 void PerIsolatePlatformData::PostDelayedTaskImpl(
     std::unique_ptr<Task> task,
     double delay_in_seconds,
     const v8::SourceLocation& location) {
-  if (flush_tasks_ == nullptr) {
-    // V8 may post tasks during Isolate disposal. In that case, the only
-    // sensible path forward is to discard the task.
-    return;
-  }
+  auto locked = foreground_delayed_tasks_.Lock();
+  if (flush_tasks_ == nullptr) return;
   std::unique_ptr<DelayedTask> delayed(new DelayedTask());
   delayed->task = std::move(task);
   delayed->platform_data = shared_from_this();
   delayed->timeout = delay_in_seconds;
-  foreground_delayed_tasks_.Push(std::move(delayed));
+  locked.Push(std::move(delayed));
   uv_async_send(flush_tasks_);
 }
 
@@ -301,32 +310,30 @@ void PerIsolatePlatformData::AddShutdownCallback(void (*callback)(void*),
 }
 
 void PerIsolatePlatformData::Shutdown() {
-  if (flush_tasks_ == nullptr)
-    return;
+  auto foreground_tasks_locked = foreground_tasks_.Lock();
+  auto foreground_delayed_tasks_locked = foreground_delayed_tasks_.Lock();
 
-  // While there should be no V8 tasks in the queues at this point, it is
-  // possible that Node.js-internal tasks from e.g. the inspector are still
-  // lying around. We clear these queues and ignore the return value,
-  // effectively deleting the tasks instead of running them.
-  foreground_delayed_tasks_.PopAll();
-  foreground_tasks_.PopAll();
+  foreground_delayed_tasks_locked.PopAll();
+  foreground_tasks_locked.PopAll();
   scheduled_delayed_tasks_.clear();
 
-  // Both destroying the scheduled_delayed_tasks_ lists and closing
-  // flush_tasks_ handle add tasks to the event loop. We keep a count of all
-  // non-closed handles, and when that reaches zero, we inform any shutdown
-  // callbacks that the platform is done as far as this Isolate is concerned.
-  self_reference_ = shared_from_this();
-  uv_close(reinterpret_cast<uv_handle_t*>(flush_tasks_),
-           [](uv_handle_t* handle) {
-    std::unique_ptr<uv_async_t> flush_tasks {
-        reinterpret_cast<uv_async_t*>(handle) };
-    PerIsolatePlatformData* platform_data =
-        static_cast<PerIsolatePlatformData*>(flush_tasks->data);
-    platform_data->DecreaseHandleCount();
-    platform_data->self_reference_.reset();
-  });
-  flush_tasks_ = nullptr;
+  if (flush_tasks_ != nullptr) {
+    // Both destroying the scheduled_delayed_tasks_ lists and closing
+    // flush_tasks_ handle add tasks to the event loop. We keep a count of all
+    // non-closed handles, and when that reaches zero, we inform any shutdown
+    // callbacks that the platform is done as far as this Isolate is concerned.
+    self_reference_ = shared_from_this();
+    uv_close(reinterpret_cast<uv_handle_t*>(flush_tasks_),
+             [](uv_handle_t* handle) {
+               std::unique_ptr<uv_async_t> flush_tasks{
+                   reinterpret_cast<uv_async_t*>(handle)};
+               PerIsolatePlatformData* platform_data =
+                   static_cast<PerIsolatePlatformData*>(flush_tasks->data);
+               platform_data->DecreaseHandleCount();
+               platform_data->self_reference_.reset();
+             });
+    flush_tasks_ = nullptr;
+  }
 }
 
 void PerIsolatePlatformData::DecreaseHandleCount() {
@@ -472,39 +479,51 @@ void NodePlatform::DrainTasks(Isolate* isolate) {
 bool PerIsolatePlatformData::FlushForegroundTasksInternal() {
   bool did_work = false;
 
-  while (std::unique_ptr<DelayedTask> delayed =
-      foreground_delayed_tasks_.Pop()) {
-    did_work = true;
+  std::vector<std::unique_ptr<DelayedTask>> delayed_tasks_to_schedule;
+  {
+    auto locked_tasks = foreground_delayed_tasks_.Lock();
+    std::unique_ptr<DelayedTask> delayed;
+    while ((delayed = locked_tasks.Pop())) {
+      did_work = true;
+      delayed_tasks_to_schedule.push_back(std::move(delayed));
+    }
+  }
+
+  for (auto& delayed : delayed_tasks_to_schedule) {
     uint64_t delay_millis = llround(delayed->timeout * 1000);
 
     delayed->timer.data = static_cast<void*>(delayed.get());
     uv_timer_init(loop_, &delayed->timer);
-    // Timers may not guarantee queue ordering of events with the same delay if
-    // the delay is non-zero. This should not be a problem in practice.
+    // Timers may not guarantee queue ordering of events with the same delay
+    // if the delay is non-zero. This should not be a problem in practice.
     uv_timer_start(&delayed->timer, RunForegroundTask, delay_millis, 0);
     uv_unref(reinterpret_cast<uv_handle_t*>(&delayed->timer));
     uv_handle_count_++;
 
-    scheduled_delayed_tasks_.emplace_back(delayed.release(),
-                                          [](DelayedTask* delayed) {
-      uv_close(reinterpret_cast<uv_handle_t*>(&delayed->timer),
-               [](uv_handle_t* handle) {
-        std::unique_ptr<DelayedTask> task {
-            static_cast<DelayedTask*>(handle->data) };
-        task->platform_data->DecreaseHandleCount();
-      });
-    });
+    scheduled_delayed_tasks_.emplace_back(
+        delayed.release(), [](DelayedTask* delayed) {
+          uv_close(reinterpret_cast<uv_handle_t*>(&delayed->timer),
+                   [](uv_handle_t* handle) {
+                     std::unique_ptr<DelayedTask> task{
+                         static_cast<DelayedTask*>(handle->data)};
+                     task->platform_data->DecreaseHandleCount();
+                   });
+        });
+  }
+
+  std::queue<std::unique_ptr<Task>> tasks;
+  {
+    auto locked = foreground_tasks_.Lock();
+    tasks = locked.PopAll();
   }
-  // Move all foreground tasks into a separate queue and flush that queue.
-  // This way tasks that are posted while flushing the queue will be run on the
-  // next call of FlushForegroundTasksInternal.
-  std::queue<std::unique_ptr<Task>> tasks = foreground_tasks_.PopAll();
+
   while (!tasks.empty()) {
     std::unique_ptr<Task> task = std::move(tasks.front());
     tasks.pop();
     did_work = true;
     RunForegroundTask(std::move(task));
   }
+
   return did_work;
 }
 
@@ -594,66 +613,63 @@ TaskQueue<T>::TaskQueue()
       outstanding_tasks_(0), stopped_(false), task_queue_() { }
 
 template <class T>
-void TaskQueue<T>::Push(std::unique_ptr<T> task) {
-  Mutex::ScopedLock scoped_lock(lock_);
-  outstanding_tasks_++;
-  task_queue_.push(std::move(task));
-  tasks_available_.Signal(scoped_lock);
+TaskQueue<T>::Locked::Locked(TaskQueue* queue)
+    : queue_(queue), lock_(queue->lock_) {}
+
+template <class T>
+void TaskQueue<T>::Locked::Push(std::unique_ptr<T> task) {
+  queue_->outstanding_tasks_++;
+  queue_->task_queue_.push(std::move(task));
+  queue_->tasks_available_.Signal(lock_);
 }
 
 template <class T>
-std::unique_ptr<T> TaskQueue<T>::Pop() {
-  Mutex::ScopedLock scoped_lock(lock_);
-  if (task_queue_.empty()) {
+std::unique_ptr<T> TaskQueue<T>::Locked::Pop() {
+  if (queue_->task_queue_.empty()) {
     return std::unique_ptr<T>(nullptr);
   }
-  std::unique_ptr<T> result = std::move(task_queue_.front());
-  task_queue_.pop();
+  std::unique_ptr<T> result = std::move(queue_->task_queue_.front());
+  queue_->task_queue_.pop();
   return result;
 }
 
 template <class T>
-std::unique_ptr<T> TaskQueue<T>::BlockingPop() {
-  Mutex::ScopedLock scoped_lock(lock_);
-  while (task_queue_.empty() && !stopped_) {
-    tasks_available_.Wait(scoped_lock);
+std::unique_ptr<T> TaskQueue<T>::Locked::BlockingPop() {
+  while (queue_->task_queue_.empty() && !queue_->stopped_) {
+    queue_->tasks_available_.Wait(lock_);
   }
-  if (stopped_) {
+  if (queue_->stopped_) {
     return std::unique_ptr<T>(nullptr);
   }
-  std::unique_ptr<T> result = std::move(task_queue_.front());
-  task_queue_.pop();
+  std::unique_ptr<T> result = std::move(queue_->task_queue_.front());
+  queue_->task_queue_.pop();
   return result;
 }
 
 template <class T>
-void TaskQueue<T>::NotifyOfCompletion() {
-  Mutex::ScopedLock scoped_lock(lock_);
-  if (--outstanding_tasks_ == 0) {
-    tasks_drained_.Broadcast(scoped_lock);
+void TaskQueue<T>::Locked::NotifyOfCompletion() {
+  if (--queue_->outstanding_tasks_ == 0) {
+    queue_->tasks_drained_.Broadcast(lock_);
   }
 }
 
 template <class T>
-void TaskQueue<T>::BlockingDrain() {
-  Mutex::ScopedLock scoped_lock(lock_);
-  while (outstanding_tasks_ > 0) {
-    tasks_drained_.Wait(scoped_lock);
+void TaskQueue<T>::Locked::BlockingDrain() {
+  while (queue_->outstanding_tasks_ > 0) {
+    queue_->tasks_drained_.Wait(lock_);
  }
 }
 
 template <class T>
-void TaskQueue<T>::Stop() {
-  Mutex::ScopedLock scoped_lock(lock_);
-  stopped_ = true;
-  tasks_available_.Broadcast(scoped_lock);
+void TaskQueue<T>::Locked::Stop() {
+  queue_->stopped_ = true;
+  queue_->tasks_available_.Broadcast(lock_);
 }
 
 template <class T>
-std::queue<std::unique_ptr<T>> TaskQueue<T>::PopAll() {
-  Mutex::ScopedLock scoped_lock(lock_);
+std::queue<std::unique_ptr<T>> TaskQueue<T>::Locked::PopAll() {
   std::queue<std::unique_ptr<T>> result;
-  result.swap(task_queue_);
+  result.swap(queue_->task_queue_);
   return result;
 }
 
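For readers unfamiliar with the pattern this diff applies, below is a minimal, self-contained sketch of a task queue with a Locked accessor. It is written against the C++ standard library rather than Node's Mutex/ConditionVariable wrappers, and names such as SimpleTaskQueue and tasks_available_ are illustrative, not part of the Node.js code above. The point is only to show how a guard object that owns the lock for its lifetime lets callers compose several queue operations under a single mutex acquisition.

// Sketch only: illustrates the Locked-accessor idea, not Node's actual API.
#include <condition_variable>
#include <memory>
#include <mutex>
#include <queue>

template <class T>
class SimpleTaskQueue {
 public:
  // Guard that holds the queue's mutex for its lifetime. Every member below
  // runs under that one lock, so a caller can chain operations (for example:
  // check a flag, then Push) without a second acquisition.
  class Locked {
   public:
    explicit Locked(SimpleTaskQueue* queue)
        : queue_(queue), lock_(queue->mutex_) {}

    void Push(std::unique_ptr<T> task) {
      queue_->tasks_.push(std::move(task));
      queue_->tasks_available_.notify_one();
    }

    std::unique_ptr<T> Pop() {
      if (queue_->tasks_.empty()) return nullptr;
      std::unique_ptr<T> result = std::move(queue_->tasks_.front());
      queue_->tasks_.pop();
      return result;
    }

    std::unique_ptr<T> BlockingPop() {
      // Wait until a task arrives or the queue is stopped.
      queue_->tasks_available_.wait(lock_, [this] {
        return !queue_->tasks_.empty() || queue_->stopped_;
      });
      return Pop();  // Returns nullptr if we woke up because of Stop().
    }

    void Stop() {
      queue_->stopped_ = true;
      queue_->tasks_available_.notify_all();
    }

   private:
    SimpleTaskQueue* queue_;
    std::unique_lock<std::mutex> lock_;
  };

  // The only way to touch the queue is through a Locked instance.
  Locked Lock() { return Locked(this); }

 private:
  std::mutex mutex_;
  std::condition_variable tasks_available_;
  std::queue<std::unique_ptr<T>> tasks_;
  bool stopped_ = false;
};

// Usage mirrors the calls in the diff, e.g.:
//   SimpleTaskQueue<int> queue;
//   queue.Lock().Push(std::make_unique<int>(42));
//   std::unique_ptr<int> task = queue.Lock().BlockingPop();

This mirrors what the commit does in PostTaskImpl and Shutdown: both sides now touch the queues only while holding their locks, so a check such as flush_tasks_ == nullptr and the subsequent Push happen atomically with respect to the shutdown path.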