@@ -39,9 +39,10 @@ static void PlatformWorkerThread(void* data) {
     worker_data->platform_workers_ready->Signal(lock);
   }

-  while (std::unique_ptr<Task> task = pending_worker_tasks->BlockingPop()) {
+  while (std::unique_ptr<Task> task =
+             pending_worker_tasks->Lock().BlockingPop()) {
     task->Run();
-    pending_worker_tasks->NotifyOfCompletion();
+    pending_worker_tasks->Lock().NotifyOfCompletion();
   }
 }

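For context on the new call shape: Lock() hands out a RAII guard (the Locked constructor at the end of this diff takes a TaskQueue* and acquires queue->lock_), and the guard created by pending_worker_tasks->Lock() is a temporary, so the mutex is released at the end of each full expression and is not held while task->Run() executes. Below is a minimal, self-contained sketch of that pattern; GuardedQueue and its members are illustrative stand-ins, not Node's types.

#include <condition_variable>
#include <memory>
#include <mutex>
#include <queue>

// Illustrative guarded queue: Lock() returns a temporary RAII guard, so the
// mutex is released at the end of the full expression that uses it.
template <typename T>
class GuardedQueue {
 public:
  class Locked {
   public:
    explicit Locked(GuardedQueue* q) : q_(q), lock_(q->mutex_) {}

    void Push(std::unique_ptr<T> item) {
      q_->items_.push(std::move(item));
      q_->available_.notify_one();
    }

    // Blocks until an item is available or Stop() was called; returns
    // nullptr once stopped, which lets worker loops terminate cleanly.
    std::unique_ptr<T> BlockingPop() {
      q_->available_.wait(
          lock_, [this] { return q_->stopped_ || !q_->items_.empty(); });
      if (q_->stopped_) return nullptr;
      std::unique_ptr<T> result = std::move(q_->items_.front());
      q_->items_.pop();
      return result;
    }

    void Stop() {
      q_->stopped_ = true;
      q_->available_.notify_all();
    }

   private:
    GuardedQueue* q_;
    std::unique_lock<std::mutex> lock_;  // held only while this guard lives
  };

  Locked Lock() { return Locked(this); }

 private:
  std::mutex mutex_;
  std::condition_variable available_;
  std::queue<std::unique_ptr<T>> items_;
  bool stopped_ = false;
};

A worker loop written as while (auto item = q.Lock().BlockingPop()) { ... } therefore holds the mutex only inside the loop condition, which matches the shape of the loop above.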
@@ -72,13 +73,15 @@ class WorkerThreadsTaskRunner::DelayedTaskScheduler {
   }

   void PostDelayedTask(std::unique_ptr<Task> task, double delay_in_seconds) {
-    tasks_.Push(std::make_unique<ScheduleTask>(this, std::move(task),
-                                               delay_in_seconds));
+    auto locked = tasks_.Lock();
+    locked.Push(std::make_unique<ScheduleTask>(
+        this, std::move(task), delay_in_seconds));
     uv_async_send(&flush_tasks_);
   }

   void Stop() {
-    tasks_.Push(std::make_unique<StopTask>(this));
+    auto locked = tasks_.Lock();
+    locked.Push(std::make_unique<StopTask>(this));
     uv_async_send(&flush_tasks_);
   }

@@ -99,8 +102,14 @@ class WorkerThreadsTaskRunner::DelayedTaskScheduler {
   static void FlushTasks(uv_async_t* flush_tasks) {
     DelayedTaskScheduler* scheduler =
         ContainerOf(&DelayedTaskScheduler::loop_, flush_tasks->loop);
-    while (std::unique_ptr<Task> task = scheduler->tasks_.Pop())
+
+    std::queue<std::unique_ptr<Task>> tasks_to_run =
+        scheduler->tasks_.Lock().PopAll();
+    while (!tasks_to_run.empty()) {
+      std::unique_ptr<Task> task = std::move(tasks_to_run.front());
+      tasks_to_run.pop();
       task->Run();
+    }
   }

   class StopTask : public Task {
@@ -148,7 +157,8 @@ class WorkerThreadsTaskRunner::DelayedTaskScheduler {
   static void RunTask(uv_timer_t* timer) {
     DelayedTaskScheduler* scheduler =
         ContainerOf(&DelayedTaskScheduler::loop_, timer->loop);
-    scheduler->pending_worker_tasks_->Push(scheduler->TakeTimerTask(timer));
+    scheduler->pending_worker_tasks_->Lock().Push(
+        scheduler->TakeTimerTask(timer));
   }

   std::unique_ptr<Task> TakeTimerTask(uv_timer_t* timer) {
@@ -202,7 +212,7 @@ WorkerThreadsTaskRunner::WorkerThreadsTaskRunner(int thread_pool_size) {
 }

 void WorkerThreadsTaskRunner::PostTask(std::unique_ptr<Task> task) {
-  pending_worker_tasks_.Push(std::move(task));
+  pending_worker_tasks_.Lock().Push(std::move(task));
 }

 void WorkerThreadsTaskRunner::PostDelayedTask(std::unique_ptr<Task> task,
@@ -211,11 +221,11 @@ void WorkerThreadsTaskRunner::PostDelayedTask(std::unique_ptr<Task> task,
 }

 void WorkerThreadsTaskRunner::BlockingDrain() {
-  pending_worker_tasks_.BlockingDrain();
+  pending_worker_tasks_.Lock().BlockingDrain();
 }

 void WorkerThreadsTaskRunner::Shutdown() {
-  pending_worker_tasks_.Stop();
+  pending_worker_tasks_.Lock().Stop();
   delayed_task_scheduler_->Stop();
   for (size_t i = 0; i < threads_.size(); i++) {
     CHECK_EQ(0, uv_thread_join(threads_[i].get()));
@@ -250,27 +260,25 @@ void PerIsolatePlatformData::PostIdleTask(std::unique_ptr<v8::IdleTask> task) {
 }

 void PerIsolatePlatformData::PostTask(std::unique_ptr<Task> task) {
-  if (flush_tasks_ == nullptr) {
-    // V8 may post tasks during Isolate disposal. In that case, the only
-    // sensible path forward is to discard the task.
-    return;
-  }
-  foreground_tasks_.Push(std::move(task));
+  // The task can be posted from any V8 background worker thread, even when
+  // the foreground task runner is being cleaned up by Shutdown(). In that
+  // case, make sure we wait until the shutdown is completed (which leads
+  // to flush_tasks_ == nullptr, and the task will be discarded).
+  auto locked = foreground_tasks_.Lock();
+  if (flush_tasks_ == nullptr) return;
+  locked.Push(std::move(task));
   uv_async_send(flush_tasks_);
 }

 void PerIsolatePlatformData::PostDelayedTask(
     std::unique_ptr<Task> task, double delay_in_seconds) {
-  if (flush_tasks_ == nullptr) {
-    // V8 may post tasks during Isolate disposal. In that case, the only
-    // sensible path forward is to discard the task.
-    return;
-  }
+  auto locked = foreground_delayed_tasks_.Lock();
+  if (flush_tasks_ == nullptr) return;
   std::unique_ptr<DelayedTask> delayed(new DelayedTask());
   delayed->task = std::move(task);
   delayed->platform_data = shared_from_this();
   delayed->timeout = delay_in_seconds;
-  foreground_delayed_tasks_.Push(std::move(delayed));
+  locked.Push(std::move(delayed));
   uv_async_send(flush_tasks_);
 }

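Taking the lock before the flush_tasks_ == nullptr check matters because Shutdown() (further down in this diff) now holds the same queue locks while it drains the queues and, eventually, clears flush_tasks_. A concurrent post therefore either completes entirely before shutdown drains the queue or observes the cleared handle and drops the task. A small self-contained sketch of that check-under-lock pattern, with illustrative names only:

#include <functional>
#include <mutex>
#include <queue>

// Illustrative poster/shutdown pair: the "is this still alive?" check and the
// push happen under the same mutex that Shutdown() takes, so a task is either
// fully enqueued before shutdown drains the queue, or dropped afterwards.
class ForegroundRunner {
 public:
  void PostTask(std::function<void()> task) {
    std::lock_guard<std::mutex> lock(mutex_);
    if (shut_down_) return;  // a racing Shutdown() already won; drop the task
    tasks_.push(std::move(task));
  }

  void Shutdown() {
    std::lock_guard<std::mutex> lock(mutex_);
    shut_down_ = true;
    // Discard whatever is still queued; no PostTask() can interleave here.
    std::queue<std::function<void()>> discarded;
    discarded.swap(tasks_);
  }

 private:
  std::mutex mutex_;
  bool shut_down_ = false;
  std::queue<std::function<void()>> tasks_;
};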
@@ -294,32 +302,30 @@ void PerIsolatePlatformData::AddShutdownCallback(void (*callback)(void*),
 }

 void PerIsolatePlatformData::Shutdown() {
-  if (flush_tasks_ == nullptr)
-    return;
+  auto foreground_tasks_locked = foreground_tasks_.Lock();
+  auto foreground_delayed_tasks_locked = foreground_delayed_tasks_.Lock();

-  // While there should be no V8 tasks in the queues at this point, it is
-  // possible that Node.js-internal tasks from e.g. the inspector are still
-  // lying around. We clear these queues and ignore the return value,
-  // effectively deleting the tasks instead of running them.
-  foreground_delayed_tasks_.PopAll();
-  foreground_tasks_.PopAll();
+  foreground_delayed_tasks_locked.PopAll();
+  foreground_tasks_locked.PopAll();
   scheduled_delayed_tasks_.clear();

-  // Both destroying the scheduled_delayed_tasks_ lists and closing
-  // flush_tasks_ handle add tasks to the event loop. We keep a count of all
-  // non-closed handles, and when that reaches zero, we inform any shutdown
-  // callbacks that the platform is done as far as this Isolate is concerned.
-  self_reference_ = shared_from_this();
-  uv_close(reinterpret_cast<uv_handle_t*>(flush_tasks_),
-           [](uv_handle_t* handle) {
-    std::unique_ptr<uv_async_t> flush_tasks {
-        reinterpret_cast<uv_async_t*>(handle) };
-    PerIsolatePlatformData* platform_data =
-        static_cast<PerIsolatePlatformData*>(flush_tasks->data);
-    platform_data->DecreaseHandleCount();
-    platform_data->self_reference_.reset();
-  });
-  flush_tasks_ = nullptr;
+  if (flush_tasks_ != nullptr) {
+    // Both destroying the scheduled_delayed_tasks_ lists and closing
+    // flush_tasks_ handle add tasks to the event loop. We keep a count of all
+    // non-closed handles, and when that reaches zero, we inform any shutdown
+    // callbacks that the platform is done as far as this Isolate is concerned.
+    self_reference_ = shared_from_this();
+    uv_close(reinterpret_cast<uv_handle_t*>(flush_tasks_),
+             [](uv_handle_t* handle) {
+               std::unique_ptr<uv_async_t> flush_tasks{
+                   reinterpret_cast<uv_async_t*>(handle)};
+               PerIsolatePlatformData* platform_data =
+                   static_cast<PerIsolatePlatformData*>(flush_tasks->data);
+               platform_data->DecreaseHandleCount();
+               platform_data->self_reference_.reset();
+             });
+    flush_tasks_ = nullptr;
+  }
 }

 void PerIsolatePlatformData::DecreaseHandleCount() {
@@ -465,39 +471,48 @@ void NodePlatform::DrainTasks(Isolate* isolate) {
 bool PerIsolatePlatformData::FlushForegroundTasksInternal() {
   bool did_work = false;

-  while (std::unique_ptr<DelayedTask> delayed =
-      foreground_delayed_tasks_.Pop()) {
+  std::queue<std::unique_ptr<DelayedTask>> delayed_tasks_to_schedule =
+      foreground_delayed_tasks_.Lock().PopAll();
+  while (!delayed_tasks_to_schedule.empty()) {
+    std::unique_ptr<DelayedTask> delayed =
+        std::move(delayed_tasks_to_schedule.front());
+    delayed_tasks_to_schedule.pop();
+
     did_work = true;
     uint64_t delay_millis = llround(delayed->timeout * 1000);

     delayed->timer.data = static_cast<void*>(delayed.get());
     uv_timer_init(loop_, &delayed->timer);
-    // Timers may not guarantee queue ordering of events with the same delay if
-    // the delay is non-zero. This should not be a problem in practice.
+    // Timers may not guarantee queue ordering of events with the same delay
+    // if the delay is non-zero. This should not be a problem in practice.
     uv_timer_start(&delayed->timer, RunForegroundTask, delay_millis, 0);
     uv_unref(reinterpret_cast<uv_handle_t*>(&delayed->timer));
     uv_handle_count_++;

-    scheduled_delayed_tasks_.emplace_back(delayed.release(),
-                                          [](DelayedTask* delayed) {
-      uv_close(reinterpret_cast<uv_handle_t*>(&delayed->timer),
-               [](uv_handle_t* handle) {
-        std::unique_ptr<DelayedTask> task {
-            static_cast<DelayedTask*>(handle->data) };
-        task->platform_data->DecreaseHandleCount();
-      });
-    });
+    scheduled_delayed_tasks_.emplace_back(
+        delayed.release(), [](DelayedTask* delayed) {
+          uv_close(reinterpret_cast<uv_handle_t*>(&delayed->timer),
+                   [](uv_handle_t* handle) {
+                     std::unique_ptr<DelayedTask> task{
+                         static_cast<DelayedTask*>(handle->data)};
+                     task->platform_data->DecreaseHandleCount();
+                   });
+        });
+  }
+
+  std::queue<std::unique_ptr<Task>> tasks;
+  {
+    auto locked = foreground_tasks_.Lock();
+    tasks = locked.PopAll();
   }
-  // Move all foreground tasks into a separate queue and flush that queue.
-  // This way tasks that are posted while flushing the queue will be run on the
-  // next call of FlushForegroundTasksInternal.
-  std::queue<std::unique_ptr<Task>> tasks = foreground_tasks_.PopAll();
+
   while (!tasks.empty()) {
     std::unique_ptr<Task> task = std::move(tasks.front());
     tasks.pop();
     did_work = true;
     RunForegroundTask(std::move(task));
   }
+
   return did_work;
 }

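The rationale from the removed comment still applies: all pending foreground tasks are taken out in a single PopAll() while the lock is held, and the snapshot is then run with the lock released, so tasks posted during RunForegroundTask() (including by the tasks themselves) are deferred to the next FlushForegroundTasksInternal() call instead of re-entering this one. A compact, self-contained sketch of that drain-outside-the-lock pattern, using illustrative names:

#include <functional>
#include <mutex>
#include <queue>

// Illustrative flush: swap the whole queue out under the lock, then run the
// snapshot without holding it. Tasks posted while the snapshot runs (even by
// the running tasks) wait for the next flush instead of re-entering this one.
class Flusher {
 public:
  void Post(std::function<void()> task) {
    std::lock_guard<std::mutex> lock(mutex_);
    tasks_.push(std::move(task));
  }

  bool Flush() {
    std::queue<std::function<void()>> snapshot;
    {
      std::lock_guard<std::mutex> lock(mutex_);
      snapshot.swap(tasks_);  // the equivalent of PopAll() under the lock
    }
    bool did_work = !snapshot.empty();
    while (!snapshot.empty()) {
      snapshot.front()();  // runs unlocked; may call Post() safely
      snapshot.pop();
    }
    return did_work;
  }

 private:
  std::mutex mutex_;
  std::queue<std::function<void()>> tasks_;
};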
@@ -587,66 +602,63 @@ TaskQueue<T>::TaskQueue()
       outstanding_tasks_(0), stopped_(false), task_queue_() { }

 template <class T>
-void TaskQueue<T>::Push(std::unique_ptr<T> task) {
-  Mutex::ScopedLock scoped_lock(lock_);
-  outstanding_tasks_++;
-  task_queue_.push(std::move(task));
-  tasks_available_.Signal(scoped_lock);
+TaskQueue<T>::Locked::Locked(TaskQueue* queue)
+    : queue_(queue), lock_(queue->lock_) {}
+
+template <class T>
+void TaskQueue<T>::Locked::Push(std::unique_ptr<T> task) {
+  queue_->outstanding_tasks_++;
+  queue_->task_queue_.push(std::move(task));
+  queue_->tasks_available_.Signal(lock_);
 }

 template <class T>
-std::unique_ptr<T> TaskQueue<T>::Pop() {
-  Mutex::ScopedLock scoped_lock(lock_);
-  if (task_queue_.empty()) {
+std::unique_ptr<T> TaskQueue<T>::Locked::Pop() {
+  if (queue_->task_queue_.empty()) {
     return std::unique_ptr<T>(nullptr);
   }
-  std::unique_ptr<T> result = std::move(task_queue_.front());
-  task_queue_.pop();
+  std::unique_ptr<T> result = std::move(queue_->task_queue_.front());
+  queue_->task_queue_.pop();
   return result;
 }

 template <class T>
-std::unique_ptr<T> TaskQueue<T>::BlockingPop() {
-  Mutex::ScopedLock scoped_lock(lock_);
-  while (task_queue_.empty() && !stopped_) {
-    tasks_available_.Wait(scoped_lock);
+std::unique_ptr<T> TaskQueue<T>::Locked::BlockingPop() {
+  while (queue_->task_queue_.empty() && !queue_->stopped_) {
+    queue_->tasks_available_.Wait(lock_);
   }
-  if (stopped_) {
+  if (queue_->stopped_) {
     return std::unique_ptr<T>(nullptr);
   }
-  std::unique_ptr<T> result = std::move(task_queue_.front());
-  task_queue_.pop();
+  std::unique_ptr<T> result = std::move(queue_->task_queue_.front());
+  queue_->task_queue_.pop();
   return result;
 }

 template <class T>
-void TaskQueue<T>::NotifyOfCompletion() {
-  Mutex::ScopedLock scoped_lock(lock_);
-  if (--outstanding_tasks_ == 0) {
-    tasks_drained_.Broadcast(scoped_lock);
+void TaskQueue<T>::Locked::NotifyOfCompletion() {
+  if (--queue_->outstanding_tasks_ == 0) {
+    queue_->tasks_drained_.Broadcast(lock_);
   }
 }

 template <class T>
-void TaskQueue<T>::BlockingDrain() {
-  Mutex::ScopedLock scoped_lock(lock_);
-  while (outstanding_tasks_ > 0) {
-    tasks_drained_.Wait(scoped_lock);
+void TaskQueue<T>::Locked::BlockingDrain() {
+  while (queue_->outstanding_tasks_ > 0) {
+    queue_->tasks_drained_.Wait(lock_);
   }
 }

 template <class T>
-void TaskQueue<T>::Stop() {
-  Mutex::ScopedLock scoped_lock(lock_);
-  stopped_ = true;
-  tasks_available_.Broadcast(scoped_lock);
+void TaskQueue<T>::Locked::Stop() {
+  queue_->stopped_ = true;
+  queue_->tasks_available_.Broadcast(lock_);
 }

 template <class T>
-std::queue<std::unique_ptr<T>> TaskQueue<T>::PopAll() {
-  Mutex::ScopedLock scoped_lock(lock_);
+std::queue<std::unique_ptr<T>> TaskQueue<T>::Locked::PopAll() {
   std::queue<std::unique_ptr<T>> result;
-  result.swap(task_queue_);
+  result.swap(queue_->task_queue_);
   return result;
 }

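The corresponding header change is not shown in this excerpt, so the declaration of TaskQueue<T>::Locked has to be inferred. A sketch of what it presumably looks like, based only on the definitions above, follows; Mutex, ConditionVariable, and Mutex::ScopedLock are Node's wrappers from node_mutex.h, and the exact member layout is an assumption rather than the actual contents of node_platform.h:

// Assumed shape of the TaskQueue declaration after this change; not copied
// from node_platform.h.
#include <memory>
#include <queue>

#include "node_mutex.h"  // Node-internal Mutex / ConditionVariable wrappers

template <class T>
class TaskQueue {
 public:
  TaskQueue();
  ~TaskQueue() = default;

  // RAII view of the queue: holds lock_ for its lifetime and exposes the
  // operations that previously locked internally on every call.
  class Locked {
   public:
    void Push(std::unique_ptr<T> task);
    std::unique_ptr<T> Pop();
    std::unique_ptr<T> BlockingPop();
    std::queue<std::unique_ptr<T>> PopAll();
    void NotifyOfCompletion();
    void BlockingDrain();
    void Stop();

   private:
    friend class TaskQueue;
    explicit Locked(TaskQueue* queue);

    TaskQueue* queue_;
    Mutex::ScopedLock lock_;
  };

  // Returning the guard as a prvalue relies on guaranteed copy elision,
  // since ScopedLock is neither copyable nor movable.
  Locked Lock() { return Locked(this); }

 private:
  Mutex lock_;
  ConditionVariable tasks_available_;
  ConditionVariable tasks_drained_;
  int outstanding_tasks_;
  bool stopped_;
  std::queue<std::unique_ptr<T>> task_queue_;
};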