@@ -40,9 +40,10 @@ static void PlatformWorkerThread(void* data) {
     worker_data->platform_workers_ready->Signal(lock);
   }

-  while (std::unique_ptr<Task> task = pending_worker_tasks->BlockingPop()) {
+  while (std::unique_ptr<Task> task =
+             pending_worker_tasks->Lock().BlockingPop()) {
     task->Run();
-    pending_worker_tasks->NotifyOfCompletion();
+    pending_worker_tasks->Lock().NotifyOfCompletion();
   }
 }

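For readers following along: TaskQueue<T>::Lock() now returns a scoped accessor (Locked) that holds the queue's mutex for its whole lifetime, and every queue operation moves onto that accessor (see the template definitions at the bottom of this diff). The snippet below is a minimal, self-contained illustration of that idiom using standard-library types only; it is not the actual declaration from node_platform.h, which is not part of this diff.

#include <condition_variable>
#include <memory>
#include <mutex>
#include <queue>

// Guard-accessor idiom: Lock() returns an object that owns the mutex for its
// lifetime, and all queue operations are members of that object, so a caller
// can combine several operations under a single lock acquisition.
template <class T>
class GuardedQueue {
 public:
  class Locked {
   public:
    explicit Locked(GuardedQueue* queue)
        : queue_(queue), lock_(queue->mutex_) {}

    void Push(std::unique_ptr<T> item) {
      queue_->items_.push(std::move(item));
      queue_->available_.notify_one();
    }

    std::unique_ptr<T> BlockingPop() {
      // wait() releases the mutex while blocked and reacquires it afterwards,
      // much like the ConditionVariable::Wait(scoped_lock) calls in the real
      // code below.
      queue_->available_.wait(lock_, [this] {
        return queue_->stopped_ || !queue_->items_.empty();
      });
      if (queue_->stopped_) return nullptr;
      std::unique_ptr<T> result = std::move(queue_->items_.front());
      queue_->items_.pop();
      return result;
    }

    void Stop() {
      queue_->stopped_ = true;
      queue_->available_.notify_all();
    }

   private:
    GuardedQueue* queue_;
    std::unique_lock<std::mutex> lock_;  // held until the guard is destroyed
  };

  Locked Lock() { return Locked(this); }  // guaranteed copy elision (C++17)

 private:
  std::mutex mutex_;
  std::condition_variable available_;
  std::queue<std::unique_ptr<T>> items_;
  bool stopped_ = false;
};

A caller writes queue.Lock().Push(std::move(task)) for a one-off operation, or keeps the returned guard in a local variable to make a compound operation atomic, which is what the PostDelayedTask() and Stop() changes in the next hunk do.
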
@@ -73,13 +74,15 @@ class WorkerThreadsTaskRunner::DelayedTaskScheduler {
   }

   void PostDelayedTask(std::unique_ptr<Task> task, double delay_in_seconds) {
-    tasks_.Push(std::make_unique<ScheduleTask>(this, std::move(task),
-                                               delay_in_seconds));
+    auto locked = tasks_.Lock();
+    locked.Push(std::make_unique<ScheduleTask>(
+        this, std::move(task), delay_in_seconds));
     uv_async_send(&flush_tasks_);
   }

   void Stop() {
-    tasks_.Push(std::make_unique<StopTask>(this));
+    auto locked = tasks_.Lock();
+    locked.Push(std::make_unique<StopTask>(this));
     uv_async_send(&flush_tasks_);
   }

@@ -100,8 +103,13 @@ class WorkerThreadsTaskRunner::DelayedTaskScheduler {
   static void FlushTasks(uv_async_t* flush_tasks) {
     DelayedTaskScheduler* scheduler =
         ContainerOf(&DelayedTaskScheduler::loop_, flush_tasks->loop);
-    while (std::unique_ptr<Task> task = scheduler->tasks_.Pop())
+
+    std::queue<std::unique_ptr<Task>> tasks_to_run = scheduler->tasks_.Lock().PopAll();
+    while (!tasks_to_run.empty()) {
+      std::unique_ptr<Task> task = std::move(tasks_to_run.front());
+      tasks_to_run.pop();
       task->Run();
+    }
   }

   class StopTask : public Task {
@@ -149,7 +157,8 @@ class WorkerThreadsTaskRunner::DelayedTaskScheduler {
   static void RunTask(uv_timer_t* timer) {
     DelayedTaskScheduler* scheduler =
         ContainerOf(&DelayedTaskScheduler::loop_, timer->loop);
-    scheduler->pending_worker_tasks_->Push(scheduler->TakeTimerTask(timer));
+    scheduler->pending_worker_tasks_->Lock().Push(
+        scheduler->TakeTimerTask(timer));
   }

   std::unique_ptr<Task> TakeTimerTask(uv_timer_t* timer) {
@@ -203,7 +212,7 @@ WorkerThreadsTaskRunner::WorkerThreadsTaskRunner(int thread_pool_size) {
 }

 void WorkerThreadsTaskRunner::PostTask(std::unique_ptr<Task> task) {
-  pending_worker_tasks_.Push(std::move(task));
+  pending_worker_tasks_.Lock().Push(std::move(task));
 }

 void WorkerThreadsTaskRunner::PostDelayedTask(std::unique_ptr<Task> task,
@@ -212,11 +221,11 @@ void WorkerThreadsTaskRunner::PostDelayedTask(std::unique_ptr<Task> task,
 }

 void WorkerThreadsTaskRunner::BlockingDrain() {
-  pending_worker_tasks_.BlockingDrain();
+  pending_worker_tasks_.Lock().BlockingDrain();
 }

 void WorkerThreadsTaskRunner::Shutdown() {
-  pending_worker_tasks_.Stop();
+  pending_worker_tasks_.Lock().Stop();
   delayed_task_scheduler_->Stop();
   for (size_t i = 0; i < threads_.size(); i++) {
     CHECK_EQ(0, uv_thread_join(threads_[i].get()));
@@ -253,29 +262,23 @@ void PerIsolatePlatformData::PostIdleTaskImpl(

 void PerIsolatePlatformData::PostTaskImpl(std::unique_ptr<Task> task,
                                           const v8::SourceLocation& location) {
-  if (flush_tasks_ == nullptr) {
-    // V8 may post tasks during Isolate disposal. In that case, the only
-    // sensible path forward is to discard the task.
-    return;
-  }
-  foreground_tasks_.Push(std::move(task));
+  auto locked = foreground_tasks_.Lock();
+  if (flush_tasks_ == nullptr) return;
+  locked.Push(std::move(task));
   uv_async_send(flush_tasks_);
 }

 void PerIsolatePlatformData::PostDelayedTaskImpl(
     std::unique_ptr<Task> task,
     double delay_in_seconds,
     const v8::SourceLocation& location) {
-  if (flush_tasks_ == nullptr) {
-    // V8 may post tasks during Isolate disposal. In that case, the only
-    // sensible path forward is to discard the task.
-    return;
-  }
+  auto locked = foreground_delayed_tasks_.Lock();
+  if (flush_tasks_ == nullptr) return;
   std::unique_ptr<DelayedTask> delayed(new DelayedTask());
   delayed->task = std::move(task);
   delayed->platform_data = shared_from_this();
   delayed->timeout = delay_in_seconds;
-  foreground_delayed_tasks_.Push(std::move(delayed));
+  locked.Push(std::move(delayed));
   uv_async_send(flush_tasks_);
 }

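Taking the queue's lock before testing flush_tasks_ (rather than after, as before) appears to be the point of this hunk: together with Shutdown() below, which now holds both queue guards while it drains the queues and clears flush_tasks_, a concurrent post either lands before the drain or observes flush_tasks_ == nullptr and is discarded. A reduced, self-contained model of that interaction (hypothetical names, standard-library types standing in for Node's Mutex and TaskQueue):

#include <memory>
#include <mutex>
#include <queue>

// Reduced model of PostTaskImpl() vs. Shutdown(): the flag check and the push
// happen under the same mutex that shutdown holds while draining, so there is
// no window in which a task can be enqueued after the final drain.
struct Task { virtual ~Task() = default; virtual void Run() = 0; };

struct ForegroundQueue {
  std::mutex mutex;
  std::queue<std::unique_ptr<Task>> tasks;
  bool shut_down = false;  // stands in for flush_tasks_ == nullptr
};

void Post(ForegroundQueue& q, std::unique_ptr<Task> task) {
  std::lock_guard<std::mutex> lock(q.mutex);
  if (q.shut_down) return;  // discard, as the real code does
  q.tasks.push(std::move(task));
}

void Shutdown(ForegroundQueue& q) {
  std::lock_guard<std::mutex> lock(q.mutex);
  while (!q.tasks.empty()) q.tasks.pop();  // drop leftover tasks
  q.shut_down = true;
}
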
@@ -301,32 +304,30 @@ void PerIsolatePlatformData::AddShutdownCallback(void (*callback)(void*),
 }

 void PerIsolatePlatformData::Shutdown() {
-  if (flush_tasks_ == nullptr)
-    return;
+  auto foreground_tasks_locked = foreground_tasks_.Lock();
+  auto foreground_delayed_tasks_locked = foreground_delayed_tasks_.Lock();

-  // While there should be no V8 tasks in the queues at this point, it is
-  // possible that Node.js-internal tasks from e.g. the inspector are still
-  // lying around. We clear these queues and ignore the return value,
-  // effectively deleting the tasks instead of running them.
-  foreground_delayed_tasks_.PopAll();
-  foreground_tasks_.PopAll();
+  foreground_delayed_tasks_locked.PopAll();
+  foreground_tasks_locked.PopAll();
   scheduled_delayed_tasks_.clear();

-  // Both destroying the scheduled_delayed_tasks_ lists and closing
-  // flush_tasks_ handle add tasks to the event loop. We keep a count of all
-  // non-closed handles, and when that reaches zero, we inform any shutdown
-  // callbacks that the platform is done as far as this Isolate is concerned.
-  self_reference_ = shared_from_this();
-  uv_close(reinterpret_cast<uv_handle_t*>(flush_tasks_),
-           [](uv_handle_t* handle) {
-    std::unique_ptr<uv_async_t> flush_tasks {
-        reinterpret_cast<uv_async_t*>(handle) };
-    PerIsolatePlatformData* platform_data =
-        static_cast<PerIsolatePlatformData*>(flush_tasks->data);
-    platform_data->DecreaseHandleCount();
-    platform_data->self_reference_.reset();
-  });
-  flush_tasks_ = nullptr;
+  if (flush_tasks_ != nullptr) {
+    // Both destroying the scheduled_delayed_tasks_ lists and closing
+    // flush_tasks_ handle add tasks to the event loop. We keep a count of all
+    // non-closed handles, and when that reaches zero, we inform any shutdown
+    // callbacks that the platform is done as far as this Isolate is concerned.
+    self_reference_ = shared_from_this();
+    uv_close(reinterpret_cast<uv_handle_t*>(flush_tasks_),
+             [](uv_handle_t* handle) {
+               std::unique_ptr<uv_async_t> flush_tasks{
+                   reinterpret_cast<uv_async_t*>(handle)};
+               PerIsolatePlatformData* platform_data =
+                   static_cast<PerIsolatePlatformData*>(flush_tasks->data);
+               platform_data->DecreaseHandleCount();
+               platform_data->self_reference_.reset();
+             });
+    flush_tasks_ = nullptr;
+  }
 }

 void PerIsolatePlatformData::DecreaseHandleCount() {
@@ -472,39 +473,46 @@ void NodePlatform::DrainTasks(Isolate* isolate) {
 bool PerIsolatePlatformData::FlushForegroundTasksInternal() {
   bool did_work = false;

-  while (std::unique_ptr<DelayedTask> delayed =
-      foreground_delayed_tasks_.Pop()) {
+  std::queue<std::unique_ptr<DelayedTask>> delayed_tasks_to_schedule = foreground_delayed_tasks_.Lock().PopAll();
+  while (!delayed_tasks_to_schedule.empty()) {
+    std::unique_ptr<DelayedTask> delayed = std::move(delayed_tasks_to_schedule.front());
+    delayed_tasks_to_schedule.pop();
+
     did_work = true;
     uint64_t delay_millis = llround(delayed->timeout * 1000);

     delayed->timer.data = static_cast<void*>(delayed.get());
     uv_timer_init(loop_, &delayed->timer);
-    // Timers may not guarantee queue ordering of events with the same delay if
-    // the delay is non-zero. This should not be a problem in practice.
+    // Timers may not guarantee queue ordering of events with the same delay
+    // if the delay is non-zero. This should not be a problem in practice.
     uv_timer_start(&delayed->timer, RunForegroundTask, delay_millis, 0);
     uv_unref(reinterpret_cast<uv_handle_t*>(&delayed->timer));
     uv_handle_count_++;

-    scheduled_delayed_tasks_.emplace_back(delayed.release(),
-                                          [](DelayedTask* delayed) {
-      uv_close(reinterpret_cast<uv_handle_t*>(&delayed->timer),
-               [](uv_handle_t* handle) {
-        std::unique_ptr<DelayedTask> task {
-            static_cast<DelayedTask*>(handle->data) };
-        task->platform_data->DecreaseHandleCount();
-      });
-    });
+    scheduled_delayed_tasks_.emplace_back(
+        delayed.release(), [](DelayedTask* delayed) {
+          uv_close(reinterpret_cast<uv_handle_t*>(&delayed->timer),
+                   [](uv_handle_t* handle) {
+                     std::unique_ptr<DelayedTask> task{
+                         static_cast<DelayedTask*>(handle->data)};
+                     task->platform_data->DecreaseHandleCount();
+                   });
+        });
+  }
+
+  std::queue<std::unique_ptr<Task>> tasks;
+  {
+    auto locked = foreground_tasks_.Lock();
+    tasks = locked.PopAll();
   }
-  // Move all foreground tasks into a separate queue and flush that queue.
-  // This way tasks that are posted while flushing the queue will be run on the
-  // next call of FlushForegroundTasksInternal.
-  std::queue<std::unique_ptr<Task>> tasks = foreground_tasks_.PopAll();
+
   while (!tasks.empty()) {
     std::unique_ptr<Task> task = std::move(tasks.front());
     tasks.pop();
     did_work = true;
     RunForegroundTask(std::move(task));
   }
+
   return did_work;
 }

@@ -594,66 +602,63 @@ TaskQueue<T>::TaskQueue()
     outstanding_tasks_(0), stopped_(false), task_queue_() { }

 template <class T>
-void TaskQueue<T>::Push(std::unique_ptr<T> task) {
-  Mutex::ScopedLock scoped_lock(lock_);
-  outstanding_tasks_++;
-  task_queue_.push(std::move(task));
-  tasks_available_.Signal(scoped_lock);
+TaskQueue<T>::Locked::Locked(TaskQueue* queue)
+    : queue_(queue), lock_(queue->lock_) {}
+
+template <class T>
+void TaskQueue<T>::Locked::Push(std::unique_ptr<T> task) {
+  queue_->outstanding_tasks_++;
+  queue_->task_queue_.push(std::move(task));
+  queue_->tasks_available_.Signal(lock_);
 }

 template <class T>
-std::unique_ptr<T> TaskQueue<T>::Pop() {
-  Mutex::ScopedLock scoped_lock(lock_);
-  if (task_queue_.empty()) {
+std::unique_ptr<T> TaskQueue<T>::Locked::Pop() {
+  if (queue_->task_queue_.empty()) {
     return std::unique_ptr<T>(nullptr);
   }
-  std::unique_ptr<T> result = std::move(task_queue_.front());
-  task_queue_.pop();
+  std::unique_ptr<T> result = std::move(queue_->task_queue_.front());
+  queue_->task_queue_.pop();
   return result;
 }

 template <class T>
-std::unique_ptr<T> TaskQueue<T>::BlockingPop() {
-  Mutex::ScopedLock scoped_lock(lock_);
-  while (task_queue_.empty() && !stopped_) {
-    tasks_available_.Wait(scoped_lock);
+std::unique_ptr<T> TaskQueue<T>::Locked::BlockingPop() {
+  while (queue_->task_queue_.empty() && !queue_->stopped_) {
+    queue_->tasks_available_.Wait(lock_);
   }
-  if (stopped_) {
+  if (queue_->stopped_) {
     return std::unique_ptr<T>(nullptr);
   }
-  std::unique_ptr<T> result = std::move(task_queue_.front());
-  task_queue_.pop();
+  std::unique_ptr<T> result = std::move(queue_->task_queue_.front());
+  queue_->task_queue_.pop();
   return result;
 }

 template <class T>
-void TaskQueue<T>::NotifyOfCompletion() {
-  Mutex::ScopedLock scoped_lock(lock_);
-  if (--outstanding_tasks_ == 0) {
-    tasks_drained_.Broadcast(scoped_lock);
+void TaskQueue<T>::Locked::NotifyOfCompletion() {
+  if (--queue_->outstanding_tasks_ == 0) {
+    queue_->tasks_drained_.Broadcast(lock_);
   }
 }

 template <class T>
-void TaskQueue<T>::BlockingDrain() {
-  Mutex::ScopedLock scoped_lock(lock_);
-  while (outstanding_tasks_ > 0) {
-    tasks_drained_.Wait(scoped_lock);
+void TaskQueue<T>::Locked::BlockingDrain() {
+  while (queue_->outstanding_tasks_ > 0) {
+    queue_->tasks_drained_.Wait(lock_);
   }
 }

 template <class T>
-void TaskQueue<T>::Stop() {
-  Mutex::ScopedLock scoped_lock(lock_);
-  stopped_ = true;
-  tasks_available_.Broadcast(scoped_lock);
+void TaskQueue<T>::Locked::Stop() {
+  queue_->stopped_ = true;
+  queue_->tasks_available_.Broadcast(lock_);
 }

 template <class T>
-std::queue<std::unique_ptr<T>> TaskQueue<T>::PopAll() {
-  Mutex::ScopedLock scoped_lock(lock_);
+std::queue<std::unique_ptr<T>> TaskQueue<T>::Locked::PopAll() {
   std::queue<std::unique_ptr<T>> result;
-  result.swap(task_queue_);
+  result.swap(queue_->task_queue_);
   return result;
 }

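Both drain sites above (DelayedTaskScheduler::FlushTasks() and FlushForegroundTasksInternal()) follow the same shape with the new API: take the guard once, PopAll() into a local queue, let the guard go out of scope, then run the tasks. Running outside the lock matters because a task may itself post more work; with the guard held across the loop that would self-deadlock on the non-recursive mutex, and draining first also keeps work posted during a flush on the next flush, as the removed comment in FlushForegroundTasksInternal() noted. A small self-contained sketch of that shape (standard-library types, hypothetical names):

#include <cstdio>
#include <functional>
#include <mutex>
#include <queue>

std::mutex queue_mutex;
std::queue<std::function<void()>> pending;  // stands in for a TaskQueue

void Post(std::function<void()> fn) {
  std::lock_guard<std::mutex> lock(queue_mutex);
  pending.push(std::move(fn));
}

// Drain in the FlushTasks() style: swap the whole queue out under one lock
// acquisition, then run the tasks with the lock released so they are free to
// Post() more work; that work waits for the next Flush().
bool Flush() {
  std::queue<std::function<void()>> to_run;
  {
    std::lock_guard<std::mutex> lock(queue_mutex);
    to_run.swap(pending);
  }
  bool did_work = !to_run.empty();
  while (!to_run.empty()) {
    to_run.front()();
    to_run.pop();
  }
  return did_work;
}

int main() {
  Post([] {
    std::puts("first");
    Post([] { std::puts("posted during flush"); });
  });
  Flush();  // prints "first"
  Flush();  // prints "posted during flush"
}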