@@ -5289,21 +5289,22 @@ static int exclusive_event_init(struct perf_event *event)
 			return -EBUSY;
 	}

+	event->attach_state |= PERF_ATTACH_EXCLUSIVE;
+
 	return 0;
 }

 static void exclusive_event_destroy(struct perf_event *event)
 {
 	struct pmu *pmu = event->pmu;

-	if (!is_exclusive_pmu(pmu))
-		return;
-
 	/* see comment in exclusive_event_init() */
 	if (event->attach_state & PERF_ATTACH_TASK)
 		atomic_dec(&pmu->exclusive_cnt);
 	else
 		atomic_inc(&pmu->exclusive_cnt);
+
+	event->attach_state &= ~PERF_ATTACH_EXCLUSIVE;
 }

 static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2)
@@ -5362,40 +5363,20 @@ static void perf_pending_task_sync(struct perf_event *event)
 	rcuwait_wait_event(&event->pending_work_wait, !event->pending_work, TASK_UNINTERRUPTIBLE);
 }

-static void _free_event(struct perf_event *event)
+/* vs perf_event_alloc() error */
+static void __free_event(struct perf_event *event)
 {
-	irq_work_sync(&event->pending_irq);
-	irq_work_sync(&event->pending_disable_irq);
-	perf_pending_task_sync(event);
+	if (event->attach_state & PERF_ATTACH_CALLCHAIN)
+		put_callchain_buffers();

-	unaccount_event(event);
+	kfree(event->addr_filter_ranges);

-	security_perf_event_free(event);
-
-	if (event->rb) {
-		/*
-		 * Can happen when we close an event with re-directed output.
-		 *
-		 * Since we have a 0 refcount, perf_mmap_close() will skip
-		 * over us; possibly making our ring_buffer_put() the last.
-		 */
-		mutex_lock(&event->mmap_mutex);
-		ring_buffer_attach(event, NULL);
-		mutex_unlock(&event->mmap_mutex);
-	}
+	if (event->attach_state & PERF_ATTACH_EXCLUSIVE)
+		exclusive_event_destroy(event);

 	if (is_cgroup_event(event))
 		perf_detach_cgroup(event);

-	if (!event->parent) {
-		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
-			put_callchain_buffers();
-	}
-
-	perf_event_free_bpf_prog(event);
-	perf_addr_filters_splice(event, NULL);
-	kfree(event->addr_filter_ranges);
-
 	if (event->destroy)
 		event->destroy(event);
@@ -5406,22 +5387,58 @@ static void _free_event(struct perf_event *event)
 	if (event->hw.target)
 		put_task_struct(event->hw.target);

-	if (event->pmu_ctx)
+	if (event->pmu_ctx) {
+		/*
+		 * put_pmu_ctx() needs an event->ctx reference, because of
+		 * epc->ctx.
+		 */
+		WARN_ON_ONCE(!event->ctx);
+		WARN_ON_ONCE(event->pmu_ctx->ctx != event->ctx);
 		put_pmu_ctx(event->pmu_ctx);
+	}

 	/*
-	 * perf_event_free_task() relies on put_ctx() being 'last', in particular
-	 * all task references must be cleaned up.
+	 * perf_event_free_task() relies on put_ctx() being 'last', in
+	 * particular all task references must be cleaned up.
 	 */
 	if (event->ctx)
 		put_ctx(event->ctx);

-	exclusive_event_destroy(event);
-	module_put(event->pmu->module);
+	if (event->pmu)
+		module_put(event->pmu->module);

 	call_rcu(&event->rcu_head, free_event_rcu);
 }

+/* vs perf_event_alloc() success */
+static void _free_event(struct perf_event *event)
+{
+	irq_work_sync(&event->pending_irq);
+	irq_work_sync(&event->pending_disable_irq);
+	perf_pending_task_sync(event);
+
+	unaccount_event(event);
+
+	security_perf_event_free(event);
+
+	if (event->rb) {
+		/*
+		 * Can happen when we close an event with re-directed output.
+		 *
+		 * Since we have a 0 refcount, perf_mmap_close() will skip
+		 * over us; possibly making our ring_buffer_put() the last.
+		 */
+		mutex_lock(&event->mmap_mutex);
+		ring_buffer_attach(event, NULL);
+		mutex_unlock(&event->mmap_mutex);
+	}
+
+	perf_event_free_bpf_prog(event);
+	perf_addr_filters_splice(event, NULL);
+
+	__free_event(event);
+}
+
 /*
  * Used to free events which have a known refcount of 1, such as in error paths
  * where the event isn't exposed yet and inherited events.
@@ -12093,8 +12110,10 @@ static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
 			event->destroy(event);
 	}

-	if (ret)
+	if (ret) {
+		event->pmu = NULL;
 		module_put(pmu->module);
+	}

 	return ret;
 }
@@ -12422,15 +12441,15 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 	 * See perf_output_read().
 	 */
 	if (has_inherit_and_sample_read(attr) && !(attr->sample_type & PERF_SAMPLE_TID))
-		goto err_ns;
+		goto err;

 	if (!has_branch_stack(event))
 		event->attr.branch_sample_type = 0;

 	pmu = perf_init_event(event);
 	if (IS_ERR(pmu)) {
 		err = PTR_ERR(pmu);
-		goto err_ns;
+		goto err;
 	}

 	/*
@@ -12440,46 +12459,46 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 	 */
 	if (pmu->task_ctx_nr == perf_invalid_context && (task || cgroup_fd != -1)) {
 		err = -EINVAL;
-		goto err_pmu;
+		goto err;
 	}

 	if (event->attr.aux_output &&
 	    (!(pmu->capabilities & PERF_PMU_CAP_AUX_OUTPUT) ||
 	     event->attr.aux_pause || event->attr.aux_resume)) {
 		err = -EOPNOTSUPP;
-		goto err_pmu;
+		goto err;
 	}

 	if (event->attr.aux_pause && event->attr.aux_resume) {
 		err = -EINVAL;
-		goto err_pmu;
+		goto err;
 	}

 	if (event->attr.aux_start_paused) {
 		if (!(pmu->capabilities & PERF_PMU_CAP_AUX_PAUSE)) {
 			err = -EOPNOTSUPP;
-			goto err_pmu;
+			goto err;
 		}
 		event->hw.aux_paused = 1;
 	}

 	if (cgroup_fd != -1) {
 		err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader);
 		if (err)
-			goto err_pmu;
+			goto err;
 	}

 	err = exclusive_event_init(event);
 	if (err)
-		goto err_pmu;
+		goto err;

 	if (has_addr_filter(event)) {
 		event->addr_filter_ranges = kcalloc(pmu->nr_addr_filters,
 						    sizeof(struct perf_addr_filter_range),
 						    GFP_KERNEL);
 		if (!event->addr_filter_ranges) {
 			err = -ENOMEM;
-			goto err_per_task;
+			goto err;
 		}

 		/*
@@ -12504,41 +12523,22 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
 			err = get_callchain_buffers(attr->sample_max_stack);
 			if (err)
-				goto err_addr_filters;
+				goto err;
+			event->attach_state |= PERF_ATTACH_CALLCHAIN;
 		}
 	}

 	err = security_perf_event_alloc(event);
 	if (err)
-		goto err_callchain_buffer;
+		goto err;

 	/* symmetric to unaccount_event() in _free_event() */
 	account_event(event);

 	return event;

-err_callchain_buffer:
-	if (!event->parent) {
-		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
-			put_callchain_buffers();
-	}
-err_addr_filters:
-	kfree(event->addr_filter_ranges);
-
-err_per_task:
-	exclusive_event_destroy(event);
-
-err_pmu:
-	if (is_cgroup_event(event))
-		perf_detach_cgroup(event);
-	if (event->destroy)
-		event->destroy(event);
-	module_put(pmu->module);
-err_ns:
-	if (event->hw.target)
-		put_task_struct(event->hw.target);
-	call_rcu(&event->rcu_head, free_event_rcu);
-
+err:
+	__free_event(event);
 	return ERR_PTR(err);
 }
