Commit c70ca29

Peter Zijlstra authored and Ingo Molnar committed
perf/core: Simplify the perf_event_alloc() error path
The error cleanup sequence in perf_event_alloc() is a subset of the
existing _free_event() function (it must of course be). Split this out
into __free_event() and simplify the error path.

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Reviewed-by: Ravi Bangoria <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Parent commit: 061c991

2 files changed: 78 additions (+), 76 deletions (-)
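To make the shape of the change concrete, here is a minimal, hypothetical userspace sketch of the pattern the commit adopts (illustration only, not the kernel code; event_alloc, event_free and the ATTACH_* bits are invented stand-ins): each completed construction step sets a bit in a state word, so one teardown routine can unwind any prefix of the construction, and every failure site can share a single error label.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Each bit records one completed init step (cf. PERF_ATTACH_*). */
#define ATTACH_BUF   0x01
#define ATTACH_NAME  0x02

struct event {
        unsigned int attach_state;
        void *buf;
        char *name;
};

/*
 * One unwind routine serves both the normal free path and the
 * constructor's error path: it releases only what the attach_state
 * bits say was actually acquired (cf. __free_event()).
 */
static void event_free(struct event *e)
{
        if (e->attach_state & ATTACH_NAME)
                free(e->name);
        if (e->attach_state & ATTACH_BUF)
                free(e->buf);
        free(e);
}

static struct event *event_alloc(size_t bufsz, int fail_late)
{
        struct event *e = calloc(1, sizeof(*e));

        if (!e)
                return NULL;

        e->buf = malloc(bufsz);
        if (!e->buf)
                goto err;
        e->attach_state |= ATTACH_BUF;

        e->name = strdup("demo");
        if (!e->name)
                goto err;
        e->attach_state |= ATTACH_NAME;

        if (fail_late)          /* simulate a late validation failure */
                goto err;

        return e;
err:
        event_free(e);          /* one label replaces a cascade of err_* labels */
        return NULL;
}

int main(void)
{
        struct event *e = event_alloc(64, 0);

        if (e)
                event_free(e);
        if (!event_alloc(64, 1))
                printf("late failure unwound cleanly\n");
        return 0;
}

The design payoff is exactly what the commit message states: the error cleanup is forced to remain a subset of the full teardown, because both run through the same function.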

include/linux/perf_event.h (9 additions, 7 deletions)

@@ -673,13 +673,15 @@ struct swevent_hlist {
         struct rcu_head rcu_head;
 };
 
-#define PERF_ATTACH_CONTEXT     0x01
-#define PERF_ATTACH_GROUP       0x02
-#define PERF_ATTACH_TASK        0x04
-#define PERF_ATTACH_TASK_DATA   0x08
-#define PERF_ATTACH_ITRACE      0x10
-#define PERF_ATTACH_SCHED_CB    0x20
-#define PERF_ATTACH_CHILD       0x40
+#define PERF_ATTACH_CONTEXT     0x0001
+#define PERF_ATTACH_GROUP       0x0002
+#define PERF_ATTACH_TASK        0x0004
+#define PERF_ATTACH_TASK_DATA   0x0008
+#define PERF_ATTACH_ITRACE      0x0010
+#define PERF_ATTACH_SCHED_CB    0x0020
+#define PERF_ATTACH_CHILD       0x0040
+#define PERF_ATTACH_EXCLUSIVE   0x0080
+#define PERF_ATTACH_CALLCHAIN   0x0100
 
 struct bpf_prog;
 struct perf_cgroup;
kernel/events/core.c (69 additions, 69 deletions)
@@ -5289,21 +5289,22 @@ static int exclusive_event_init(struct perf_event *event)
                         return -EBUSY;
         }
 
+        event->attach_state |= PERF_ATTACH_EXCLUSIVE;
+
         return 0;
 }
 
 static void exclusive_event_destroy(struct perf_event *event)
 {
         struct pmu *pmu = event->pmu;
 
-        if (!is_exclusive_pmu(pmu))
-                return;
-
         /* see comment in exclusive_event_init() */
         if (event->attach_state & PERF_ATTACH_TASK)
                 atomic_dec(&pmu->exclusive_cnt);
         else
                 atomic_inc(&pmu->exclusive_cnt);
+
+        event->attach_state &= ~PERF_ATTACH_EXCLUSIVE;
 }
 
 static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2)
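(With PERF_ATTACH_EXCLUSIVE set only by a successful exclusive_event_init(), exclusive_event_destroy() no longer needs its own is_exclusive_pmu() guard; the flag check moves to the caller, __free_event(), below.)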
@@ -5362,40 +5363,20 @@ static void perf_pending_task_sync(struct perf_event *event)
         rcuwait_wait_event(&event->pending_work_wait, !event->pending_work, TASK_UNINTERRUPTIBLE);
 }
 
-static void _free_event(struct perf_event *event)
+/* vs perf_event_alloc() error */
+static void __free_event(struct perf_event *event)
 {
-        irq_work_sync(&event->pending_irq);
-        irq_work_sync(&event->pending_disable_irq);
-        perf_pending_task_sync(event);
+        if (event->attach_state & PERF_ATTACH_CALLCHAIN)
+                put_callchain_buffers();
 
-        unaccount_event(event);
+        kfree(event->addr_filter_ranges);
 
-        security_perf_event_free(event);
-
-        if (event->rb) {
-                /*
-                 * Can happen when we close an event with re-directed output.
-                 *
-                 * Since we have a 0 refcount, perf_mmap_close() will skip
-                 * over us; possibly making our ring_buffer_put() the last.
-                 */
-                mutex_lock(&event->mmap_mutex);
-                ring_buffer_attach(event, NULL);
-                mutex_unlock(&event->mmap_mutex);
-        }
+        if (event->attach_state & PERF_ATTACH_EXCLUSIVE)
+                exclusive_event_destroy(event);
 
         if (is_cgroup_event(event))
                 perf_detach_cgroup(event);
 
-        if (!event->parent) {
-                if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
-                        put_callchain_buffers();
-        }
-
-        perf_event_free_bpf_prog(event);
-        perf_addr_filters_splice(event, NULL);
-        kfree(event->addr_filter_ranges);
-
         if (event->destroy)
                 event->destroy(event);
@@ -5406,22 +5387,58 @@ static void _free_event(struct perf_event *event)
         if (event->hw.target)
                 put_task_struct(event->hw.target);
 
-        if (event->pmu_ctx)
+        if (event->pmu_ctx) {
+                /*
+                 * put_pmu_ctx() needs an event->ctx reference, because of
+                 * epc->ctx.
+                 */
+                WARN_ON_ONCE(!event->ctx);
+                WARN_ON_ONCE(event->pmu_ctx->ctx != event->ctx);
                 put_pmu_ctx(event->pmu_ctx);
+        }
 
         /*
-         * perf_event_free_task() relies on put_ctx() being 'last', in particular
-         * all task references must be cleaned up.
+         * perf_event_free_task() relies on put_ctx() being 'last', in
+         * particular all task references must be cleaned up.
          */
         if (event->ctx)
                 put_ctx(event->ctx);
 
-        exclusive_event_destroy(event);
-        module_put(event->pmu->module);
+        if (event->pmu)
+                module_put(event->pmu->module);
 
         call_rcu(&event->rcu_head, free_event_rcu);
 }
 
+/* vs perf_event_alloc() success */
+static void _free_event(struct perf_event *event)
+{
+        irq_work_sync(&event->pending_irq);
+        irq_work_sync(&event->pending_disable_irq);
+        perf_pending_task_sync(event);
+
+        unaccount_event(event);
+
+        security_perf_event_free(event);
+
+        if (event->rb) {
+                /*
+                 * Can happen when we close an event with re-directed output.
+                 *
+                 * Since we have a 0 refcount, perf_mmap_close() will skip
+                 * over us; possibly making our ring_buffer_put() the last.
+                 */
+                mutex_lock(&event->mmap_mutex);
+                ring_buffer_attach(event, NULL);
+                mutex_unlock(&event->mmap_mutex);
+        }
+
+        perf_event_free_bpf_prog(event);
+        perf_addr_filters_splice(event, NULL);
+
+        __free_event(event);
+}
+
 /*
  * Used to free events which have a known refcount of 1, such as in error paths
  * where the event isn't exposed yet and inherited events.
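(The success-path _free_event() now ends by funnelling into __free_event(), so the error-path cleanup is, by construction, a strict subset of the full teardown and the two cannot drift apart.)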
@@ -12093,8 +12110,10 @@ static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
                         event->destroy(event);
         }
 
-        if (ret)
+        if (ret) {
+                event->pmu = NULL;
                 module_put(pmu->module);
+        }
 
         return ret;
 }
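(A likely reading of this hunk: perf_try_init_event() drops the module reference itself when initialization fails, so it also clears event->pmu; __free_event() in turn only calls module_put() when event->pmu is non-NULL, which prevents a double put when perf_event_alloc() fails after perf_init_event().)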
@@ -12422,15 +12441,15 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
          * See perf_output_read().
          */
         if (has_inherit_and_sample_read(attr) && !(attr->sample_type & PERF_SAMPLE_TID))
-                goto err_ns;
+                goto err;
 
         if (!has_branch_stack(event))
                 event->attr.branch_sample_type = 0;
 
         pmu = perf_init_event(event);
         if (IS_ERR(pmu)) {
                 err = PTR_ERR(pmu);
-                goto err_ns;
+                goto err;
         }
 
         /*
@@ -12440,46 +12459,46 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
          */
         if (pmu->task_ctx_nr == perf_invalid_context && (task || cgroup_fd != -1)) {
                 err = -EINVAL;
-                goto err_pmu;
+                goto err;
         }
 
         if (event->attr.aux_output &&
             (!(pmu->capabilities & PERF_PMU_CAP_AUX_OUTPUT) ||
              event->attr.aux_pause || event->attr.aux_resume)) {
                 err = -EOPNOTSUPP;
-                goto err_pmu;
+                goto err;
         }
 
         if (event->attr.aux_pause && event->attr.aux_resume) {
                 err = -EINVAL;
-                goto err_pmu;
+                goto err;
         }
 
         if (event->attr.aux_start_paused) {
                 if (!(pmu->capabilities & PERF_PMU_CAP_AUX_PAUSE)) {
                         err = -EOPNOTSUPP;
-                        goto err_pmu;
+                        goto err;
                 }
                 event->hw.aux_paused = 1;
         }
 
         if (cgroup_fd != -1) {
                 err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader);
                 if (err)
-                        goto err_pmu;
+                        goto err;
         }
 
         err = exclusive_event_init(event);
         if (err)
-                goto err_pmu;
+                goto err;
 
         if (has_addr_filter(event)) {
                 event->addr_filter_ranges = kcalloc(pmu->nr_addr_filters,
                                                     sizeof(struct perf_addr_filter_range),
                                                     GFP_KERNEL);
                 if (!event->addr_filter_ranges) {
                         err = -ENOMEM;
-                        goto err_per_task;
+                        goto err;
                 }
 
                 /*
@@ -12504,41 +12523,22 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
                 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
                         err = get_callchain_buffers(attr->sample_max_stack);
                         if (err)
-                                goto err_addr_filters;
+                                goto err;
+                        event->attach_state |= PERF_ATTACH_CALLCHAIN;
                 }
         }
 
         err = security_perf_event_alloc(event);
         if (err)
-                goto err_callchain_buffer;
+                goto err;
 
         /* symmetric to unaccount_event() in _free_event() */
         account_event(event);
 
         return event;
 
-err_callchain_buffer:
-        if (!event->parent) {
-                if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
-                        put_callchain_buffers();
-        }
-err_addr_filters:
-        kfree(event->addr_filter_ranges);
-
-err_per_task:
-        exclusive_event_destroy(event);
-
-err_pmu:
-        if (is_cgroup_event(event))
-                perf_detach_cgroup(event);
-        if (event->destroy)
-                event->destroy(event);
-        module_put(pmu->module);
-err_ns:
-        if (event->hw.target)
-                put_task_struct(event->hw.target);
-        call_rcu(&event->rcu_head, free_event_rcu);
-
+err:
+        __free_event(event);
         return ERR_PTR(err);
 }
 