@@ -404,8 +404,8 @@ static void execlists_submit_contexts(struct intel_engine_cs *ring,
 
 static void execlists_context_unqueue(struct intel_engine_cs *ring)
 {
-	struct intel_ctx_submit_request *req0 = NULL, *req1 = NULL;
-	struct intel_ctx_submit_request *cursor = NULL, *tmp = NULL;
+	struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
+	struct drm_i915_gem_request *cursor = NULL, *tmp = NULL;
 
 	assert_spin_locked(&ring->execlist_lock);
 
@@ -417,7 +417,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
 				 execlist_link) {
 		if (!req0) {
 			req0 = cursor;
-		} else if (req0->request->ctx == cursor->request->ctx) {
+		} else if (req0->ctx == cursor->ctx) {
 			/* Same ctx: ignore first request, as second request
 			 * will update tail past first request's workload */
 			cursor->elsp_submitted = req0->elsp_submitted;
@@ -433,9 +433,9 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
 
 	WARN_ON(req1 && req1->elsp_submitted);
 
-	execlists_submit_contexts(ring, req0->request->ctx, req0->request->tail,
-				  req1 ? req1->request->ctx : NULL,
-				  req1 ? req1->request->tail : 0);
+	execlists_submit_contexts(ring, req0->ctx, req0->tail,
+				  req1 ? req1->ctx : NULL,
+				  req1 ? req1->tail : 0);
 
 	req0->elsp_submitted++;
 	if (req1)
@@ -445,17 +445,17 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
 static bool execlists_check_remove_request(struct intel_engine_cs *ring,
 					   u32 request_id)
 {
-	struct intel_ctx_submit_request *head_req;
+	struct drm_i915_gem_request *head_req;
 
 	assert_spin_locked(&ring->execlist_lock);
 
 	head_req = list_first_entry_or_null(&ring->execlist_queue,
-					    struct intel_ctx_submit_request,
+					    struct drm_i915_gem_request,
 					    execlist_link);
 
 	if (head_req != NULL) {
 		struct drm_i915_gem_object *ctx_obj =
-				head_req->request->ctx->engine[ring->id].state;
+				head_req->ctx->engine[ring->id].state;
 		if (intel_execlists_ctx_id(ctx_obj) == request_id) {
 			WARN(head_req->elsp_submitted == 0,
 			     "Never submitted head request\n");
@@ -537,15 +537,11 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
 				   u32 tail,
 				   struct drm_i915_gem_request *request)
 {
-	struct intel_ctx_submit_request *req = NULL, *cursor;
+	struct drm_i915_gem_request *cursor;
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 	unsigned long flags;
 	int num_elements = 0;
 
-	req = kzalloc(sizeof(*req), GFP_KERNEL);
-	if (req == NULL)
-		return -ENOMEM;
-
 	if (to != ring->default_context)
 		intel_lr_context_pin(ring, to);
 
@@ -559,14 +555,13 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
 		if (request == NULL)
 			return -ENOMEM;
 		request->ring = ring;
+		request->ctx = to;
 	} else {
 		WARN_ON(to != request->ctx);
 	}
-	request->ctx = to;
 	request->tail = tail;
-	req->request = request;
 	i915_gem_request_reference(request);
-	i915_gem_context_reference(req->request->ctx);
+	i915_gem_context_reference(request->ctx);
 
 	intel_runtime_pm_get(dev_priv);
 
@@ -577,13 +572,13 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
 			break;
 
 	if (num_elements > 2) {
-		struct intel_ctx_submit_request *tail_req;
+		struct drm_i915_gem_request *tail_req;
 
 		tail_req = list_last_entry(&ring->execlist_queue,
-					   struct intel_ctx_submit_request,
+					   struct drm_i915_gem_request,
 					   execlist_link);
 
-		if (to == tail_req->request->ctx) {
+		if (to == tail_req->ctx) {
 			WARN(tail_req->elsp_submitted != 0,
 			     "More than 2 already-submitted reqs queued\n");
 			list_del(&tail_req->execlist_link);
@@ -592,7 +587,7 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
 		}
 	}
 
-	list_add_tail(&req->execlist_link, &ring->execlist_queue);
+	list_add_tail(&request->execlist_link, &ring->execlist_queue);
 	if (num_elements == 0)
 		execlists_context_unqueue(ring);
 
@@ -761,7 +756,7 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
 
 void intel_execlists_retire_requests(struct intel_engine_cs *ring)
 {
-	struct intel_ctx_submit_request *req, *tmp;
+	struct drm_i915_gem_request *req, *tmp;
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 	unsigned long flags;
 	struct list_head retired_list;
@@ -776,17 +771,16 @@ void intel_execlists_retire_requests(struct intel_engine_cs *ring)
 	spin_unlock_irqrestore(&ring->execlist_lock, flags);
 
 	list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
-		struct intel_context *ctx = req->request->ctx;
+		struct intel_context *ctx = req->ctx;
 		struct drm_i915_gem_object *ctx_obj =
 				ctx->engine[ring->id].state;
 
 		if (ctx_obj && (ctx != ring->default_context))
 			intel_lr_context_unpin(ring, ctx);
 		intel_runtime_pm_put(dev_priv);
 		i915_gem_context_unreference(ctx);
-		i915_gem_request_unreference(req->request);
+		i915_gem_request_unreference(req);
 		list_del(&req->execlist_link);
-		kfree(req);
 	}
 }
 
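The net effect of the diff above: the execlists submission queue now links struct drm_i915_gem_request directly, instead of wrapping each request in a separately kzalloc'd intel_ctx_submit_request. A minimal sketch of the request fields this change relies on, inferred only from the accesses in the hunks (the real definition in the i915 headers carries many more members):

/*
 * Illustrative field subset, inferred from the accesses in this diff;
 * not the full drm_i915_gem_request definition.
 */
struct drm_i915_gem_request {
	struct intel_engine_cs *ring;	/* engine the request executes on */
	struct intel_context *ctx;	/* previously reached via req->request->ctx */
	u32 tail;			/* ringbuffer tail submitted via the ELSP */
	int elsp_submitted;		/* submissions to the ELSP port so far */
	struct list_head execlist_link;	/* node in ring->execlist_queue */
};

Because the request itself now sits on ring->execlist_queue, the wrapper's kzalloc()/kfree() pair disappears and lifetime is managed solely through i915_gem_request_reference()/i915_gem_request_unreference().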