@@ -559,6 +559,8 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
 		if (request == NULL)
 			return -ENOMEM;
 		request->ring = ring;
+	} else {
+		WARN_ON(to != request->ctx);
 	}
 	request->ctx = to;
 	request->tail = tail;
@@ -599,7 +601,8 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
 	return 0;
 }
 
-static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf)
+static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf,
+					      struct intel_context *ctx)
 {
 	struct intel_engine_cs *ring = ringbuf->ring;
 	uint32_t flush_domains;
@@ -609,7 +612,8 @@ static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf)
 	if (ring->gpu_caches_dirty)
 		flush_domains = I915_GEM_GPU_DOMAINS;
 
-	ret = ring->emit_flush(ringbuf, I915_GEM_GPU_DOMAINS, flush_domains);
+	ret = ring->emit_flush(ringbuf, ctx,
+			       I915_GEM_GPU_DOMAINS, flush_domains);
 	if (ret)
 		return ret;
 
@@ -618,6 +622,7 @@ static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf)
 }
 
 static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
+				 struct intel_context *ctx,
 				 struct list_head *vmas)
 {
 	struct intel_engine_cs *ring = ringbuf->ring;
@@ -645,7 +650,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
 	/* Unconditionally invalidate gpu caches and ensure that we do flush
 	 * any residual writes from the previous batch.
 	 */
-	return logical_ring_invalidate_all_caches(ringbuf);
+	return logical_ring_invalidate_all_caches(ringbuf, ctx);
 }
 
 /**
@@ -725,13 +730,13 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
 		return -EINVAL;
 	}
 
-	ret = execlists_move_to_gpu(ringbuf, vmas);
+	ret = execlists_move_to_gpu(ringbuf, ctx, vmas);
 	if (ret)
 		return ret;
 
 	if (ring == &dev_priv->ring[RCS] &&
 	    instp_mode != dev_priv->relative_constants_mode) {
-		ret = intel_logical_ring_begin(ringbuf, 4);
+		ret = intel_logical_ring_begin(ringbuf, ctx, 4);
 		if (ret)
 			return ret;
 
@@ -744,7 +749,7 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
 		dev_priv->relative_constants_mode = instp_mode;
 	}
 
-	ret = ring->emit_bb_start(ringbuf, exec_start, flags);
+	ret = ring->emit_bb_start(ringbuf, ctx, exec_start, flags);
 	if (ret)
 		return ret;
 
@@ -807,15 +812,16 @@ void intel_logical_ring_stop(struct intel_engine_cs *ring)
 	I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
 }
 
-int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf)
+int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
+				  struct intel_context *ctx)
 {
 	struct intel_engine_cs *ring = ringbuf->ring;
 	int ret;
 
 	if (!ring->gpu_caches_dirty)
 		return 0;
 
-	ret = ring->emit_flush(ringbuf, 0, I915_GEM_GPU_DOMAINS);
+	ret = ring->emit_flush(ringbuf, ctx, 0, I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
@@ -833,10 +839,10 @@ int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf)
  * point, the tail *inside* the context is updated and the ELSP written to.
  */
 void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf,
+					   struct intel_context *ctx,
 					   struct drm_i915_gem_request *request)
 {
 	struct intel_engine_cs *ring = ringbuf->ring;
-	struct intel_context *ctx = ringbuf->FIXME_lrc_ctx;
 
 	intel_logical_ring_advance(ringbuf);
 
@@ -974,6 +980,7 @@ static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
 }
 
 static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
+				       struct intel_context *ctx,
 				       int bytes)
 {
 	struct intel_engine_cs *ring = ringbuf->ring;
@@ -987,7 +994,7 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
 		return ret;
 
 	/* Force the context submission in case we have been skipping it */
-	intel_logical_ring_advance_and_submit(ringbuf, NULL);
+	intel_logical_ring_advance_and_submit(ringbuf, ctx, NULL);
 
 	/* With GEM the hangcheck timer should kick us out of the loop,
 	 * leaving it early runs the risk of corrupting GEM state (due
@@ -1022,13 +1029,14 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
 	return ret;
 }
 
-static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf)
+static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf,
+				    struct intel_context *ctx)
 {
 	uint32_t __iomem *virt;
 	int rem = ringbuf->size - ringbuf->tail;
 
 	if (ringbuf->space < rem) {
-		int ret = logical_ring_wait_for_space(ringbuf, rem);
+		int ret = logical_ring_wait_for_space(ringbuf, ctx, rem);
 
 		if (ret)
 			return ret;
@@ -1045,18 +1053,19 @@ static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf)
 	return 0;
 }
 
-static int logical_ring_prepare(struct intel_ringbuffer *ringbuf, int bytes)
+static int logical_ring_prepare(struct intel_ringbuffer *ringbuf,
+				struct intel_context *ctx, int bytes)
 {
 	int ret;
 
 	if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
-		ret = logical_ring_wrap_buffer(ringbuf);
+		ret = logical_ring_wrap_buffer(ringbuf, ctx);
 		if (unlikely(ret))
 			return ret;
 	}
 
 	if (unlikely(ringbuf->space < bytes)) {
-		ret = logical_ring_wait_for_space(ringbuf, bytes);
+		ret = logical_ring_wait_for_space(ringbuf, ctx, bytes);
 		if (unlikely(ret))
 			return ret;
 	}
@@ -1077,7 +1086,8 @@ static int logical_ring_prepare(struct intel_ringbuffer *ringbuf, int bytes)
 *
 * Return: non-zero if the ringbuffer is not ready to be written to.
 */
-int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords)
+int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf,
+			     struct intel_context *ctx, int num_dwords)
 {
 	struct intel_engine_cs *ring = ringbuf->ring;
 	struct drm_device *dev = ring->dev;
@@ -1089,12 +1099,12 @@ int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords)
 	if (ret)
 		return ret;
 
-	ret = logical_ring_prepare(ringbuf, num_dwords * sizeof(uint32_t));
+	ret = logical_ring_prepare(ringbuf, ctx, num_dwords * sizeof(uint32_t));
 	if (ret)
 		return ret;
 
 	/* Preallocate the olr before touching the ring */
-	ret = logical_ring_alloc_request(ring, ringbuf->FIXME_lrc_ctx);
+	ret = logical_ring_alloc_request(ring, ctx);
 	if (ret)
 		return ret;
 
@@ -1115,11 +1125,11 @@ static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring,
 		return 0;
 
 	ring->gpu_caches_dirty = true;
-	ret = logical_ring_flush_all_caches(ringbuf);
+	ret = logical_ring_flush_all_caches(ringbuf, ctx);
 	if (ret)
 		return ret;
 
-	ret = intel_logical_ring_begin(ringbuf, w->count * 2 + 2);
+	ret = intel_logical_ring_begin(ringbuf, ctx, w->count * 2 + 2);
 	if (ret)
 		return ret;
 
@@ -1133,7 +1143,7 @@ static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring,
 	intel_logical_ring_advance(ringbuf);
 
 	ring->gpu_caches_dirty = true;
-	ret = logical_ring_flush_all_caches(ringbuf);
+	ret = logical_ring_flush_all_caches(ringbuf, ctx);
 	if (ret)
 		return ret;
 
@@ -1184,12 +1194,13 @@ static int gen8_init_render_ring(struct intel_engine_cs *ring)
 }
 
 static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
+			      struct intel_context *ctx,
 			      u64 offset, unsigned flags)
 {
 	bool ppgtt = !(flags & I915_DISPATCH_SECURE);
 	int ret;
 
-	ret = intel_logical_ring_begin(ringbuf, 4);
+	ret = intel_logical_ring_begin(ringbuf, ctx, 4);
 	if (ret)
 		return ret;
 
@@ -1237,6 +1248,7 @@ static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring)
 }
 
 static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
+			   struct intel_context *ctx,
 			   u32 invalidate_domains,
 			   u32 unused)
 {
@@ -1246,7 +1258,7 @@ static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
 	uint32_t cmd;
 	int ret;
 
-	ret = intel_logical_ring_begin(ringbuf, 4);
+	ret = intel_logical_ring_begin(ringbuf, ctx, 4);
 	if (ret)
 		return ret;
 
@@ -1275,6 +1287,7 @@ static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
 }
 
 static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
+				  struct intel_context *ctx,
 				  u32 invalidate_domains,
 				  u32 flush_domains)
 {
@@ -1301,7 +1314,7 @@ static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
 		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
 	}
 
-	ret = intel_logical_ring_begin(ringbuf, 6);
+	ret = intel_logical_ring_begin(ringbuf, ctx, 6);
 	if (ret)
 		return ret;
 
@@ -1333,7 +1346,7 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf,
 	u32 cmd;
 	int ret;
 
-	ret = intel_logical_ring_begin(ringbuf, 6);
+	ret = intel_logical_ring_begin(ringbuf, request->ctx, 6);
 	if (ret)
 		return ret;
 
@@ -1349,7 +1362,7 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf,
 			i915_gem_request_get_seqno(ring->outstanding_lazy_request));
 	intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
 	intel_logical_ring_emit(ringbuf, MI_NOOP);
-	intel_logical_ring_advance_and_submit(ringbuf, request);
+	intel_logical_ring_advance_and_submit(ringbuf, request->ctx, request);
 
 	return 0;
 }
@@ -1636,6 +1649,7 @@ int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
 		return 0;
 
 	ret = ring->emit_bb_start(ringbuf,
+			ctx,
 			so.ggtt_offset,
 			I915_DISPATCH_SECURE);
 	if (ret)
@@ -1892,7 +1906,6 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
 	}
 
 	ringbuf->ring = ring;
-	ringbuf->FIXME_lrc_ctx = ctx;
 
 	ringbuf->size = 32 * PAGE_SIZE;
 	ringbuf->effective_size = ringbuf->size;