  * fence is used to stall all requests associated with this guc_id until the
  * corresponding G2H returns indicating the guc_id has been deregistered.
  *
- * guc_ids:
+ * submission_state.guc_ids:
  * Unique number associated with private GuC context data passed in during
  * context registration / submission / deregistration. 64k available. Simple ida
  * is used for allocation.
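To make the allocation scheme above concrete, here is a minimal, self-contained sketch of the bounded-ida pattern; the example_* names and the SZ_64K bound are illustrative only (the driver itself uses GUC_MAX_LRC_DESCRIPTORS, visible in new_guc_id() further down), while ida_simple_get()/ida_simple_remove() are the kernel API this patch touches.

#include <linux/idr.h>
#include <linux/gfp.h>
#include <linux/sizes.h>

static DEFINE_IDA(example_guc_id_ida);

/* Grab an unused id in [0, SZ_64K); returns -ENOSPC once the space is full. */
static int example_get_guc_id(void)
{
	return ida_simple_get(&example_guc_id_ida, 0, SZ_64K,
			      GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
}

/* Return an id to the pool so another context can be registered with it. */
static void example_put_guc_id(int id)
{
	ida_simple_remove(&example_guc_id_ida, id);
}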
@@ -89,7 +89,7 @@
  * sched_engine can be submitting at a time. Currently only one sched_engine is
  * used for all of GuC submission but that could change in the future.
  *
- * guc->contexts_lock
+ * guc->submission_state.lock
  * Protects guc_id allocation for the given GuC, i.e. only one context can be
  * doing guc_id allocation operations at a time for each GuC in the system.
  *
@@ -103,7 +103,7 @@
  *
  * Lock ordering rules:
  * sched_engine->lock -> ce->guc_state.lock
- * guc->contexts_lock -> ce->guc_state.lock
+ * guc->submission_state.lock -> ce->guc_state.lock
  *
  * Reset races:
  * When a full GT reset is triggered it is assumed that some G2H responses to
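Read the ordering rules above as: guc->submission_state.lock and sched_engine->lock are outer locks relative to a context's ce->guc_state.lock, never inner. A hypothetical helper, assuming the driver's usual headers, purely to illustrate the nesting (example_nested_locking is not a real function):

static void example_nested_locking(struct intel_guc *guc,
				   struct intel_context *ce)
{
	unsigned long flags;

	/* Outer lock: per-GuC guc_id allocation state */
	spin_lock_irqsave(&guc->submission_state.lock, flags);
	/* Inner lock: per-context GuC state, taken only while the outer is held */
	spin_lock(&ce->guc_state.lock);

	/* ... touch guc_id bookkeeping and per-context state together ... */

	spin_unlock(&ce->guc_state.lock);
	spin_unlock_irqrestore(&guc->submission_state.lock, flags);
}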
@@ -1148,9 +1148,9 @@ int intel_guc_submission_init(struct intel_guc *guc)
 
 	xa_init_flags(&guc->context_lookup, XA_FLAGS_LOCK_IRQ);
 
-	spin_lock_init(&guc->contexts_lock);
-	INIT_LIST_HEAD(&guc->guc_id_list);
-	ida_init(&guc->guc_ids);
+	spin_lock_init(&guc->submission_state.lock);
+	INIT_LIST_HEAD(&guc->submission_state.guc_id_list);
+	ida_init(&guc->submission_state.guc_ids);
 
 	return 0;
 }
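The three init calls above imply that the guc_id bookkeeping now lives in a sub-struct of struct intel_guc named submission_state. Its definition is not part of this hunk; a minimal sketch consistent with those calls (field comments are the editor's reading, not upstream kerneldoc) would be:

#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/list.h>

struct example_guc_submission_state {
	/* Protects guc_id allocation/release and the guc_id_list below */
	spinlock_t lock;
	/* Allocator backing the 64k guc_id space */
	struct ida guc_ids;
	/* Contexts whose guc_id is currently unpinned and may be stolen */
	struct list_head guc_id_list;
};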
@@ -1215,15 +1215,16 @@ static void guc_submit_request(struct i915_request *rq)
 
 static int new_guc_id(struct intel_guc *guc)
 {
-	return ida_simple_get(&guc->guc_ids, 0,
+	return ida_simple_get(&guc->submission_state.guc_ids, 0,
 			      GUC_MAX_LRC_DESCRIPTORS, GFP_KERNEL |
 			      __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
 }
 
 static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce)
 {
 	if (!context_guc_id_invalid(ce)) {
-		ida_simple_remove(&guc->guc_ids, ce->guc_id.id);
+		ida_simple_remove(&guc->submission_state.guc_ids,
+				  ce->guc_id.id);
 		reset_lrc_desc(guc, ce->guc_id.id);
 		set_context_guc_id_invalid(ce);
 	}
@@ -1235,20 +1236,20 @@ static void release_guc_id(struct intel_guc *guc, struct intel_context *ce)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&guc->contexts_lock, flags);
+	spin_lock_irqsave(&guc->submission_state.lock, flags);
 	__release_guc_id(guc, ce);
-	spin_unlock_irqrestore(&guc->contexts_lock, flags);
+	spin_unlock_irqrestore(&guc->submission_state.lock, flags);
 }
 
 static int steal_guc_id(struct intel_guc *guc)
 {
 	struct intel_context *ce;
 	int guc_id;
 
-	lockdep_assert_held(&guc->contexts_lock);
+	lockdep_assert_held(&guc->submission_state.lock);
 
-	if (!list_empty(&guc->guc_id_list)) {
-		ce = list_first_entry(&guc->guc_id_list,
+	if (!list_empty(&guc->submission_state.guc_id_list)) {
+		ce = list_first_entry(&guc->submission_state.guc_id_list,
 				      struct intel_context,
 				      guc_id.link);
 
@@ -1273,7 +1274,7 @@ static int assign_guc_id(struct intel_guc *guc, u16 *out)
 {
 	int ret;
 
-	lockdep_assert_held(&guc->contexts_lock);
+	lockdep_assert_held(&guc->submission_state.lock);
 
 	ret = new_guc_id(guc);
 	if (unlikely(ret < 0)) {
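assign_guc_id() is only partially visible in this hunk; its shape is roughly the following hypothetical condensation (a sketch, not the exact upstream body): try a fresh id from the ida first, and steal one from an unpinned context only when the id space is exhausted.

static int example_assign_guc_id(struct intel_guc *guc, u16 *out)
{
	int ret;

	lockdep_assert_held(&guc->submission_state.lock);

	/* Fast path: a fresh id from the ida */
	ret = new_guc_id(guc);
	if (unlikely(ret < 0)) {
		/* Slow path: reuse an id whose owner no longer pins it */
		ret = steal_guc_id(guc);
		if (ret < 0)
			return ret;
	}

	*out = ret;
	return 0;
}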
@@ -1295,7 +1296,7 @@ static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce)
 	GEM_BUG_ON(atomic_read(&ce->guc_id.ref));
 
 try_again:
-	spin_lock_irqsave(&guc->contexts_lock, flags);
+	spin_lock_irqsave(&guc->submission_state.lock, flags);
 
 	might_lock(&ce->guc_state.lock);
 
@@ -1310,7 +1311,7 @@ static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce)
 	atomic_inc(&ce->guc_id.ref);
 
 out_unlock:
-	spin_unlock_irqrestore(&guc->contexts_lock, flags);
+	spin_unlock_irqrestore(&guc->submission_state.lock, flags);
 
 	/*
 	 * -EAGAIN indicates no guc_id are available, let's retire any
@@ -1346,11 +1347,12 @@ static void unpin_guc_id(struct intel_guc *guc, struct intel_context *ce)
 	if (unlikely(context_guc_id_invalid(ce)))
 		return;
 
-	spin_lock_irqsave(&guc->contexts_lock, flags);
+	spin_lock_irqsave(&guc->submission_state.lock, flags);
 	if (!context_guc_id_invalid(ce) && list_empty(&ce->guc_id.link) &&
 	    !atomic_read(&ce->guc_id.ref))
-		list_add_tail(&ce->guc_id.link, &guc->guc_id_list);
-	spin_unlock_irqrestore(&guc->contexts_lock, flags);
+		list_add_tail(&ce->guc_id.link,
+			      &guc->submission_state.guc_id_list);
+	spin_unlock_irqrestore(&guc->submission_state.lock, flags);
 }
 
 static int __guc_action_register_context(struct intel_guc *guc,
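pin_guc_id() and unpin_guc_id() above form a reference-counted pair: while any request holds a reference, the context's guc_id cannot be stolen; once the count drops to zero the context is parked on submission_state.guc_id_list and becomes a candidate for steal_guc_id(). A hypothetical caller, purely to show the pairing:

static int example_use_guc_id(struct intel_guc *guc, struct intel_context *ce)
{
	int ret;

	ret = pin_guc_id(guc, ce);	/* takes a reference on ce->guc_id */
	if (ret < 0)
		return ret;

	/* ... build and submit work that uses ce->guc_id.id ... */

	unpin_guc_id(guc, ce);		/* last unpin makes the id stealable */
	return 0;
}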
@@ -1921,16 +1923,16 @@ static void guc_context_destroy(struct kref *kref)
 	 * returns indicating this context has been deregistered the guc_id is
 	 * returned to the pool of available guc_id.
 	 */
-	spin_lock_irqsave(&guc->contexts_lock, flags);
+	spin_lock_irqsave(&guc->submission_state.lock, flags);
 	if (context_guc_id_invalid(ce)) {
-		spin_unlock_irqrestore(&guc->contexts_lock, flags);
+		spin_unlock_irqrestore(&guc->submission_state.lock, flags);
 		__guc_context_destroy(ce);
 		return;
 	}
 
 	if (!list_empty(&ce->guc_id.link))
 		list_del_init(&ce->guc_id.link);
-	spin_unlock_irqrestore(&guc->contexts_lock, flags);
+	spin_unlock_irqrestore(&guc->submission_state.lock, flags);
 
 	/* Seal race with Reset */
 	spin_lock_irqsave(&ce->guc_state.lock, flags);