@@ -29,6 +29,7 @@ static LIST_HEAD(icc_providers);
 static int providers_count;
 static bool synced_state;
 static DEFINE_MUTEX(icc_lock);
+static DEFINE_MUTEX(icc_bw_lock);
 static struct dentry *icc_debugfs_dir;
 
 static void icc_summary_show_one(struct seq_file *s, struct icc_node *n)
@@ -632,7 +633,7 @@ int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
 	if (WARN_ON(IS_ERR(path) || !path->num_nodes))
 		return -EINVAL;
 
-	mutex_lock(&icc_lock);
+	mutex_lock(&icc_bw_lock);
 
 	old_avg = path->reqs[0].avg_bw;
 	old_peak = path->reqs[0].peak_bw;
@@ -664,7 +665,7 @@ int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
 		apply_constraints(path);
 	}
 
-	mutex_unlock(&icc_lock);
+	mutex_unlock(&icc_bw_lock);
 
 	trace_icc_set_bw_end(path, ret);
 
@@ -967,6 +968,7 @@ void icc_node_add(struct icc_node *node, struct icc_provider *provider)
 		return;
 
 	mutex_lock(&icc_lock);
+	mutex_lock(&icc_bw_lock);
 
 	node->provider = provider;
 	list_add_tail(&node->node_list, &provider->nodes);
@@ -992,6 +994,7 @@ void icc_node_add(struct icc_node *node, struct icc_provider *provider)
 	node->avg_bw = 0;
 	node->peak_bw = 0;
 
+	mutex_unlock(&icc_bw_lock);
 	mutex_unlock(&icc_lock);
 }
 EXPORT_SYMBOL_GPL(icc_node_add);
@@ -1129,6 +1132,7 @@ void icc_sync_state(struct device *dev)
 		return;
 
 	mutex_lock(&icc_lock);
+	mutex_lock(&icc_bw_lock);
 	synced_state = true;
 	list_for_each_entry(p, &icc_providers, provider_list) {
 		dev_dbg(p->dev, "interconnect provider is in synced state\n");
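For context, the locking pattern this diff establishes (my reading of the hunks above, not text from the patch itself): the bandwidth-setting hot path, icc_set_bw(), now serializes only on the new icc_bw_lock, while paths that also mutate topology or global state, icc_node_add() and icc_sync_state(), take icc_lock first and nest icc_bw_lock inside it. Keeping that outer/inner order identical at every nesting site is what keeps the split deadlock-free. Below is a minimal, self-contained userspace sketch of the same two-mutex ordering using pthreads; all names in it (topology_lock, bw_lock, set_bw, add_node) are illustrative stand-ins, not the kernel API.

/* Sketch of the icc_lock / icc_bw_lock split shown in the diff above.
 * Hypothetical userspace analog; build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t topology_lock = PTHREAD_MUTEX_INITIALIZER; /* ~icc_lock */
static pthread_mutex_t bw_lock = PTHREAD_MUTEX_INITIALIZER;       /* ~icc_bw_lock */

static int node_count;      /* topology state, guarded by topology_lock */
static unsigned int avg_bw; /* aggregated bandwidth, guarded by bw_lock */

/* Hot path: touches only bandwidth state, so it takes only the inner
 * lock (mirrors icc_set_bw() switching from icc_lock to icc_bw_lock). */
static void set_bw(unsigned int bw)
{
	pthread_mutex_lock(&bw_lock);
	avg_bw = bw;
	pthread_mutex_unlock(&bw_lock);
}

/* Topology change: takes both locks, always in the same order
 * (topology_lock outer, bw_lock inner), and releases them in reverse
 * order, matching the icc_node_add() hunks above. */
static void add_node(void)
{
	pthread_mutex_lock(&topology_lock);
	pthread_mutex_lock(&bw_lock);
	node_count++;
	avg_bw = 0; /* reset the aggregate for the changed topology */
	pthread_mutex_unlock(&bw_lock);
	pthread_mutex_unlock(&topology_lock);
}

int main(void)
{
	add_node();
	set_bw(100);
	printf("nodes=%d avg_bw=%u\n", node_count, avg_bw);
	return 0;
}

Because set_bw() never takes topology_lock, a thread holding bw_lock can never wait on a thread that holds bw_lock while wanting topology_lock, so the single outer-then-inner ordering above is the only one lock-order checkers (e.g. lockdep in the kernel) will ever observe.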