@@ -58,10 +58,18 @@ struct k_spinlock {
 	 * ID in the bottom two bits.
 	 */
 	size_t thread_cpu;
+
+	/* Where the spinlock was taken */
+	int line;
+	char *file;
 #endif
 };
 
-static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
+static ALWAYS_INLINE k_spinlock_key_t z_spin_lock(struct k_spinlock *l
+#ifdef SPIN_VALIDATE
+						  , int line, char *file
+#endif
+						  )
 {
 	ARG_UNUSED(l);
 	k_spinlock_key_t k;
@@ -73,23 +81,37 @@ static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
 	k.key = z_arch_irq_lock();
 
 #ifdef SPIN_VALIDATE
-	__ASSERT(z_spin_lock_valid(l), "Recursive spinlock");
+	__ASSERT(z_spin_lock_valid(l),
+		 "Recursive spinlock @%s:%d (taken at %s:%d)",
+		 file, line, l->file, l->line);
 #endif
 
 #ifdef CONFIG_SMP
 	while (!atomic_cas(&l->locked, 0, 1)) {
 	}
 #endif
 
+#ifdef SPIN_VALIDATE
+	l->line = line;
+	l->file = file;
+#endif
 	return k;
 }
 
-static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l,
-					k_spinlock_key_t key)
+static ALWAYS_INLINE void z_spin_unlock(struct k_spinlock *l,
+					k_spinlock_key_t key
+#ifdef SPIN_VALIDATE
+					, int line, char *file
+#endif
+					)
 {
 	ARG_UNUSED(l);
 #ifdef SPIN_VALIDATE
-	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock!");
+	__ASSERT(z_spin_unlock_valid(l),
+		 "Not my spinlock @%s:%d (taken at %s:%d)",
+		 file, line, l->file, l->line);
+	l->line = -1;
+	l->file = NULL;
 #endif
 
 #ifdef CONFIG_SMP
@@ -108,16 +130,33 @@ static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l,
 /* Internal function: releases the lock, but leaves local interrupts
  * disabled
  */
-static ALWAYS_INLINE void k_spin_release(struct k_spinlock *l)
+static ALWAYS_INLINE void z_spin_release(struct k_spinlock *l
+#ifdef SPIN_VALIDATE
+					 , int line, char *file
+#endif
+					 )
 {
 	ARG_UNUSED(l);
 #ifdef SPIN_VALIDATE
-	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock!");
+	__ASSERT(z_spin_unlock_valid(l),
+		 "Not my spinlock @%s:%d (taken at %s:%d)",
+		 file, line, l->file, l->line);
+	l->line = -1;
+	l->file = NULL;
 #endif
 #ifdef CONFIG_SMP
 	atomic_clear(&l->locked);
 #endif
 }
 
+#ifdef SPIN_VALIDATE
+#define k_spin_lock(l) z_spin_lock((l), __LINE__, __FILE__)
+#define k_spin_unlock(l, k) z_spin_unlock((l), (k), __LINE__, __FILE__)
+#define k_spin_release(l) z_spin_release((l), __LINE__, __FILE__)
+#else
+#define k_spin_lock(l) z_spin_lock(l)
+#define k_spin_unlock(l, k) z_spin_unlock((l), (k))
+#define k_spin_release(l) z_spin_release(l)
+#endif
 
 #endif /* ZEPHYR_INCLUDE_SPINLOCK_H_ */
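
For reference, a minimal sketch of how the new wrappers behave at a call site, assuming a build with SPIN_VALIDATE enabled; the `my_lock` variable and `update_shared_state()` function are hypothetical, not part of this change:

```c
#include <spinlock.h>

static struct k_spinlock my_lock;   /* zero-initialized: unlocked */

void update_shared_state(void)
{
	/* Under SPIN_VALIDATE this expands to
	 * z_spin_lock(&my_lock, __LINE__, __FILE__), so the lock
	 * records where it was taken.
	 */
	k_spinlock_key_t key = k_spin_lock(&my_lock);

	/* ... critical section ... */

	/* A second k_spin_lock(&my_lock) here would fire the
	 * "Recursive spinlock @%s:%d (taken at %s:%d)" assertion,
	 * naming both this call site and the one above.
	 */

	k_spin_unlock(&my_lock, key);   /* resets the recorded file/line */
}
```

Routing the public `k_spin_*()` names through function-like macros is what makes `__LINE__` and `__FILE__` resolve at the caller rather than inside `spinlock.h` itself, which is why the inline functions are renamed to `z_spin_*()` and gain the extra parameters only when SPIN_VALIDATE is defined.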