
Commit f6d5de3

Author: Andy Ross (committed)

kernel/spinlock: More validation

Catching the error in a function is nice, but one really wants to know
where it happened, and where the recursive lock was taken (or where the
unowned release was actually grabbed). Add a layer of macro indirection
to catch this info and log it with the assertion.

Signed-off-by: Andy Ross <[email protected]>
1 parent 3aa8443 commit f6d5de3
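
The technique here is the standard trick of wrapping a function in a
macro so the macro can capture the caller's __FILE__/__LINE__ and pass
them along. A minimal standalone sketch of the pattern (hypothetical
names, not the Zephyr code itself):

	#include <stdio.h>

	/* Implementation function: takes the caller's location as
	 * extra arguments.
	 */
	static void z_my_lock(int *l, int line, const char *file)
	{
		printf("lock taken at %s:%d\n", file, line);
		*l = 1;
	}

	/* Callers keep writing my_lock(&l); the macro injects the
	 * location of each individual call site.
	 */
	#define my_lock(l) z_my_lock((l), __LINE__, __FILE__)

	int main(void)
	{
		int l = 0;

		my_lock(&l); /* prints this file and line number */
		return 0;
	}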

File tree: 1 file changed (+46 −7 lines)

include/spinlock.h

Lines changed: 46 additions & 7 deletions
@@ -58,10 +58,18 @@ struct k_spinlock {
 	 * ID in the bottom two bits.
 	 */
 	size_t thread_cpu;
+
+	/* Where the spinlock was taken */
+	int line;
+	char *file;
 #endif
 };
 
-static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
+static ALWAYS_INLINE k_spinlock_key_t z_spin_lock(struct k_spinlock *l
+#ifdef SPIN_VALIDATE
+						  , int line, char *file
+#endif
+						  )
 {
 	ARG_UNUSED(l);
 	k_spinlock_key_t k;
@@ -73,23 +81,37 @@ static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
 	k.key = z_arch_irq_lock();
 
 #ifdef SPIN_VALIDATE
-	__ASSERT(z_spin_lock_valid(l), "Recursive spinlock");
+	__ASSERT(z_spin_lock_valid(l),
+		 "Recursive spinlock @%s:%d (taken at %s:%d)",
+		 file, line, l->file, l->line);
 #endif
 
 #ifdef CONFIG_SMP
 	while (!atomic_cas(&l->locked, 0, 1)) {
 	}
 #endif
 
+#ifdef SPIN_VALIDATE
+	l->line = line;
+	l->file = file;
+#endif
 	return k;
 }
 
-static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l,
-					k_spinlock_key_t key)
+static ALWAYS_INLINE void z_spin_unlock(struct k_spinlock *l,
+					k_spinlock_key_t key
+#ifdef SPIN_VALIDATE
+					, int line, char *file
+#endif
+					)
 {
 	ARG_UNUSED(l);
 #ifdef SPIN_VALIDATE
-	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock!");
+	__ASSERT(z_spin_unlock_valid(l),
+		 "Not my spinlock @%s:%d (taken at %s:%d)",
+		 file, line, l->file, l->line);
+	l->line = -1;
+	l->file = NULL;
 #endif
 
 #ifdef CONFIG_SMP
@@ -108,16 +130,33 @@ static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l,
 /* Internal function: releases the lock, but leaves local interrupts
  * disabled
  */
-static ALWAYS_INLINE void k_spin_release(struct k_spinlock *l)
+static ALWAYS_INLINE void z_spin_release(struct k_spinlock *l
+#ifdef SPIN_VALIDATE
+					 , int line, char *file
+#endif
+					 )
 {
 	ARG_UNUSED(l);
 #ifdef SPIN_VALIDATE
-	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock!");
+	__ASSERT(z_spin_unlock_valid(l),
+		 "Not my spinlock @%s:%d (taken at %s:%d)",
+		 file, line, l->file, l->line);
+	l->line = -1;
+	l->file = NULL;
 #endif
 #ifdef CONFIG_SMP
 	atomic_clear(&l->locked);
 #endif
 }
 
+#ifdef SPIN_VALIDATE
+#define k_spin_lock(l) z_spin_lock((l), __LINE__, __FILE__)
+#define k_spin_unlock(l, k) z_spin_unlock((l), (k), __LINE__, __FILE__)
+#define k_spin_release(l) z_spin_release((l), __LINE__, __FILE__)
+#else
+#define k_spin_lock(l) z_spin_lock(l)
+#define k_spin_unlock(l, k) z_spin_unlock((l), (k))
+#define k_spin_release(l) z_spin_release(l)
+#endif
 
 #endif /* ZEPHYR_INCLUDE_SPINLOCK_H_ */
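
With SPIN_VALIDATE enabled, the k_spin_lock()/k_spin_unlock()/
k_spin_release() macros route through the z_* implementations with the
call site attached, so a failed assertion now names both locations. For
a hypothetical caller in a file foo.c:

	k_spinlock_key_t k1 = k_spin_lock(&lock); /* say, foo.c:10 */
	k_spinlock_key_t k2 = k_spin_lock(&lock); /* foo.c:11: asserts with
						   * "Recursive spinlock
						   * @foo.c:11 (taken at
						   * foo.c:10)"
						   */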
