Commit e93d035

Peter Zijlstra authored and gregkh committed
seqlock/latch: Provide raw_read_seqcount_latch_retry()
[ Upstream commit d16317d ]

The read side of seqcount_latch consists of:

  do {
    seq = raw_read_seqcount_latch(&latch->seq);
    ...
  } while (read_seqcount_latch_retry(&latch->seq, seq));

which is asymmetric in the raw_ department, and sure enough, read_seqcount_latch_retry() includes (explicit) instrumentation where raw_read_seqcount_latch() does not.

This inconsistency becomes a problem when trying to use it from noinstr code. As such, fix it by renaming and re-implementing raw_read_seqcount_latch_retry() without the instrumentation.

Specifically, the instrumentation in question is kcsan_atomic_next(0) in do___read_seqcount_retry(). Losing this annotation is not a problem because raw_read_seqcount_latch() does not pass through kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX).

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Thomas Gleixner <[email protected]>
Reviewed-by: Petr Mladek <[email protected]>
Tested-by: Michael Kelley <[email protected]>  # Hyper-V
Link: https://lore.kernel.org/r/[email protected]
Stable-dep-of: 5c1806c ("kcsan, seqlock: Support seqcount_latch_t")
Signed-off-by: Sasha Levin <[email protected]>
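To make the rename concrete, here is a minimal reader sketch that is now safe to call from noinstr code. struct demo_latch and demo_read() are hypothetical names for illustration, not part of this commit:

struct demo_latch {
	seqcount_latch_t	seq;
	u64			data[2];	/* two copies, selected by seq & 1 */
};

static __always_inline u64 demo_read(struct demo_latch *dl)
{
	unsigned int seq;
	u64 val;

	do {
		seq = raw_read_seqcount_latch(&dl->seq);
		val = dl->data[seq & 1];	/* low bit picks the stable copy */
		/* the retry below supplies the needed smp_rmb(), with no KCSAN calls */
	} while (raw_read_seqcount_latch_retry(&dl->seq, seq));

	return val;
}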
Parent: 445d562

5 files changed: 13 additions, 12 deletions

include/linux/rbtree_latch.h

Lines changed: 1 addition & 1 deletion

@@ -206,7 +206,7 @@ latch_tree_find(void *key, struct latch_tree_root *root,
 	do {
 		seq = raw_read_seqcount_latch(&root->seq);
 		node = __lt_find(key, root, seq & 1, ops->comp);
-	} while (read_seqcount_latch_retry(&root->seq, seq));
+	} while (raw_read_seqcount_latch_retry(&root->seq, seq));

 	return node;
 }

include/linux/seqlock.h

Lines changed: 8 additions & 7 deletions

@@ -671,9 +671,9 @@ typedef struct {
  *
  * Return: sequence counter raw value. Use the lowest bit as an index for
  * picking which data copy to read. The full counter must then be checked
- * with read_seqcount_latch_retry().
+ * with raw_read_seqcount_latch_retry().
  */
-static inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
+static __always_inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
 {
 	/*
 	 * Pairs with the first smp_wmb() in raw_write_seqcount_latch().

@@ -683,16 +683,17 @@ static inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
 }

 /**
- * read_seqcount_latch_retry() - end a seqcount_latch_t read section
+ * raw_read_seqcount_latch_retry() - end a seqcount_latch_t read section
  * @s: Pointer to seqcount_latch_t
  * @start: count, from raw_read_seqcount_latch()
  *
  * Return: true if a read section retry is required, else false
  */
-static inline int
-read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
+static __always_inline int
+raw_read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
 {
-	return read_seqcount_retry(&s->seqcount, start);
+	smp_rmb();
+	return unlikely(READ_ONCE(s->seqcount.sequence) != start);
 }

 /**

@@ -752,7 +753,7 @@ read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
  * entry = data_query(latch->data[idx], ...);
  *
  * // This includes needed smp_rmb()
- * } while (read_seqcount_latch_retry(&latch->seq, seq));
+ * } while (raw_read_seqcount_latch_retry(&latch->seq, seq));
 *
 * return entry;
 * }
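Note that the new body open-codes smp_rmb() plus a READ_ONCE() comparison instead of calling read_seqcount_retry(); that is exactly what drops the kcsan_atomic_next(0) call mentioned in the changelog. For completeness, the write side that pairs with this reader is unchanged by the commit; the sketch below is modeled on the kernel-doc example in this same file, with demo_latch_modify() and its type being illustrative names:

static void demo_latch_modify(struct demo_latch *dl, u64 new_val)
{
	raw_write_seqcount_latch(&dl->seq);	/* readers switch to data[1] */
	dl->data[0] = new_val;			/* update the now-idle copy */
	raw_write_seqcount_latch(&dl->seq);	/* readers switch back to data[0] */
	dl->data[1] = new_val;			/* bring the second copy up to date */
}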

kernel/printk/printk.c

Lines changed: 1 addition & 1 deletion

@@ -457,7 +457,7 @@ static u64 latched_seq_read_nolock(struct latched_seq *ls)
 		seq = raw_read_seqcount_latch(&ls->latch);
 		idx = seq & 0x1;
 		val = ls->val[idx];
-	} while (read_seqcount_latch_retry(&ls->latch, seq));
+	} while (raw_read_seqcount_latch_retry(&ls->latch, seq));

 	return val;
 }

kernel/time/sched_clock.c

Lines changed: 1 addition & 1 deletion

@@ -77,7 +77,7 @@ notrace struct clock_read_data *sched_clock_read_begin(unsigned int *seq)

 notrace int sched_clock_read_retry(unsigned int seq)
 {
-	return read_seqcount_latch_retry(&cd.seq, seq);
+	return raw_read_seqcount_latch_retry(&cd.seq, seq);
 }

 unsigned long long notrace sched_clock(void)
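For context, sched_clock_read_retry() is consumed by sched_clock() in the usual latch loop. The sketch below approximates the mainline caller of this era and is illustrative, not part of this diff:

unsigned long long notrace sched_clock(void)
{
	u64 cyc, res;
	unsigned int seq;
	struct clock_read_data *rd;

	do {
		rd = sched_clock_read_begin(&seq);
		cyc = (rd->read_sched_clock() - rd->epoch_cyc) &
		      rd->sched_clock_mask;
		res = rd->epoch_ns + cyc_to_ns(cyc, rd->mult, rd->shift);
	} while (sched_clock_read_retry(seq));

	return res;
}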

kernel/time/timekeeping.c

Lines changed: 2 additions & 2 deletions

@@ -450,7 +450,7 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
 		tkr = tkf->base + (seq & 0x01);
 		now = ktime_to_ns(tkr->base);
 		now += fast_tk_get_delta_ns(tkr);
-	} while (read_seqcount_latch_retry(&tkf->seq, seq));
+	} while (raw_read_seqcount_latch_retry(&tkf->seq, seq));

 	return now;
 }

@@ -566,7 +566,7 @@ static __always_inline u64 __ktime_get_real_fast(struct tk_fast *tkf, u64 *mono)
 		basem = ktime_to_ns(tkr->base);
 		baser = ktime_to_ns(tkr->base_real);
 		delta = fast_tk_get_delta_ns(tkr);
-	} while (read_seqcount_latch_retry(&tkf->seq, seq));
+	} while (raw_read_seqcount_latch_retry(&tkf->seq, seq));

 	if (mono)
 		*mono = basem + delta;