Skip to content

Commit 7ee58f9

Browse files
Peter Zijlstra
authored and committed
rseq: Reset slice extension when scheduled
When a time slice extension was granted in the need_resched() check on exit to user space, the task can still be scheduled out in one of the other pending work items. When it gets scheduled back in, and need_resched() is not set, then the stale grant would be preserved, which is just wrong. RSEQ already keeps track of that and sets TIF_RSEQ, which invokes the critical section and ID update mechanisms. Utilize them and clear the user space slice control member of struct rseq unconditionally within the existing user access sections. That's just an unconditional store more in that path. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com> Link: https://patch.msgid.link/20251215155709.131081527@linutronix.de
1 parent 0ac3b5c commit 7ee58f9

File tree

1 file changed

+28
-2
lines changed

1 file changed

+28
-2
lines changed

include/linux/rseq_entry.h

Lines changed: 28 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -102,9 +102,17 @@ static __always_inline bool rseq_arm_slice_extension_timer(void)
102102
return __rseq_arm_slice_extension_timer();
103103
}
104104

105+
static __always_inline void rseq_slice_clear_grant(struct task_struct *t)
106+
{
107+
if (IS_ENABLED(CONFIG_RSEQ_STATS) && t->rseq.slice.state.granted)
108+
rseq_stat_inc(rseq_stats.s_revoked);
109+
t->rseq.slice.state.granted = false;
110+
}
111+
105112
#else /* CONFIG_RSEQ_SLICE_EXTENSION */
106113
static inline bool rseq_slice_extension_enabled(void) { return false; }
107114
static inline bool rseq_arm_slice_extension_timer(void) { return false; }
115+
static inline void rseq_slice_clear_grant(struct task_struct *t) { }
108116
#endif /* !CONFIG_RSEQ_SLICE_EXTENSION */
109117

110118
bool rseq_debug_update_user_cs(struct task_struct *t, struct pt_regs *regs, unsigned long csaddr);
@@ -391,8 +399,15 @@ bool rseq_set_ids_get_csaddr(struct task_struct *t, struct rseq_ids *ids,
391399
unsafe_put_user(ids->mm_cid, &rseq->mm_cid, efault);
392400
if (csaddr)
393401
unsafe_get_user(*csaddr, &rseq->rseq_cs, efault);
402+
403+
/* Open coded, so it's in the same user access region */
404+
if (rseq_slice_extension_enabled()) {
405+
/* Unconditionally clear it, no point in conditionals */
406+
unsafe_put_user(0U, &rseq->slice_ctrl.all, efault);
407+
}
394408
}
395409

410+
rseq_slice_clear_grant(t);
396411
/* Cache the new values */
397412
t->rseq.ids.cpu_cid = ids->cpu_cid;
398413
rseq_stat_inc(rseq_stats.ids);
@@ -488,8 +503,17 @@ static __always_inline bool rseq_exit_user_update(struct pt_regs *regs, struct t
488503
*/
489504
u64 csaddr;
490505

491-
if (unlikely(get_user_inline(csaddr, &rseq->rseq_cs)))
492-
return false;
506+
scoped_user_rw_access(rseq, efault) {
507+
unsafe_get_user(csaddr, &rseq->rseq_cs, efault);
508+
509+
/* Open coded, so it's in the same user access region */
510+
if (rseq_slice_extension_enabled()) {
511+
/* Unconditionally clear it, no point in conditionals */
512+
unsafe_put_user(0U, &rseq->slice_ctrl.all, efault);
513+
}
514+
}
515+
516+
rseq_slice_clear_grant(t);
493517

494518
if (static_branch_unlikely(&rseq_debug_enabled) || unlikely(csaddr)) {
495519
if (unlikely(!rseq_update_user_cs(t, regs, csaddr)))
@@ -505,6 +529,8 @@ static __always_inline bool rseq_exit_user_update(struct pt_regs *regs, struct t
505529
u32 node_id = cpu_to_node(ids.cpu_id);
506530

507531
return rseq_update_usr(t, regs, &ids, node_id);
532+
efault:
533+
return false;
508534
}
509535

510536
static __always_inline bool __rseq_exit_to_user_mode_restart(struct pt_regs *regs)

0 commit comments

Comments (0)