Skip to content

Commit 51dd92c

Browse files
committed
sched/mmcid: Serialize sched_mm_cid_fork()/exit() with a mutex
Prepare for the new CID management scheme which puts the CID ownership transition into the fork() and exit() slow path by serializing sched_mm_cid_fork()/exit() with it, so task list and cpu mask walks can be done in interruptible and preemptible code. The contention on it is not worse than on other concurrency controls in the fork()/exit() machinery. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com> Link: https://patch.msgid.link/20251119172549.895826703@linutronix.de
1 parent b0c3d51 commit 51dd92c

File tree

2 files changed

+24
-0
lines changed

2 files changed

+24
-0
lines changed

include/linux/rseq_types.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -125,13 +125,15 @@ struct mm_cid_pcpu {
125125
* do not actually share the MM.
126126
* @lock: Spinlock to protect all fields except @pcpu. It also protects
127127
* the MM cid cpumask and the MM cidmask bitmap.
128+
* @mutex: Mutex to serialize forks and exits related to this mm
128129
*/
129130
struct mm_mm_cid {
130131
struct mm_cid_pcpu __percpu *pcpu;
131132
unsigned int max_cids;
132133
unsigned int nr_cpus_allowed;
133134
unsigned int users;
134135
raw_spinlock_t lock;
136+
struct mutex mutex;
135137
} ____cacheline_aligned_in_smp;
136138
#else /* CONFIG_SCHED_MM_CID */
137139
struct mm_mm_cid { };

kernel/sched/core.c

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10369,6 +10369,25 @@ void call_trace_sched_update_nr_running(struct rq *rq, int count)
1036910369
}
1037010370

1037110371
#ifdef CONFIG_SCHED_MM_CID
10372+
/*
10373+
* Concurrency IDentifier management
10374+
*
10375+
* Serialization rules:
10376+
*
10377+
* mm::mm_cid::mutex: Serializes fork() and exit() and therefore
10378+
* protects mm::mm_cid::users.
10379+
*
10380+
* mm::mm_cid::lock: Serializes mm_update_max_cids() and
10381+
* mm_update_cpus_allowed(). Nests in mm_cid::mutex
10382+
* and runqueue lock.
10383+
*
10384+
* The mm_cidmask bitmap is not protected by any of the mm::mm_cid locks
10385+
* and can only be modified with atomic operations.
10386+
*
10387+
* The mm::mm_cid:pcpu per CPU storage is protected by the CPUs runqueue
10388+
* lock.
10389+
*/
10390+
1037210391
/*
1037310392
* Update the CID range properties when the constraints change. Invoked via
1037410393
* fork(), exit() and affinity changes
@@ -10412,6 +10431,7 @@ void sched_mm_cid_fork(struct task_struct *t)
1041210431

1041310432
WARN_ON_ONCE(!mm || t->mm_cid.cid != MM_CID_UNSET);
1041410433

10434+
guard(mutex)(&mm->mm_cid.mutex);
1041510435
guard(raw_spinlock)(&mm->mm_cid.lock);
1041610436
t->mm_cid.active = 1;
1041710437
mm->mm_cid.users++;
@@ -10431,6 +10451,7 @@ void sched_mm_cid_exit(struct task_struct *t)
1043110451
if (!mm || !t->mm_cid.active)
1043210452
return;
1043310453

10454+
guard(mutex)(&mm->mm_cid.mutex);
1043410455
guard(raw_spinlock)(&mm->mm_cid.lock);
1043510456
t->mm_cid.active = 0;
1043610457
mm->mm_cid.users--;
@@ -10467,6 +10488,7 @@ void mm_init_cid(struct mm_struct *mm, struct task_struct *p)
1046710488
mm->mm_cid.nr_cpus_allowed = p->nr_cpus_allowed;
1046810489
mm->mm_cid.users = 0;
1046910490
raw_spin_lock_init(&mm->mm_cid.lock);
10491+
mutex_init(&mm->mm_cid.mutex);
1047010492
cpumask_copy(mm_cpus_allowed(mm), &p->cpus_mask);
1047110493
bitmap_zero(mm_cidmask(mm), num_possible_cpus());
1047210494
}

0 commit comments

Comments (0)