author     pengdonglin <pengdonglin@xiaomi.com>    2025-09-16 12:47:30 +0800
committer  Tejun Heo <tj@kernel.org>                2025-09-16 08:36:24 -1000
commit     58ab6d25a1bfca42510979cb2b6921f1c807bd02 (patch)
tree       5472ee698a06923a78ca629ec4d9f8d7187af9cd
parent     3ee4211ef8693377f67624a46b4f8577f6a495d9 (diff)
cgroup/cpuset: Remove redundant rcu_read_lock/unlock() in spin_lock
Since commit a8bb74acd8efe ("rcu: Consolidate RCU-sched update-side function definitions")
there is no difference between rcu_read_lock(), rcu_read_lock_bh() and
rcu_read_lock_sched() in terms of the RCU read-side critical section and the
relevant grace period. That means that spin_lock(), which implies
rcu_read_lock_sched(), also implies rcu_read_lock(). There is no need to
explicitly start an RCU read section if one has already been started
implicitly by spin_lock(). Simplify the code and remove the inner
rcu_read_lock() invocations.

Cc: Waiman Long <longman@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Waiman Long <longman@redhat.com>
Signed-off-by: pengdonglin <pengdonglin@xiaomi.com>
Signed-off-by: pengdonglin <dolinux.peng@gmail.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
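For illustration, a minimal sketch of the pattern this change relies on; it is
not the patched cpuset code, and example_lock, example_ptr, struct foo and
example_read_val() are hypothetical names. Because spin_lock() disables
preemption, the locked region is itself an RCU read-side critical section
after the flavor consolidation, so an RCU-protected pointer can be
dereferenced there without a nested rcu_read_lock()/rcu_read_unlock() pair;
rcu_dereference_check() plus lockdep_is_held() is one way to document that
protection (the cpuset code may use a different annotation).

#include <linux/spinlock.h>
#include <linux/rcupdate.h>

/* Hypothetical RCU-protected data, for illustration only. */
struct foo {
        int val;
};

static DEFINE_SPINLOCK(example_lock);   /* hypothetical lock */
static struct foo __rcu *example_ptr;   /* hypothetical pointer, assumed to be
                                         * published with rcu_assign_pointer()
                                         * and freed only after an RCU grace
                                         * period */

static int example_read_val(void)
{
        struct foo *f;
        unsigned long flags;
        int val = -1;

        spin_lock_irqsave(&example_lock, flags);
        /*
         * No nested rcu_read_lock()/rcu_read_unlock() here: spin_lock()
         * disables preemption, and since the RCU flavor consolidation a
         * preemption-disabled region is itself an RCU read-side critical
         * section, so the object cannot be freed before the lock is
         * released.
         */
        f = rcu_dereference_check(example_ptr,
                                  lockdep_is_held(&example_lock));
        if (f)
                val = f->val;
        spin_unlock_irqrestore(&example_lock, flags);

        return val;
}

The same argument covers the hunks below, where task_cs() is reached while
callback_lock is already held.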
-rw-r--r--  kernel/cgroup/cpuset.c  6
1 file changed, 0 insertions, 6 deletions
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 0d41b4993f8c..caa885823eeb 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -4107,7 +4107,6 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
         struct cpuset *cs;
         spin_lock_irqsave(&callback_lock, flags);
-        rcu_read_lock();
         cs = task_cs(tsk);
         if (cs != &top_cpuset)
@@ -4129,7 +4128,6 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
                         cpumask_copy(pmask, possible_mask);
         }
-        rcu_read_unlock();
         spin_unlock_irqrestore(&callback_lock, flags);
 }
@@ -4202,9 +4200,7 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
         unsigned long flags;
         spin_lock_irqsave(&callback_lock, flags);
-        rcu_read_lock();
         guarantee_online_mems(task_cs(tsk), &mask);
-        rcu_read_unlock();
         spin_unlock_irqrestore(&callback_lock, flags);
         return mask;
@@ -4299,10 +4295,8 @@ bool cpuset_current_node_allowed(int node, gfp_t gfp_mask)
         /* Not hardwall and node outside mems_allowed: scan up cpusets */
         spin_lock_irqsave(&callback_lock, flags);
-        rcu_read_lock();
         cs = nearest_hardwall_ancestor(task_cs(current));
         allowed = node_isset(node, cs->mems_allowed);
-        rcu_read_unlock();
         spin_unlock_irqrestore(&callback_lock, flags);
         return allowed;