author | Waiman Long <longman@redhat.com> | 2025-04-07 17:21:05 -0400
---|---|---
committer | Tejun Heo <tj@kernel.org> | 2025-04-07 12:05:09 -1000
commit | 86888c7bd117c29eab169c37e5f6bbbf583da983 (patch) |
tree | be16e5244343653121def121c6c8f5cc047f9834 /kernel/cgroup |
parent | 6e6f9170dc6a4660f7a90d67122a08ca919b5f53 (diff) |
cgroup/cpuset: Add warnings to catch inconsistency in exclusive CPUs
Add WARN_ON_ONCE() statements whenever new exclusive CPUs are being
added to a partition root to catch inconsistency in the way exclusive
CPUs are being handled in the cpuset code.
Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'kernel/cgroup')
-rw-r--r-- | kernel/cgroup/cpuset.c | 43
1 file changed, 33 insertions, 10 deletions
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 1497a2a4b2a3..d0143b3dce47 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -1453,13 +1453,15 @@ static int remote_partition_enable(struct cpuset *cs, int new_prs,
 	 * The requested exclusive_cpus must not be allocated to other
 	 * partitions and it can't use up all the root's effective_cpus.
 	 *
-	 * Note that if there is any local partition root above it or
-	 * remote partition root underneath it, its exclusive_cpus must
-	 * have overlapped with subpartitions_cpus.
+	 * The effective_xcpus mask can contain offline CPUs, but there must
+	 * be at least one or more online CPUs present before it can be enabled.
+	 *
+	 * Note that creating a remote partition with any local partition root
+	 * above it or remote partition root underneath it is not allowed.
 	 */
 	compute_effective_exclusive_cpumask(cs, tmp->new_cpus, NULL);
-	if (cpumask_empty(tmp->new_cpus) ||
-	    cpumask_intersects(tmp->new_cpus, subpartitions_cpus) ||
+	WARN_ON_ONCE(cpumask_intersects(tmp->new_cpus, subpartitions_cpus));
+	if (!cpumask_intersects(tmp->new_cpus, cpu_active_mask) ||
 	    cpumask_subset(top_cpuset.effective_cpus, tmp->new_cpus))
 		return PERR_INVCPUS;
 
@@ -1555,6 +1557,7 @@ static void remote_cpus_update(struct cpuset *cs, struct cpumask *xcpus,
 	 * left in the top cpuset.
 	 */
 	if (adding) {
+		WARN_ON_ONCE(cpumask_intersects(tmp->addmask, subpartitions_cpus));
 		if (!capable(CAP_SYS_ADMIN))
 			cs->prs_err = PERR_ACCESS;
 		else if (cpumask_intersects(tmp->addmask, subpartitions_cpus) ||
@@ -1664,7 +1667,7 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
 	bool nocpu;
 
 	lockdep_assert_held(&cpuset_mutex);
-	WARN_ON_ONCE(is_remote_partition(cs));
+	WARN_ON_ONCE(is_remote_partition(cs));	/* For local partition only */
 
 	/*
 	 * new_prs will only be changed for the partcmd_update and
@@ -1710,7 +1713,7 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
 	 * exclusive_cpus not set. Sibling conflict should only happen
 	 * if exclusive_cpus isn't set.
 	 */
-	xcpus = tmp->new_cpus;
+	xcpus = tmp->delmask;
 	if (compute_effective_exclusive_cpumask(cs, xcpus, NULL))
 		WARN_ON_ONCE(!cpumask_empty(cs->exclusive_cpus));
 
@@ -1731,9 +1734,20 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
 		if (nocpu)
 			return PERR_NOCPUS;
 
-		deleting = cpumask_and(tmp->delmask, xcpus, parent->effective_xcpus);
-		if (deleting)
-			subparts_delta++;
+		/*
+		 * This function will only be called when all the preliminary
+		 * checks have passed. At this point, the following condition
+		 * should hold.
+		 *
+		 * (cs->effective_xcpus & cpu_active_mask) ⊆ parent->effective_cpus
+		 *
+		 * Warn if it is not the case.
+		 */
+		cpumask_and(tmp->new_cpus, xcpus, cpu_active_mask);
+		WARN_ON_ONCE(!cpumask_subset(tmp->new_cpus, parent->effective_cpus));
+
+		deleting = true;
+		subparts_delta++;
 		new_prs = (cmd == partcmd_enable) ? PRS_ROOT : PRS_ISOLATED;
 	} else if (cmd == partcmd_disable) {
 		/*
@@ -1788,6 +1802,15 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
 						  parent->effective_xcpus);
 	}
 	/*
+	 * The new CPUs to be removed from parent's effective CPUs
+	 * must be present.
+	 */
+	if (deleting) {
+		cpumask_and(tmp->new_cpus, tmp->delmask, cpu_active_mask);
+		WARN_ON_ONCE(!cpumask_subset(tmp->new_cpus, parent->effective_cpus));
+	}
+
+	/*
 	 * Make partition invalid if parent's effective_cpus could
 	 * become empty and there are tasks in the parent.
 	 */
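As a side note for readers who want to experiment with the invariant asserted above, the following standalone sketch models the `(cs->effective_xcpus & cpu_active_mask) ⊆ parent->effective_cpus` check in plain user-space C. It uses 64-bit bitmaps in place of the kernel's struct cpumask and a small warn-once macro in place of WARN_ON_ONCE(); the helper names and sample mask values are hypothetical and only illustrate the semantics of cpumask_and() and cpumask_subset(), not the kernel implementation.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in: a CPU mask modeled as a 64-bit bitmap (CPUs 0-63). */
typedef uint64_t cpumask_t;

/* Hypothetical warn-once helper mimicking the spirit of WARN_ON_ONCE(). */
#define WARN_ONCE_IF(cond, msg)					\
	do {							\
		static bool warned;				\
		if ((cond) && !warned) {			\
			warned = true;				\
			fprintf(stderr, "WARNING: %s\n", msg);	\
		}						\
	} while (0)

/* Subset test: every bit set in a must also be set in b. */
static bool mask_subset(cpumask_t a, cpumask_t b)
{
	return (a & ~b) == 0;
}

int main(void)
{
	cpumask_t effective_xcpus  = 0x0f;	/* CPUs 0-3 requested as exclusive */
	cpumask_t cpu_active_mask  = 0xff;	/* CPUs 0-7 online */
	cpumask_t parent_effective = 0x07;	/* parent only owns CPUs 0-2 */

	/* Online part of the exclusive CPUs, analogous to cpumask_and(). */
	cpumask_t online_xcpus = effective_xcpus & cpu_active_mask;

	/* The invariant the patch warns on when violated. */
	WARN_ONCE_IF(!mask_subset(online_xcpus, parent_effective),
		     "exclusive CPUs not covered by parent's effective CPUs");
	return 0;
}
```

Here mask_subset(a, b) mirrors the semantics of cpumask_subset(): it reports whether every bit set in a is also set in b, which is exactly the condition whose failure the added WARN_ON_ONCE() calls flag.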