From e3ff7c609f39671d1aaff4fb4a8594e14f3e03f8 Mon Sep 17 00:00:00 2001
From: Josh Poimboeuf
Date: Fri, 24 Feb 2023 08:50:00 -0800
Subject: livepatch,sched: Add livepatch task switching to cond_resched()

There have been reports [1][2] of live patches failing to complete
within a reasonable amount of time due to CPU-bound kthreads.

Fix it by patching tasks in cond_resched().

There are four different flavors of cond_resched(), depending on the
kernel configuration.  Hook into all of them.

A more elegant solution might be to use a preempt notifier.  However,
non-ORC unwinders can't unwind a preempted task reliably.

[1] https://lore.kernel.org/lkml/20220507174628.2086373-1-song@kernel.org/
[2] https://lkml.kernel.org/lkml/20230120-vhost-klp-switching-v1-0-7c2b65519c43@kernel.org

Signed-off-by: Josh Poimboeuf
Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Petr Mladek
Tested-by: Seth Forshee (DigitalOcean)
Link: https://lore.kernel.org/r/4ae981466b7814ec221014fc2554b2f86f3fb70b.1677257135.git.jpoimboe@kernel.org
---
 include/linux/livepatch.h       |  1 +
 include/linux/livepatch_sched.h | 29 +++++++++++++++++++++++++++++
 include/linux/sched.h           | 20 +++++++++++++++-----
 3 files changed, 45 insertions(+), 5 deletions(-)
 create mode 100644 include/linux/livepatch_sched.h

(limited to 'include/linux')

diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 293e29960c6e..9b9b38e89563 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -13,6 +13,7 @@
 #include <linux/ftrace.h>
 #include <linux/completion.h>
 #include <linux/list.h>
+#include <linux/livepatch_sched.h>
 
 #if IS_ENABLED(CONFIG_LIVEPATCH)
 
diff --git a/include/linux/livepatch_sched.h b/include/linux/livepatch_sched.h
new file mode 100644
index 000000000000..013794fb5da0
--- /dev/null
+++ b/include/linux/livepatch_sched.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _LINUX_LIVEPATCH_SCHED_H_
+#define _LINUX_LIVEPATCH_SCHED_H_
+
+#include <linux/jump_label.h>
+#include <linux/static_call_types.h>
+
+#ifdef CONFIG_LIVEPATCH
+
+void __klp_sched_try_switch(void);
+
+#if !defined(CONFIG_PREEMPT_DYNAMIC) || !defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
+
+DECLARE_STATIC_KEY_FALSE(klp_sched_try_switch_key);
+
+static __always_inline void klp_sched_try_switch(void)
+{
+	if (static_branch_unlikely(&klp_sched_try_switch_key))
+		__klp_sched_try_switch();
+}
+
+#endif /* !CONFIG_PREEMPT_DYNAMIC || !CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
+
+#else /* !CONFIG_LIVEPATCH */
+static inline void klp_sched_try_switch(void) {}
+static inline void __klp_sched_try_switch(void) {}
+#endif /* CONFIG_LIVEPATCH */
+
+#endif /* _LINUX_LIVEPATCH_SCHED_H_ */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 63d242164b1a..6d654eb4cabd 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -36,6 +36,7 @@
 #include <linux/seqlock.h>
 #include <linux/kcsan.h>
 #include <linux/rv.h>
+#include <linux/livepatch_sched.h>
 #include <asm/kmap_size.h>
 
 /* task_struct member predeclarations (sorted alphabetically): */
@@ -2070,6 +2071,9 @@ extern int __cond_resched(void);
 
 #if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
 
+void sched_dynamic_klp_enable(void);
+void sched_dynamic_klp_disable(void);
+
 DECLARE_STATIC_CALL(cond_resched, __cond_resched);
 
 static __always_inline int _cond_resched(void)
@@ -2078,6 +2082,7 @@ static __always_inline int _cond_resched(void)
 }
 
 #elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+
 extern int dynamic_cond_resched(void);
 
 static __always_inline int _cond_resched(void)
@@ -2085,20 +2090,25 @@ static __always_inline int _cond_resched(void)
 	return dynamic_cond_resched();
 }
 
-#else
+#else /* !CONFIG_PREEMPTION */
 
 static inline int _cond_resched(void)
 {
+	klp_sched_try_switch();
 	return __cond_resched();
 }
 
-#endif /* CONFIG_PREEMPT_DYNAMIC */
+#endif /* PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
 
-#else
+#else /* CONFIG_PREEMPTION && !CONFIG_PREEMPT_DYNAMIC */
 
-static inline int _cond_resched(void) { return 0; }
+static inline int _cond_resched(void)
+{
+	klp_sched_try_switch();
+	return 0;
+}
 
-#endif /* !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC) */
+#endif /* !CONFIG_PREEMPTION || CONFIG_PREEMPT_DYNAMIC */
 
 #define cond_resched() ({			\
	__might_resched(__FILE__, __LINE__, 0);	\
-- cgit
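A note on the pattern the new header relies on: when no live patch transition is in progress, cond_resched() must stay essentially free, so the call into the livepatch core is hidden behind a static key that is false almost all of the time. The sketch below is a minimal userspace C analogue of that fast-path/slow-path split; the names are illustrative, and a relaxed atomic flag stands in for the kernel's jump label, which patches the branch out of the instruction stream entirely:

#include <stdatomic.h>
#include <stdio.h>

/* Analogue of klp_sched_try_switch_key: false almost always, so the
 * fast path should cost (nearly) nothing.  A relaxed atomic load is
 * the closest portable stand-in for a jump label. */
static atomic_bool patch_pending;

/* Out-of-line slow path, analogous to __klp_sched_try_switch(). */
static void slow_try_switch(void)
{
	printf("transition pending: trying to switch this task\n");
}

/* Analogue of klp_sched_try_switch() as called from cond_resched(). */
static inline void try_switch(void)
{
	if (atomic_load_explicit(&patch_pending, memory_order_relaxed))
		slow_try_switch();
}

int main(void)
{
	try_switch();				/* no-op: nothing pending */
	atomic_store(&patch_pending, 1);	/* livepatch core flips the key */
	try_switch();				/* now funnels into the slow path */
	return 0;
}

On PREEMPT_DYNAMIC kernels with static calls, the inline wrapper above is not used; the declarations of sched_dynamic_klp_enable()/sched_dynamic_klp_disable() suggest the same gating is achieved by repointing the cond_resched() static call, with the implementation living outside this include/linux-limited view.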
From 65457b74aa9437418e552e8d52d7112d4f9901a6 Mon Sep 17 00:00:00 2001
From: Domenico Cerasuolo
Date: Thu, 30 Mar 2023 12:54:16 +0200
Subject: sched/psi: Rename existing poll members in preparation

Rename the existing poll members of the PSI implementation to make a
clear distinction between the privileged polling code and the
unprivileged triggers code to be implemented in the next patch.

Suggested-by: Johannes Weiner
Signed-off-by: Domenico Cerasuolo
Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Johannes Weiner
Link: https://lore.kernel.org/r/20230330105418.77061-3-cerasuolodomenico@gmail.com
---
 include/linux/psi_types.h | 36 ++++++++++++++++++------------------
 1 file changed, 18 insertions(+), 18 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h
index 1e0a0d7ace3a..1819afa8b198 100644
--- a/include/linux/psi_types.h
+++ b/include/linux/psi_types.h
@@ -175,26 +175,26 @@ struct psi_group {
 	u64 total[NR_PSI_AGGREGATORS][NR_PSI_STATES - 1];
 	unsigned long avg[NR_PSI_STATES - 1][3];
 
-	/* Monitor work control */
-	struct task_struct __rcu *poll_task;
-	struct timer_list poll_timer;
-	wait_queue_head_t poll_wait;
-	atomic_t poll_wakeup;
-	atomic_t poll_scheduled;
+	/* Monitor RT polling work control */
+	struct task_struct __rcu *rtpoll_task;
+	struct timer_list rtpoll_timer;
+	wait_queue_head_t rtpoll_wait;
+	atomic_t rtpoll_wakeup;
+	atomic_t rtpoll_scheduled;
 
 	/* Protects data used by the monitor */
-	struct mutex trigger_lock;
-
-	/* Configured polling triggers */
-	struct list_head triggers;
-	u32 nr_triggers[NR_PSI_STATES - 1];
-	u32 poll_states;
-	u64 poll_min_period;
-
-	/* Total stall times at the start of monitor activation */
-	u64 polling_total[NR_PSI_STATES - 1];
-	u64 polling_next_update;
-	u64 polling_until;
+	struct mutex rtpoll_trigger_lock;
+
+	/* Configured RT polling triggers */
+	struct list_head rtpoll_triggers;
+	u32 rtpoll_nr_triggers[NR_PSI_STATES - 1];
+	u32 rtpoll_states;
+	u64 rtpoll_min_period;
+
+	/* Total stall times at the start of RT polling monitor activation */
+	u64 rtpoll_total[NR_PSI_STATES - 1];
+	u64 rtpoll_next_update;
+	u64 rtpoll_until;
 };
 
 #else /* CONFIG_PSI */
-- cgit
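For context, the rtpoll_* members renamed above back PSI's fd-based trigger interface, which at this point in the series still requires CAP_SYS_RESOURCE. A minimal sketch of how userspace exercises it, following the documented trigger format "<some|full> <stall us> <window us>" (values illustrative, error handling trimmed):

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Watch for >=150ms of "some" memory stall within any 1s window. */
	const char trig[] = "some 150000 1000000";
	struct pollfd pfd;

	int fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
	if (fd < 0 || write(fd, trig, strlen(trig) + 1) < 0) {
		perror("psi trigger");
		return 1;
	}

	pfd.fd = fd;
	pfd.events = POLLPRI;	/* PSI delivers trigger events as POLLPRI */

	while (poll(&pfd, 1, -1) > 0) {
		if (pfd.revents & POLLERR)
			break;		/* e.g. the monitored cgroup went away */
		if (pfd.revents & POLLPRI)
			printf("memory pressure event\n");
	}
	close(fd);
	return 0;
}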
From d82caa273565b45fcf103148950549af76c314b0 Mon Sep 17 00:00:00 2001
From: Domenico Cerasuolo
Date: Thu, 30 Mar 2023 12:54:18 +0200
Subject: sched/psi: Allow unprivileged polling of N*2s period

PSI offers two mechanisms to get information about a specific resource
pressure. One is reading from /proc/pressure/<resource>, which gives
average pressures aggregated every 2s. The other is creating a pollable
fd for a specific resource and cgroup. Creating such a trigger requires
CAP_SYS_RESOURCE, and allows picking a specific time window and
threshold, spawning an RT thread to aggregate the data.

Systemd would like to provide containers the option to monitor pressure
on their own cgroup and sub-cgroups. For example, if systemd launches a
container that itself then launches services, the container should have
the ability to poll() for pressure in individual services. But neither
the container nor the services are privileged.

This patch implements a mechanism that allows unprivileged users to
create pressure triggers. The difference from privileged trigger
creation is that unprivileged triggers must have a time window that is
a multiple of 2s. This is so that we can avoid unrestricted spawning of
RT threads, and instead use the same aggregation mechanism that is used
for the averages, which runs independently of any triggers.

Suggested-by: Johannes Weiner
Signed-off-by: Domenico Cerasuolo
Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Johannes Weiner
Link: https://lore.kernel.org/r/20230330105418.77061-5-cerasuolodomenico@gmail.com
---
 include/linux/psi.h       | 2 +-
 include/linux/psi_types.h | 7 +++++++
 2 files changed, 8 insertions(+), 1 deletion(-)

(limited to 'include/linux')

diff --git a/include/linux/psi.h b/include/linux/psi.h
index b029a847def1..ab26200c2803 100644
--- a/include/linux/psi.h
+++ b/include/linux/psi.h
@@ -24,7 +24,7 @@ void psi_memstall_leave(unsigned long *flags);
 
 int psi_show(struct seq_file *s, struct psi_group *group, enum psi_res res);
 struct psi_trigger *psi_trigger_create(struct psi_group *group,
-			char *buf, enum psi_res res);
+			char *buf, enum psi_res res, struct file *file);
 void psi_trigger_destroy(struct psi_trigger *t);
 
 __poll_t psi_trigger_poll(void **trigger_ptr, struct file *file,
diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h
index 1819afa8b198..040c089581c6 100644
--- a/include/linux/psi_types.h
+++ b/include/linux/psi_types.h
@@ -151,6 +151,9 @@ struct psi_trigger {
 
 	/* Deferred event(s) from previous ratelimit window */
 	bool pending_event;
+
+	/* Trigger type - PSI_AVGS for unprivileged, PSI_POLL for RT */
+	enum psi_aggregators aggregator;
 };
 
 struct psi_group {
@@ -171,6 +174,10 @@ struct psi_group {
 	/* Aggregator work control */
 	struct delayed_work avgs_work;
 
+	/* Unprivileged triggers against N*PSI_FREQ windows */
+	struct list_head avg_triggers;
+	u32 avg_nr_triggers[NR_PSI_STATES - 1];
+
 	/* Total stall times and sampled pressure averages */
 	u64 total[NR_PSI_AGGREGATORS][NR_PSI_STATES - 1];
 	unsigned long avg[NR_PSI_STATES - 1][3];
-- cgit
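With this change, the open/write/poll sequence sketched after the previous patch also works without CAP_SYS_RESOURCE, provided the requested window is a multiple of the 2s averaging period; such triggers land on the new avg_triggers list and are serviced by the existing avgs_work instead of an RT thread. The only line of that sketch that would differ (values illustrative):

	/* Unprivileged trigger: no CAP_SYS_RESOURCE needed, but the 4s
	 * window must be a multiple of the 2s averaging period.  No RT
	 * thread is spawned; events piggyback on avgs_work. */
	const char trig[] = "some 150000 4000000";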
From 223baf9d17f25e2608dbdff7232c095c1e612268 Mon Sep 17 00:00:00 2001
From: Mathieu Desnoyers
Date: Thu, 20 Apr 2023 10:55:48 -0400
Subject: sched: Fix performance regression introduced by mm_cid

Introduce per-mm/cpu current concurrency id (mm_cid) to fix a
PostgreSQL sysbench regression reported by Aaron Lu.

Keep track of the currently allocated mm_cid for each mm/cpu rather
than freeing them immediately on context switch. This eliminates most
atomic operations when context switching back and forth between threads
belonging to different memory spaces in multi-threaded scenarios (many
processes, each with many threads).

The per-mm/per-cpu mm_cid values are serialized by their respective
runqueue locks.

Thread migration is handled by introducing an invocation of
sched_mm_cid_migrate_to() (with the destination runqueue lock held) in
activate_task() for migrating tasks. If the destination cpu's mm_cid is
unset, and if the source runqueue is not actively using its mm_cid,
then the source cpu's mm_cid is moved to the destination cpu on
migration.

Introduce a task work executed periodically, similar to the NUMA work,
which delays reclaim of cid values when they are unused for a period of
time. Keep track of the allocation time for each per-cpu cid, and let
the task work clear them when they are observed to be older than
SCHED_MM_CID_PERIOD_NS and unused. This task work also clears all
mm_cids which are greater than or equal to the Hamming weight of the mm
cidmask, to keep concurrency ids compact.

To ensure that mm_cid converges towards smaller values as migrations
happen, the prior optimization that was done when context switching
between threads belonging to the same mm is removed: it could delay the
lazy release of the destination runqueue mm_cid after it has been
replaced by a migration. Removing this prior optimization is not an
issue performance-wise because the introduced per-mm/per-cpu mm_cid
tracking also covers this more specific case.

Fixes: af7f588d8f73 ("sched: Introduce per-memory-map concurrency ID")
Reported-by: Aaron Lu
Signed-off-by: Mathieu Desnoyers
Signed-off-by: Peter Zijlstra (Intel)
Tested-by: Aaron Lu
Link: https://lore.kernel.org/lkml/20230327080502.GA570847@ziqianlu-desk2/
---
 include/linux/mm_types.h | 82 +++++++++++++++++++++++++++++++++++++++++++-----
 include/linux/sched.h    |  3 ++
 include/linux/sched/mm.h |  5 +++
 3 files changed, 82 insertions(+), 8 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index a57e6ae78e65..5eab61156f0e 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -550,6 +550,13 @@ struct vm_area_struct {
 	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
 } __randomize_layout;
 
+#ifdef CONFIG_SCHED_MM_CID
+struct mm_cid {
+	u64 time;
+	int cid;
+};
+#endif
+
 struct kioctx_table;
 struct mm_struct {
 	struct {
@@ -600,15 +607,19 @@ struct mm_struct {
 		atomic_t mm_count;
 #ifdef CONFIG_SCHED_MM_CID
 		/**
-		 * @cid_lock: Protect cid bitmap updates vs lookups.
+		 * @pcpu_cid: Per-cpu current cid.
 		 *
-		 * Prevent situations where updates to the cid bitmap happen
-		 * concurrently with lookups. Those can lead to situations
-		 * where a lookup cannot find a free bit simply because it was
-		 * unlucky enough to load, non-atomically, bitmap words as they
-		 * were being concurrently updated by the updaters.
+		 * Keep track of the currently allocated mm_cid for each cpu.
+		 * The per-cpu mm_cid values are serialized by their respective
+		 * runqueue locks.
 		 */
-		raw_spinlock_t cid_lock;
+		struct mm_cid __percpu *pcpu_cid;
+		/*
+		 * @mm_cid_next_scan: Next mm_cid scan (in jiffies).
+		 *
+		 * When the next mm_cid scan is due (in jiffies).
+		 */
+		unsigned long mm_cid_next_scan;
 #endif
 #ifdef CONFIG_MMU
 		atomic_long_t pgtables_bytes; /* size of all page tables */
@@ -873,6 +884,37 @@ static inline void vma_iter_init(struct vma_iterator *vmi,
 }
 
 #ifdef CONFIG_SCHED_MM_CID
+
+enum mm_cid_state {
+	MM_CID_UNSET = -1U,		/* Unset state has lazy_put flag set. */
+	MM_CID_LAZY_PUT = (1U << 31),
+};
+
+static inline bool mm_cid_is_unset(int cid)
+{
+	return cid == MM_CID_UNSET;
+}
+
+static inline bool mm_cid_is_lazy_put(int cid)
+{
+	return !mm_cid_is_unset(cid) && (cid & MM_CID_LAZY_PUT);
+}
+
+static inline bool mm_cid_is_valid(int cid)
+{
+	return !(cid & MM_CID_LAZY_PUT);
+}
+
+static inline int mm_cid_set_lazy_put(int cid)
+{
+	return cid | MM_CID_LAZY_PUT;
+}
+
+static inline int mm_cid_clear_lazy_put(int cid)
+{
+	return cid & ~MM_CID_LAZY_PUT;
+}
+
 /* Accessor for struct mm_struct's cidmask. */
 static inline cpumask_t *mm_cidmask(struct mm_struct *mm)
 {
@@ -886,16 +928,40 @@ static inline cpumask_t *mm_cidmask(struct mm_struct *mm)
 
 static inline void mm_init_cid(struct mm_struct *mm)
 {
-	raw_spin_lock_init(&mm->cid_lock);
+	int i;
+
+	for_each_possible_cpu(i) {
+		struct mm_cid *pcpu_cid = per_cpu_ptr(mm->pcpu_cid, i);
+
+		pcpu_cid->cid = MM_CID_UNSET;
+		pcpu_cid->time = 0;
+	}
 	cpumask_clear(mm_cidmask(mm));
 }
 
+static inline int mm_alloc_cid(struct mm_struct *mm)
+{
+	mm->pcpu_cid = alloc_percpu(struct mm_cid);
+	if (!mm->pcpu_cid)
+		return -ENOMEM;
+	mm_init_cid(mm);
+	return 0;
+}
+
+static inline void mm_destroy_cid(struct mm_struct *mm)
+{
+	free_percpu(mm->pcpu_cid);
+	mm->pcpu_cid = NULL;
+}
+
 static inline unsigned int mm_cid_size(void)
 {
 	return cpumask_size();
 }
 #else /* CONFIG_SCHED_MM_CID */
 static inline void mm_init_cid(struct mm_struct *mm) { }
+static inline int mm_alloc_cid(struct mm_struct *mm) { return 0; }
+static inline void mm_destroy_cid(struct mm_struct *mm) { }
 static inline unsigned int mm_cid_size(void)
 {
 	return 0;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6d654eb4cabd..675298d6eb36 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1314,7 +1314,10 @@ struct task_struct {
 
 #ifdef CONFIG_SCHED_MM_CID
 	int				mm_cid;		/* Current cid in mm */
+	int				last_mm_cid;	/* Most recent cid in mm */
+	int				migrate_from_cpu;
 	int				mm_cid_active;	/* Whether cid bitmap is active */
+	struct callback_head		cid_work;
 #endif
 
 	struct tlbflush_unmap_batch	tlb_ubc;
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 2a243616f222..f20fc0600fcc 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -37,6 +37,11 @@ static inline void mmgrab(struct mm_struct *mm)
 	atomic_inc(&mm->mm_count);
 }
 
+static inline void smp_mb__after_mmgrab(void)
+{
+	smp_mb__after_atomic();
+}
+
 extern void __mmdrop(struct mm_struct *mm);
 
 static inline void mmdrop(struct mm_struct *mm)
-- cgit
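To illustrate the lazy-put protocol encoded by MM_CID_UNSET/MM_CID_LAZY_PUT above, here is a simplified, single-threaded userspace analogue — not the kernel implementation, which performs the reclaim step from the periodic task work and under runqueue locks. The two compare-and-exchange steps show why a concurrent user can always revive a cid that has merely been flagged, so a cid that is still in use is never lost:

#include <stdatomic.h>
#include <stdio.h>

/* Mirrors the encoding above: UNSET is all-ones (so the lazy-put bit
 * is part of it), LAZY_PUT is the top bit of an otherwise valid cid. */
#define CID_UNSET	(~0U)
#define CID_LAZY_PUT	(1U << 31)

static _Atomic unsigned int slot = CID_UNSET;	/* one per-cpu cid slot */

/* Context switch in: try to reuse the slot's cid, clearing a LAZY_PUT
 * flag set by a concurrent scan (the "revive" path); otherwise install
 * a freshly allocated cid (the kernel picks the lowest free bit in the
 * mm's cidmask). */
static unsigned int cid_get(unsigned int fresh)
{
	unsigned int cid = atomic_load(&slot);

	if (cid != CID_UNSET) {
		unsigned int valid = cid & ~CID_LAZY_PUT;

		if (atomic_compare_exchange_strong(&slot, &cid, valid))
			return valid;
	}
	atomic_store(&slot, fresh);
	return fresh;
}

/* Periodic scan (the task work): flag an apparently unused cid, then
 * reclaim it only if nobody revived it in between.  In the kernel the
 * two steps are separated in time; they are back to back here only to
 * keep the demo single-threaded. */
static void scan_and_reclaim(void)
{
	unsigned int cid = atomic_load(&slot);
	unsigned int lazy;

	if (cid == CID_UNSET || (cid & CID_LAZY_PUT))
		return;
	lazy = cid | CID_LAZY_PUT;
	if (!atomic_compare_exchange_strong(&slot, &cid, lazy))
		return;		/* raced with an active user: back off */
	if (atomic_compare_exchange_strong(&slot, &lazy, CID_UNSET))
		printf("reclaimed cid %u\n", cid);
}

int main(void)
{
	printf("got cid %u\n", cid_get(0));	/* allocates */
	printf("got cid %u\n", cid_get(0));	/* reuses the cached cid */
	scan_and_reclaim();			/* flags, then reclaims */
	printf("got cid %u\n", cid_get(1));	/* allocates anew */
	return 0;
}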