Diffstat (limited to 'kernel')
-rw-r--r--   kernel/dma/debug.c                           |   5
-rw-r--r--   kernel/irq/chip.c                            |   2
-rw-r--r--   kernel/irq/manage.c                          |   4
-rw-r--r--   kernel/power/hibernate.c                     |   4
-rw-r--r--   kernel/power/main.c                          |  22
-rw-r--r--   kernel/power/process.c                       |   1
-rw-r--r--   kernel/power/suspend.c                       |   1
-rw-r--r--   kernel/sched/ext.c                           | 126
-rw-r--r--   kernel/sched/fair.c                          |  12
-rw-r--r--   kernel/sched/sched.h                         |   3
-rw-r--r--   kernel/time/timekeeping.c                    |   2
-rw-r--r--   kernel/trace/rv/monitors/pagefault/Kconfig   |   1
-rw-r--r--   kernel/trace/rv/rv.c                         |  12
13 files changed, 157 insertions, 38 deletions
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index 1e5c64cb6a42..138ede653de4 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -23,6 +23,7 @@
 #include <linux/ctype.h>
 #include <linux/list.h>
 #include <linux/slab.h>
+#include <linux/swiotlb.h>
 #include <asm/sections.h>
 #include "debug.h"
 
@@ -594,7 +595,9 @@ static void add_dma_entry(struct dma_debug_entry *entry, unsigned long attrs)
 	if (rc == -ENOMEM) {
 		pr_err_once("cacheline tracking ENOMEM, dma-debug disabled\n");
 		global_disable = true;
-	} else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
+	} else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+		   !(IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) &&
+		     is_swiotlb_active(entry->dev))) {
 		err_printk(entry->dev, entry,
 			"cacheline tracking EEXIST, overlapping mappings aren't supported\n");
 	}
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 3ffa0d80ddd1..d1917b28761a 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -1030,7 +1030,7 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
 void __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 		       const char *name)
 {
-	scoped_irqdesc_get_and_lock(irq, 0)
+	scoped_irqdesc_get_and_buslock(irq, 0)
 		__irq_do_set_handler(scoped_irqdesc, handle, is_chained, name);
 }
 EXPORT_SYMBOL_GPL(__irq_set_handler);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index c94837382037..400856abf672 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -659,7 +659,7 @@ void __disable_irq(struct irq_desc *desc)
 
 static int __disable_irq_nosync(unsigned int irq)
 {
-	scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_GLOBAL) {
+	scoped_irqdesc_get_and_buslock(irq, IRQ_GET_DESC_CHECK_GLOBAL) {
 		__disable_irq(scoped_irqdesc);
 		return 0;
 	}
@@ -789,7 +789,7 @@ void __enable_irq(struct irq_desc *desc)
  */
 void enable_irq(unsigned int irq)
 {
-	scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_GLOBAL) {
+	scoped_irqdesc_get_and_buslock(irq, IRQ_GET_DESC_CHECK_GLOBAL) {
 		struct irq_desc *desc = scoped_irqdesc;
 
 		if (WARN(!desc->irq_data.chip, "enable_irq before setup/request_irq: irq %u\n", irq))
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 14e85ff23551..53166ef86ba4 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -706,7 +706,6 @@ static void power_down(void)
 
 #ifdef CONFIG_SUSPEND
 	if (hibernation_mode == HIBERNATION_SUSPEND) {
-		pm_restore_gfp_mask();
 		error = suspend_devices_and_enter(mem_sleep_current);
 		if (!error)
 			goto exit;
@@ -746,9 +745,6 @@ static void power_down(void)
 		cpu_relax();
 
 exit:
-	/* Match the pm_restore_gfp_mask() call in hibernate(). */
-	pm_restrict_gfp_mask();
-
 	/* Restore swap signature. */
 	error = swsusp_unmark();
 	if (error)
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 3cf2d7e72567..549f51ca3a1e 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -31,23 +31,35 @@
  * held, unless the suspend/hibernate code is guaranteed not to run in parallel
  * with that modification).
  */
+static unsigned int saved_gfp_count;
 static gfp_t saved_gfp_mask;
 
 void pm_restore_gfp_mask(void)
 {
 	WARN_ON(!mutex_is_locked(&system_transition_mutex));
-	if (saved_gfp_mask) {
-		gfp_allowed_mask = saved_gfp_mask;
-		saved_gfp_mask = 0;
-	}
+
+	if (WARN_ON(!saved_gfp_count) || --saved_gfp_count)
+		return;
+
+	gfp_allowed_mask = saved_gfp_mask;
+	saved_gfp_mask = 0;
+
+	pm_pr_dbg("GFP mask restored\n");
 }
 
 void pm_restrict_gfp_mask(void)
 {
 	WARN_ON(!mutex_is_locked(&system_transition_mutex));
-	WARN_ON(saved_gfp_mask);
+
+	if (saved_gfp_count++) {
+		WARN_ON((saved_gfp_mask & ~(__GFP_IO | __GFP_FS)) != gfp_allowed_mask);
+		return;
+	}
+
 	saved_gfp_mask = gfp_allowed_mask;
 	gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
+
+	pm_pr_dbg("GFP mask restricted\n");
 }
 
 unsigned int lock_system_sleep(void)
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 8ff68ebaa1e0..dc0dfc349f22 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -132,6 +132,7 @@ int freeze_processes(void)
 	if (!pm_freezing)
 		static_branch_inc(&freezer_active);
 
+	pm_wakeup_clear(0);
 	pm_freezing = true;
 	error = try_to_freeze_tasks(true);
 	if (!error)
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 4bb4686c1c08..b4ca17c2fecf 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -595,7 +595,6 @@ static int enter_state(suspend_state_t state)
 	}
 
 	pm_pr_dbg("Preparing system for sleep (%s)\n", mem_sleep_labels[state]);
-	pm_wakeup_clear(0);
 	pm_suspend_clear_flags();
 	error = suspend_prepare(state);
 	if (error)
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 2b0e88206d07..ecb251e883ea 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -67,8 +67,19 @@ static unsigned long scx_watchdog_timestamp = INITIAL_JIFFIES;
 
 static struct delayed_work scx_watchdog_work;
 
-/* for %SCX_KICK_WAIT */
-static unsigned long __percpu *scx_kick_cpus_pnt_seqs;
+/*
+ * For %SCX_KICK_WAIT: Each CPU has a pointer to an array of pick_task sequence
+ * numbers. The arrays are allocated with kvzalloc() as size can exceed percpu
+ * allocator limits on large machines. O(nr_cpu_ids^2) allocation, allocated
+ * lazily when enabling and freed when disabling to avoid waste when sched_ext
+ * isn't active.
+ */
+struct scx_kick_pseqs {
+	struct rcu_head		rcu;
+	unsigned long		seqs[];
+};
+
+static DEFINE_PER_CPU(struct scx_kick_pseqs __rcu *, scx_kick_pseqs);
 
 /*
  * Direct dispatch marker.
@@ -780,13 +791,23 @@ static void schedule_deferred(struct rq *rq)
 	if (rq->scx.flags & SCX_RQ_IN_WAKEUP)
 		return;
 
+	/* Don't do anything if there already is a deferred operation. */
+	if (rq->scx.flags & SCX_RQ_BAL_CB_PENDING)
+		return;
+
 	/*
 	 * If in balance, the balance callbacks will be called before rq lock is
 	 * released. Schedule one.
+	 *
+	 *
+	 * We can't directly insert the callback into the
+	 * rq's list: The call can drop its lock and make the pending balance
+	 * callback visible to unrelated code paths that call rq_pin_lock().
+	 *
+	 * Just let balance_one() know that it must do it itself.
 	 */
 	if (rq->scx.flags & SCX_RQ_IN_BALANCE) {
-		queue_balance_callback(rq, &rq->scx.deferred_bal_cb,
-				       deferred_bal_cb_workfn);
+		rq->scx.flags |= SCX_RQ_BAL_CB_PENDING;
 		return;
 	}
 
@@ -2003,6 +2024,19 @@ static void flush_dispatch_buf(struct scx_sched *sch, struct rq *rq)
 	dspc->cursor = 0;
 }
 
+static inline void maybe_queue_balance_callback(struct rq *rq)
+{
+	lockdep_assert_rq_held(rq);
+
+	if (!(rq->scx.flags & SCX_RQ_BAL_CB_PENDING))
+		return;
+
+	queue_balance_callback(rq, &rq->scx.deferred_bal_cb,
+				deferred_bal_cb_workfn);
+
+	rq->scx.flags &= ~SCX_RQ_BAL_CB_PENDING;
+}
+
 static int balance_one(struct rq *rq, struct task_struct *prev)
 {
 	struct scx_sched *sch = scx_root;
@@ -2150,6 +2184,8 @@ static int balance_scx(struct rq *rq, struct task_struct *prev,
 #endif
 	rq_repin_lock(rq, rf);
 
+	maybe_queue_balance_callback(rq);
+
 	return ret;
 }
 
@@ -3471,7 +3507,9 @@ static void scx_sched_free_rcu_work(struct work_struct *work)
 	struct scx_dispatch_q *dsq;
 	int node;
 
+	irq_work_sync(&sch->error_irq_work);
 	kthread_stop(sch->helper->task);
+
 	free_percpu(sch->pcpu);
 
 	for_each_node_state(node, N_POSSIBLE)
@@ -3850,6 +3888,27 @@ static const char *scx_exit_reason(enum scx_exit_kind kind)
 	}
 }
 
+static void free_kick_pseqs_rcu(struct rcu_head *rcu)
+{
+	struct scx_kick_pseqs *pseqs = container_of(rcu, struct scx_kick_pseqs, rcu);
+
+	kvfree(pseqs);
+}
+
+static void free_kick_pseqs(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct scx_kick_pseqs **pseqs = per_cpu_ptr(&scx_kick_pseqs, cpu);
+		struct scx_kick_pseqs *to_free;
+
+		to_free = rcu_replace_pointer(*pseqs, NULL, true);
+		if (to_free)
+			call_rcu(&to_free->rcu, free_kick_pseqs_rcu);
+	}
+}
+
 static void scx_disable_workfn(struct kthread_work *work)
 {
 	struct scx_sched *sch = container_of(work, struct scx_sched, disable_work);
@@ -3986,6 +4045,7 @@ static void scx_disable_workfn(struct kthread_work *work)
 	free_percpu(scx_dsp_ctx);
 	scx_dsp_ctx = NULL;
 	scx_dsp_max_batch = 0;
+	free_kick_pseqs();
 
 	mutex_unlock(&scx_enable_mutex);
 
@@ -4348,6 +4408,33 @@ static void scx_vexit(struct scx_sched *sch,
 	irq_work_queue(&sch->error_irq_work);
 }
 
+static int alloc_kick_pseqs(void)
+{
+	int cpu;
+
+	/*
+	 * Allocate per-CPU arrays sized by nr_cpu_ids. Use kvzalloc as size
+	 * can exceed percpu allocator limits on large machines.
+	 */
+	for_each_possible_cpu(cpu) {
+		struct scx_kick_pseqs **pseqs = per_cpu_ptr(&scx_kick_pseqs, cpu);
+		struct scx_kick_pseqs *new_pseqs;
+
+		WARN_ON_ONCE(rcu_access_pointer(*pseqs));
+
+		new_pseqs = kvzalloc_node(struct_size(new_pseqs, seqs, nr_cpu_ids),
+					  GFP_KERNEL, cpu_to_node(cpu));
+		if (!new_pseqs) {
+			free_kick_pseqs();
+			return -ENOMEM;
+		}
+
+		rcu_assign_pointer(*pseqs, new_pseqs);
+	}
+
+	return 0;
+}
+
 static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops)
 {
 	struct scx_sched *sch;
@@ -4495,10 +4582,14 @@ static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
 		goto err_unlock;
 	}
 
+	ret = alloc_kick_pseqs();
+	if (ret)
+		goto err_unlock;
+
 	sch = scx_alloc_and_add_sched(ops);
 	if (IS_ERR(sch)) {
 		ret = PTR_ERR(sch);
-		goto err_unlock;
+		goto err_free_pseqs;
 	}
 
 	/*
@@ -4701,6 +4792,8 @@ static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
 
 	return 0;
 
+err_free_pseqs:
+	free_kick_pseqs();
 err_unlock:
 	mutex_unlock(&scx_enable_mutex);
 	return ret;
@@ -5082,10 +5175,18 @@ static void kick_cpus_irq_workfn(struct irq_work *irq_work)
 {
 	struct rq *this_rq = this_rq();
 	struct scx_rq *this_scx = &this_rq->scx;
-	unsigned long *pseqs = this_cpu_ptr(scx_kick_cpus_pnt_seqs);
+	struct scx_kick_pseqs __rcu *pseqs_pcpu = __this_cpu_read(scx_kick_pseqs);
 	bool should_wait = false;
+	unsigned long *pseqs;
 	s32 cpu;
 
+	if (unlikely(!pseqs_pcpu)) {
+		pr_warn_once("kick_cpus_irq_workfn() called with NULL scx_kick_pseqs");
+		return;
+	}
+
+	pseqs = rcu_dereference_bh(pseqs_pcpu)->seqs;
+
 	for_each_cpu(cpu, this_scx->cpus_to_kick) {
 		should_wait |= kick_one_cpu(cpu, this_rq, pseqs);
 		cpumask_clear_cpu(cpu, this_scx->cpus_to_kick);
@@ -5208,11 +5309,6 @@ void __init init_sched_ext_class(void)
 
 	scx_idle_init_masks();
 
-	scx_kick_cpus_pnt_seqs =
-		__alloc_percpu(sizeof(scx_kick_cpus_pnt_seqs[0]) * nr_cpu_ids,
-			       __alignof__(scx_kick_cpus_pnt_seqs[0]));
-	BUG_ON(!scx_kick_cpus_pnt_seqs);
-
 	for_each_possible_cpu(cpu) {
 		struct rq *rq = cpu_rq(cpu);
 		int  n = cpu_to_node(cpu);
@@ -5688,8 +5784,8 @@ BTF_KFUNCS_START(scx_kfunc_ids_dispatch)
 BTF_ID_FLAGS(func, scx_bpf_dispatch_nr_slots)
 BTF_ID_FLAGS(func, scx_bpf_dispatch_cancel)
 BTF_ID_FLAGS(func, scx_bpf_dsq_move_to_local)
-BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice)
-BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime)
+BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice, KF_RCU)
+BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime, KF_RCU)
 BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU)
 BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU)
 BTF_KFUNCS_END(scx_kfunc_ids_dispatch)
@@ -5820,8 +5916,8 @@ __bpf_kfunc_end_defs();
 
 BTF_KFUNCS_START(scx_kfunc_ids_unlocked)
 BTF_ID_FLAGS(func, scx_bpf_create_dsq, KF_SLEEPABLE)
-BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice)
-BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime)
+BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice, KF_RCU)
+BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime, KF_RCU)
 BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU)
 BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU)
 BTF_KFUNCS_END(scx_kfunc_ids_unlocked)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index cee1793e8277..25970dbbb279 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6437,6 +6437,16 @@ static void sync_throttle(struct task_group *tg, int cpu)
 
 	cfs_rq->throttle_count = pcfs_rq->throttle_count;
 	cfs_rq->throttled_clock_pelt = rq_clock_pelt(cpu_rq(cpu));
+
+	/*
+	 * It is not enough to sync the "pelt_clock_throttled" indicator
+	 * with the parent cfs_rq when the hierarchy is not queued.
+	 * Always join a throttled hierarchy with PELT clock throttled
+	 * and leaf it to the first enqueue, or distribution to
+	 * unthrottle the PELT clock.
+	 */
+	if (cfs_rq->throttle_count)
+		cfs_rq->pelt_clock_throttled = 1;
 }
 
 /* conditionally throttle active cfs_rq's from put_prev_entity() */
@@ -13187,6 +13197,8 @@ static void propagate_entity_cfs_rq(struct sched_entity *se)
 		if (!cfs_rq_pelt_clock_throttled(cfs_rq))
 			list_add_leaf_cfs_rq(cfs_rq);
 	}
+
+	assert_list_leaf_cfs_rq(rq_of(cfs_rq));
 }
 #else /* !CONFIG_FAIR_GROUP_SCHED: */
 static void propagate_entity_cfs_rq(struct sched_entity *se) { }
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 1f5d07067f60..adfb6e3409d7 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -784,6 +784,7 @@ enum scx_rq_flags {
 	SCX_RQ_BAL_KEEP		= 1 << 3, /* balance decided to keep current */
 	SCX_RQ_BYPASSING	= 1 << 4,
 	SCX_RQ_CLK_VALID	= 1 << 5, /* RQ clock is fresh and valid */
+	SCX_RQ_BAL_CB_PENDING	= 1 << 6, /* must queue a cb after dispatching */
 
 	SCX_RQ_IN_WAKEUP	= 1 << 16,
 	SCX_RQ_IN_BALANCE	= 1 << 17,
@@ -3740,11 +3741,9 @@ static inline int mm_cid_get(struct rq *rq, struct task_struct *t,
 			     struct mm_struct *mm)
 {
 	struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid;
-	struct cpumask *cpumask;
 	int cid;
 
 	lockdep_assert_rq_held(rq);
-	cpumask = mm_cidmask(mm);
 	cid = __this_cpu_read(pcpu_cid->cid);
 	if (mm_cid_is_valid(cid)) {
 		mm_cid_snapshot_time(rq, mm);
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index b6974fce800c..3a4d3b2e3f74 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -3070,7 +3070,7 @@ static int __init tk_aux_sysfs_init(void)
 		return -ENOMEM;
 	}
 
-	for (int i = 0; i <= MAX_AUX_CLOCKS; i++) {
+	for (int i = 0; i < MAX_AUX_CLOCKS; i++) {
 		char id[2] = { [0] = '0' + i, };
 		struct kobject *clk = kobject_create_and_add(id, auxo);
 
diff --git a/kernel/trace/rv/monitors/pagefault/Kconfig b/kernel/trace/rv/monitors/pagefault/Kconfig
index 5e16625f1653..0e013f00c33b 100644
--- a/kernel/trace/rv/monitors/pagefault/Kconfig
+++ b/kernel/trace/rv/monitors/pagefault/Kconfig
@@ -5,6 +5,7 @@ config RV_MON_PAGEFAULT
 	select RV_LTL_MONITOR
 	depends on RV_MON_RTAPP
 	depends on X86 || RISCV
+	depends on MMU
 	default y
 	select LTL_MON_EVENTS_ID
 	bool "pagefault monitor"
diff --git a/kernel/trace/rv/rv.c b/kernel/trace/rv/rv.c
index 48338520376f..43e9ea473cda 100644
--- a/kernel/trace/rv/rv.c
+++ b/kernel/trace/rv/rv.c
@@ -501,7 +501,7 @@ static void *enabled_monitors_next(struct seq_file *m, void *p, loff_t *pos)
 
 	list_for_each_entry_continue(mon, &rv_monitors_list, list) {
 		if (mon->enabled)
-			return mon;
+			return &mon->list;
 	}
 
 	return NULL;
@@ -509,7 +509,7 @@ static void *enabled_monitors_next(struct seq_file *m, void *p, loff_t *pos)
 
 static void *enabled_monitors_start(struct seq_file *m, loff_t *pos)
 {
-	struct rv_monitor *mon;
+	struct list_head *head;
 	loff_t l;
 
 	mutex_lock(&rv_interface_lock);
@@ -517,15 +517,15 @@ static void *enabled_monitors_start(struct seq_file *m, loff_t *pos)
 	if (list_empty(&rv_monitors_list))
 		return NULL;
 
-	mon = list_entry(&rv_monitors_list, struct rv_monitor, list);
+	head = &rv_monitors_list;
 
 	for (l = 0; l <= *pos; ) {
-		mon = enabled_monitors_next(m, mon, &l);
-		if (!mon)
+		head = enabled_monitors_next(m, head, &l);
+		if (!head)
 			break;
 	}
 
-	return mon;
+	return head;
 }
 
 /*
