 Documentation/rust/coding-guidelines.rst | 75
 arch/x86/kernel/cpu/amd.c                | 16
 arch/x86/kernel/cpu/resctrl/monitor.c    | 14
 arch/x86/mm/pat/set_memory.c             |  2
 arch/x86/mm/tlb.c                        | 24
 kernel/events/core.c                     |  8
 kernel/events/uprobes.c                  |  6
 kernel/sched/core.c                      |  2
 kernel/sched/deadline.c                  |  3
 kernel/sched/fair.c                      | 26
 rust/kernel/alloc/kvec.rs                |  2
 rust/kernel/bitmap.rs                    |  8
 rust/kernel/cpufreq.rs                   |  3
 13 files changed, 155 insertions(+), 34 deletions(-)
diff --git a/Documentation/rust/coding-guidelines.rst b/Documentation/rust/coding-guidelines.rst
index 6ff9e754755d..3198be3a6d63 100644
--- a/Documentation/rust/coding-guidelines.rst
+++ b/Documentation/rust/coding-guidelines.rst
@@ -38,6 +38,81 @@ Like ``clang-format`` for the rest of the kernel, ``rustfmt`` works on
individual files, and does not require a kernel configuration. Sometimes it may
even work with broken code.
+Imports
+~~~~~~~
+
+``rustfmt``, by default, formats imports in a way that is prone to conflicts
+while merging and rebasing, since in some cases it condenses several items into
+the same line. For instance:
+
+.. code-block:: rust
+
+ // Do not use this style.
+ use crate::{
+ example1,
+ example2::{example3, example4, example5},
+ example6, example7,
+ example8::example9,
+ };
+
+Instead, the kernel uses a vertical layout that looks like this:
+
+.. code-block:: rust
+
+ use crate::{
+ example1,
+ example2::{
+ example3,
+ example4,
+ example5, //
+ },
+ example6,
+ example7,
+ example8::example9, //
+ };
+
+That is, each item goes on its own line, and braces are used as soon as there
+is more than one item in a list.
+
+The trailing empty comment ensures this formatting is preserved. Not only that,
+``rustfmt`` will actually reformat imports vertically when the empty comment is
+added. That is, it is possible to easily reformat the original example into the
+expected style by running ``rustfmt`` on an input like:
+
+.. code-block:: rust
+
+ // Do not use this style.
+ use crate::{
+ example1,
+ example2::{example3, example4, example5, //
+ },
+ example6, example7,
+ example8::example9, //
+ };
+
+The trailing empty comment works for nested imports, as shown above, as well as
+for single-item imports -- this can be useful to minimize diffs within patch
+series:
+
+.. code-block:: rust
+
+ use crate::{
+ example1, //
+ };
+
+The trailing empty comment works on any of the lines within the braces -- for
+instance, placing it after ``example6`` above works just as well. Keeping it
+on the last item is preferred, since that is reminiscent of the trailing comma
+in other formatters, but sometimes it may be simpler to leave it elsewhere to
+avoid moving the comment several times within a patch series as the list
+changes.
+
+There may be cases where exceptions need to be made, i.e. none of this is a
+hard rule. There is also code that has not been migrated to this style yet,
+but please do not introduce new code in other styles.
+
+Eventually, the goal is to get ``rustfmt`` to support this formatting style (or
+a similar one) automatically in a stable release without requiring the trailing
+empty comment. At that point, those comments can be removed.
+
Comments
--------
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 5398db4dedb4..ccaa51ce63f6 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -1355,11 +1355,23 @@ static __init int print_s5_reset_status_mmio(void)
return 0;
value = ioread32(addr);
- iounmap(addr);
/* Value with "all bits set" is an error response and should be ignored. */
- if (value == U32_MAX)
+ if (value == U32_MAX) {
+ iounmap(addr);
return 0;
+ }
+
+ /*
+ * Clear all reason bits so they won't be retained if the next reset
+ * does not update the register. Besides, some bits are never cleared by
+ * hardware so it's software's responsibility to clear them.
+ *
+ * Writing the value back effectively clears all reason bits as they are
+ * write-1-to-clear.
+ */
+ iowrite32(value, addr);
+ iounmap(addr);
for (i = 0; i < ARRAY_SIZE(s5_reset_reason_txt); i++) {
if (!(value & BIT(i)))
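
For reference, the write-1-to-clear (W1C) idiom the new comment relies on can
be sketched in isolation. This is a minimal user-space sketch with a
hypothetical status register, not the actual FCH S5 register layout:

    #include <stdint.h>
    #include <stdio.h>

    /* Latch and clear the sticky reason bits of a W1C status register. */
    static void report_and_clear(volatile uint32_t *status)
    {
        uint32_t value = *status;   /* read the latched reason bits */

        /*
         * W1C semantics: writing back the value just read clears exactly
         * the bits that were set, and leaves the zero bits untouched.
         */
        *status = value;

        for (int i = 0; i < 32; i++)
            if (value & (1u << i))
                printf("reason bit %d was set\n", i);
    }
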
diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
index c8945610d455..2cd25a0d4637 100644
--- a/arch/x86/kernel/cpu/resctrl/monitor.c
+++ b/arch/x86/kernel/cpu/resctrl/monitor.c
@@ -242,7 +242,9 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_mon_domain *d,
u32 unused, u32 rmid, enum resctrl_event_id eventid,
u64 *val, void *ignored)
{
+ struct rdt_hw_mon_domain *hw_dom = resctrl_to_arch_mon_dom(d);
int cpu = cpumask_any(&d->hdr.cpu_mask);
+ struct arch_mbm_state *am;
u64 msr_val;
u32 prmid;
int ret;
@@ -251,12 +253,16 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_mon_domain *d,
prmid = logical_rmid_to_physical_rmid(cpu, rmid);
ret = __rmid_read_phys(prmid, eventid, &msr_val);
- if (ret)
- return ret;
- *val = get_corrected_val(r, d, rmid, eventid, msr_val);
+ if (!ret) {
+ *val = get_corrected_val(r, d, rmid, eventid, msr_val);
+ } else if (ret == -EINVAL) {
+ am = get_arch_mbm_state(hw_dom, rmid, eventid);
+ if (am)
+ am->prev_msr = 0;
+ }
- return 0;
+ return ret;
}
static int __cntr_id_read(u32 cntr_id, u64 *val)
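
The new -EINVAL branch follows the usual baseline-reset pattern for delta-based
counters: when the hardware read reports the counter as unavailable, the cached
previous value is zeroed so the next successful read starts a fresh delta
instead of computing one against stale state. A hedged, self-contained sketch
(names hypothetical):

    #include <errno.h>
    #include <stdint.h>

    struct counter_state {
        uint64_t prev_msr;  /* last raw value; 0 means "no baseline" */
        uint64_t total;     /* accumulated count */
    };

    static int read_counter(struct counter_state *st, int hw_ret, uint64_t raw)
    {
        if (hw_ret == -EINVAL) {
            st->prev_msr = 0;   /* stale baseline must not be reused */
            return hw_ret;
        }
        if (hw_ret)
            return hw_ret;      /* other errors: keep state as-is */

        st->total += raw - st->prev_msr;
        st->prev_msr = raw;
        return 0;
    }
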
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index d2d54b8c4dbb..970981893c9b 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -446,7 +446,7 @@ static void cpa_flush(struct cpa_data *cpa, int cache)
}
start = fix_addr(__cpa_addr(cpa, 0));
- end = fix_addr(__cpa_addr(cpa, cpa->numpages));
+ end = start + cpa->numpages * PAGE_SIZE;
if (cpa->force_flush_all)
end = TLB_FLUSH_ALL;
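
The new end is derived from the already fixed-up start rather than by fixing up
the one-past-the-end address separately; either way, the flush must cover
exactly cpa->numpages pages with an exclusive end. A small sketch of that range
arithmetic (user-space C, 4 KiB pages assumed):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL    /* assumed page size for the sketch */

    static void flush_one(unsigned long addr)
    {
        printf("flush page at %#lx\n", addr);
    }

    static void flush_range(unsigned long start, unsigned long numpages)
    {
        unsigned long end = start + numpages * PAGE_SIZE;   /* exclusive */

        for (unsigned long addr = start; addr < end; addr += PAGE_SIZE)
            flush_one(addr);
    }
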
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 39f80111e6f1..5d221709353e 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -911,11 +911,31 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
* CR3 and cpu_tlbstate.loaded_mm are not all in sync.
*/
this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
- barrier();
- /* Start receiving IPIs and then read tlb_gen (and LAM below) */
+ /*
+ * Make sure this CPU is set in mm_cpumask() such that we'll
+ * receive invalidation IPIs.
+ *
+ * Rely on the smp_mb() implied by cpumask_set_cpu()'s atomic
+ * operation, or explicitly provide one. Such that:
+ *
+ * switch_mm_irqs_off() flush_tlb_mm_range()
+ * smp_store_release(loaded_mm, SWITCHING); atomic64_inc_return(tlb_gen)
+ * smp_mb(); // here // smp_mb() implied
+ * atomic64_read(tlb_gen); this_cpu_read(loaded_mm);
+ *
+ * we properly order against flush_tlb_mm_range(), where the
+ * loaded_mm load can happen in native_flush_tlb_multi() ->
+ * should_flush_tlb().
+ *
+ * This way switch_mm() must see the new tlb_gen or
+ * flush_tlb_mm_range() must see the new loaded_mm, or both.
+ */
if (next != &init_mm && !cpumask_test_cpu(cpu, mm_cpumask(next)))
cpumask_set_cpu(cpu, mm_cpumask(next));
+ else
+ smp_mb();
+
next_tlb_gen = atomic64_read(&next->context.tlb_gen);
ns = choose_new_asid(next, next_tlb_gen);
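
The diagram in the new comment is the classic store-buffering pattern. As a
user-space C11 analogue (for illustration only; the names mirror the kernel
ones), the full fences guarantee that at least one side observes the other's
store, so "both read the old value" is the one outcome that cannot happen:

    #include <stdatomic.h>

    static atomic_int loaded_mm;    /* stands in for cpu_tlbstate.loaded_mm */
    static atomic_int tlb_gen;      /* stands in for mm->context.tlb_gen */

    /* Mirrors switch_mm_irqs_off(): publish loaded_mm, full fence, read
     * tlb_gen. */
    static int cpu_switch_mm(void)
    {
        atomic_store(&loaded_mm, 1);
        atomic_thread_fence(memory_order_seq_cst);  /* the smp_mb() */
        return atomic_load(&tlb_gen);
    }

    /* Mirrors flush_tlb_mm_range(): bump tlb_gen (a seq_cst RMW already
     * implies the full barrier), then read loaded_mm. */
    static int cpu_flush(void)
    {
        atomic_fetch_add(&tlb_gen, 1);
        return atomic_load(&loaded_mm);
    }

Run concurrently on two threads, at least one of the two functions returns the
updated value, which is exactly the guarantee the kernel comment states for
switch_mm() versus flush_tlb_mm_range().
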
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 7541f6f85fcb..177e57c1a362 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -9403,7 +9403,7 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
flags |= MAP_HUGETLB;
if (file) {
- struct inode *inode;
+ const struct inode *inode;
dev_t dev;
buf = kmalloc(PATH_MAX, GFP_KERNEL);
@@ -9416,12 +9416,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
* need to add enough zero bytes after the string to handle
* the 64bit alignment we do later.
*/
- name = file_path(file, buf, PATH_MAX - sizeof(u64));
+ name = d_path(file_user_path(file), buf, PATH_MAX - sizeof(u64));
if (IS_ERR(name)) {
name = "//toolong";
goto cpy_name;
}
- inode = file_inode(vma->vm_file);
+ inode = file_user_inode(vma->vm_file);
dev = inode->i_sb->s_dev;
ino = inode->i_ino;
gen = inode->i_generation;
@@ -9492,7 +9492,7 @@ static bool perf_addr_filter_match(struct perf_addr_filter *filter,
if (!filter->path.dentry)
return false;
- if (d_inode(filter->path.dentry) != file_inode(file))
+ if (d_inode(filter->path.dentry) != file_user_inode(file))
return false;
if (filter->offset > offset + size)
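
These hunks switch from the file's internal path/inode to the user-visible
ones, which matters for files opened through a backing file (e.g. on
overlayfs), where f_path and f_inode refer to the backing object rather than
what the user sees. A hedged kernel-context sketch of the reporting pattern
(helper name hypothetical):

    #include <linux/dcache.h>
    #include <linux/fs.h>

    /*
     * Resolve the user-visible path of @file into @buf. d_path() builds the
     * string at the end of @buf and returns a pointer into it, or an
     * ERR_PTR() on failure, so callers must check with IS_ERR().
     */
    static const char *user_visible_name(struct file *file, char *buf, int buflen)
    {
        return d_path(file_user_path(file), buf, buflen);
    }
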
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 8709c69118b5..f11ceb8be8c4 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -2765,6 +2765,9 @@ static void handle_swbp(struct pt_regs *regs)
handler_chain(uprobe, regs);
+ /* Try to optimize after first hit. */
+ arch_uprobe_optimize(&uprobe->arch, bp_vaddr);
+
/*
* If user decided to take execution elsewhere, it makes little sense
* to execute the original instruction, so let's skip it.
@@ -2772,9 +2775,6 @@ static void handle_swbp(struct pt_regs *regs)
if (instruction_pointer(regs) != bp_vaddr)
goto out;
- /* Try to optimize after first hit. */
- arch_uprobe_optimize(&uprobe->arch, bp_vaddr);
-
if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
goto out;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 198d2dd45f59..f1ebf67b48e2 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8571,10 +8571,12 @@ int sched_cpu_dying(unsigned int cpu)
sched_tick_stop(cpu);
rq_lock_irqsave(rq, &rf);
+ update_rq_clock(rq);
if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
WARN(true, "Dying CPU not properly vacated!");
dump_rq_tasks(rq, KERN_WARNING);
}
+ dl_server_stop(&rq->fair_server);
rq_unlock_irqrestore(rq, &rf);
calc_load_migrate(rq);
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 615411a0a881..7b7671060bf9 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1582,6 +1582,9 @@ void dl_server_start(struct sched_dl_entity *dl_se)
if (!dl_server(dl_se) || dl_se->dl_server_active)
return;
+ if (WARN_ON_ONCE(!cpu_online(cpu_of(rq))))
+ return;
+
dl_se->dl_server_active = 1;
enqueue_dl_entity(dl_se, ENQUEUE_WAKEUP);
if (!dl_task(dl_se->rq->curr) || dl_entity_preempt(dl_se, &rq->curr->dl))
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index bc0b7ce8a65d..cee1793e8277 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8920,21 +8920,21 @@ simple:
return p;
idle:
- if (!rf)
- return NULL;
-
- new_tasks = sched_balance_newidle(rq, rf);
+ if (rf) {
+ new_tasks = sched_balance_newidle(rq, rf);
- /*
- * Because sched_balance_newidle() releases (and re-acquires) rq->lock, it is
- * possible for any higher priority task to appear. In that case we
- * must re-start the pick_next_entity() loop.
- */
- if (new_tasks < 0)
- return RETRY_TASK;
+ /*
+ * Because sched_balance_newidle() releases (and re-acquires)
+ * rq->lock, it is possible for any higher priority task to
+ * appear. In that case we must re-start the pick_next_entity()
+ * loop.
+ */
+ if (new_tasks < 0)
+ return RETRY_TASK;
- if (new_tasks > 0)
- goto again;
+ if (new_tasks > 0)
+ goto again;
+ }
/*
* rq is about to be idle, check if we need to update the
diff --git a/rust/kernel/alloc/kvec.rs b/rust/kernel/alloc/kvec.rs
index e94aebd084c8..ac8d6f763ae8 100644
--- a/rust/kernel/alloc/kvec.rs
+++ b/rust/kernel/alloc/kvec.rs
@@ -9,7 +9,7 @@ use super::{
};
use crate::{
fmt,
- page::AsPageIter,
+ page::AsPageIter, //
};
use core::{
borrow::{Borrow, BorrowMut},
diff --git a/rust/kernel/bitmap.rs b/rust/kernel/bitmap.rs
index 711b8368b38f..aa8fc7bf06fc 100644
--- a/rust/kernel/bitmap.rs
+++ b/rust/kernel/bitmap.rs
@@ -167,7 +167,9 @@ impl core::ops::Deref for BitmapVec {
let ptr = if self.nbits <= BITS_PER_LONG {
// SAFETY: Bitmap is represented inline.
#[allow(unused_unsafe, reason = "Safe since Rust 1.92.0")]
- unsafe { core::ptr::addr_of!(self.repr.bitmap) }
+ unsafe {
+ core::ptr::addr_of!(self.repr.bitmap)
+ }
} else {
// SAFETY: Bitmap is represented as array of `unsigned long`.
unsafe { self.repr.ptr.as_ptr() }
@@ -184,7 +186,9 @@ impl core::ops::DerefMut for BitmapVec {
let ptr = if self.nbits <= BITS_PER_LONG {
// SAFETY: Bitmap is represented inline.
#[allow(unused_unsafe, reason = "Safe since Rust 1.92.0")]
- unsafe { core::ptr::addr_of_mut!(self.repr.bitmap) }
+ unsafe {
+ core::ptr::addr_of_mut!(self.repr.bitmap)
+ }
} else {
// SAFETY: Bitmap is represented as array of `unsigned long`.
unsafe { self.repr.ptr.as_ptr() }
diff --git a/rust/kernel/cpufreq.rs b/rust/kernel/cpufreq.rs
index 21b5b9b8acc1..1a555fcb120a 100644
--- a/rust/kernel/cpufreq.rs
+++ b/rust/kernel/cpufreq.rs
@@ -38,8 +38,7 @@ use macros::vtable;
const CPUFREQ_NAME_LEN: usize = bindings::CPUFREQ_NAME_LEN as usize;
/// Default transition latency value in nanoseconds.
-pub const DEFAULT_TRANSITION_LATENCY_NS: u32 =
- bindings::CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
+pub const DEFAULT_TRANSITION_LATENCY_NS: u32 = bindings::CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
/// CPU frequency driver flags.
pub mod flags {