path: root/kernel/trace/trace_functions_graph.c
author      Carlos Maiolino <cem@kernel.org>    2025-09-05 20:26:42 +0200
committer   Carlos Maiolino <cem@kernel.org>    2025-09-05 20:26:42 +0200
commit      482c57805c722d420bce02b0942b4e15911ec115 (patch)
tree        4429ddd4b96c47c9fc52bd21a879c5525dd842e7 /kernel/trace/trace_functions_graph.c
parent      33ddc796ecbd50cd6211aa9e9eddbf4567038b49 (diff)
parent      07c34f8cef69cb8eeef69c18d6cf0c04fbee3cb3 (diff)
Merge tag 'fix-scrub-reap-calculations_2025-09-05' of https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux into xfs-6.18-merge
xfs: improve online repair reap calculations [6.18 v2 1/2]

A few months ago, the multi-fsblock untorn writes patchset added a bunch of log intent item helper functions to estimate the number of intent items that could be added to a particular transaction. Those helpers enabled us to compute a safe upper bound on the number of blocks that could be written in an untorn fashion with filesystem-provided out of place writes.

Currently, the online fsck code employs static limits on the number of intent items that it's willing to accrue to a single transaction when it's trying to reap what it thinks are the old blocks from a corrupt structure. There have been no problems reported with this approach after years of testing, but static limits are scary and gross because overestimating the intent item limit could result in transaction overflows and dead filesystems; and underestimating causes unnecessary overhead.

This series uses the new log intent item size helpers to estimate the limits dynamically based on worst-case per-block repair work vs. the size of the scrub transaction. After several months of testing this, there don't seem to be any problems here either.

v2: rearrange patches, add review tags

This has been running on the djcloud for months with no problems. Enjoy!

Signed-off-by: "Darrick J. Wong" <djwong@kernel.org>
Signed-off-by: Carlos Maiolino <cem@kernel.org>
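To make the calculation described above concrete, here is a rough, self-contained sketch of estimating a per-transaction reap limit from worst-case per-block intent item sizes instead of a fixed constant. Every name, structure, and byte count below is a hypothetical illustration, not the actual XFS helpers added by the untorn writes patchset.

/*
 * Hypothetical sketch: bound the number of blocks reaped per transaction
 * by how much worst-case log intent item data fits in the transaction's
 * reservation, rather than using a hard-coded limit.
 */
#include <stddef.h>

struct reap_tx {
	size_t log_reservation;	/* log space reserved for this transaction */
};

/* Assumed worst-case bytes of intent items needed to reap one block. */
static size_t reap_intent_bytes_per_block(void)
{
	size_t extent_free  = 64;	/* free-extent intent + done items (assumed) */
	size_t rmap_update  = 72;	/* reverse-mapping update items (assumed) */
	size_t refcount_adj = 48;	/* refcount adjustment items (assumed) */

	return extent_free + rmap_update + refcount_adj;
}

/*
 * Number of blocks that can be reaped in one transaction: how many
 * worst-case per-block intent item sets fit in half the reservation,
 * never returning zero so the reap always makes forward progress.
 */
static unsigned int reap_max_blocks(const struct reap_tx *tx)
{
	unsigned int max = (tx->log_reservation / 2) /
			   reap_intent_bytes_per_block();

	return max ? max : 1;
}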
Diffstat (limited to 'kernel/trace/trace_functions_graph.c')
-rw-r--r--   kernel/trace/trace_functions_graph.c   22
1 files changed, 16 insertions, 6 deletions
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 66e1a527cf1a..a7f4b9a47a71 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -27,14 +27,21 @@ struct fgraph_cpu_data {
 	unsigned long enter_funcs[FTRACE_RETFUNC_DEPTH];
 };
 
+struct fgraph_ent_args {
+	struct ftrace_graph_ent_entry ent;
+	/* Force the sizeof of args[] to have FTRACE_REGS_MAX_ARGS entries */
+	unsigned long args[FTRACE_REGS_MAX_ARGS];
+};
+
 struct fgraph_data {
 	struct fgraph_cpu_data __percpu *cpu_data;
 
 	/* Place to preserve last processed entry. */
 	union {
-		struct ftrace_graph_ent_entry ent;
+		struct fgraph_ent_args ent;
+		/* TODO allow retaddr to have args */
 		struct fgraph_retaddr_ent_entry rent;
-	} ent;
+	};
 	struct ftrace_graph_ret_entry ret;
 	int failed;
 	int cpu;
@@ -627,10 +634,13 @@ get_return_for_leaf(struct trace_iterator *iter,
 	 * Save current and next entries for later reference
 	 * if the output fails.
 	 */
-	if (unlikely(curr->ent.type == TRACE_GRAPH_RETADDR_ENT))
-		data->ent.rent = *(struct fgraph_retaddr_ent_entry *)curr;
-	else
-		data->ent.ent = *curr;
+	if (unlikely(curr->ent.type == TRACE_GRAPH_RETADDR_ENT)) {
+		data->rent = *(struct fgraph_retaddr_ent_entry *)curr;
+	} else {
+		int size = min((int)sizeof(data->ent), (int)iter->ent_size);
+
+		memcpy(&data->ent, curr, size);
+	}
 	/*
 	 * If the next event is not a return type, then
 	 * we only care about what type it is. Otherwise we can
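The bounded copy in the hunk above clamps the saved entry to both the storage reserved in fgraph_data and the actual size of the ring-buffer event, since graph-entry records are variable-sized once function arguments are recorded with them. Below is a standalone sketch of that pattern; the types here are hypothetical stand-ins that only mirror the idea, not the real ftrace structures.

#include <string.h>

#define MAX_SAVED_ARGS 6		/* stand-in for FTRACE_REGS_MAX_ARGS */

/* Variable-sized record as it sits in the ring buffer (hypothetical). */
struct ent_event {
	int type;
	unsigned long func;
	unsigned long args[];		/* zero or more argument words follow */
};

/* Fixed-size save area big enough for the largest record we expect. */
struct ent_saved {
	int type;
	unsigned long func;
	unsigned long args[MAX_SAVED_ARGS];
};

/*
 * Copy no more than the event actually contains (ent_size) and no more
 * than the save area can hold, so neither the source nor the destination
 * is overrun; this is the same min() bound used in the diff above.
 */
static void save_entry(struct ent_saved *dst, const struct ent_event *src,
		       size_t ent_size)
{
	size_t size = ent_size < sizeof(*dst) ? ent_size : sizeof(*dst);

	memcpy(dst, src, size);
}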