Diffstat (limited to 'include')
-rw-r--r--  include/linux/args.h         |   4
-rw-r--r--  include/linux/file.h         |  13
-rw-r--r--  include/linux/huge_mm.h      |  13
-rw-r--r--  include/linux/irqdomain.h    |  16
-rw-r--r--  include/linux/mm.h           |   2
-rw-r--r--  include/linux/mmzone.h       |   2
-rw-r--r--  include/linux/rseq_entry.h   |   2
-rw-r--r--  include/trace/events/ceph.h  | 234
8 files changed, 251 insertions, 35 deletions
diff --git a/include/linux/args.h b/include/linux/args.h
index 2e8e65d975c7..0562dc51435e 100644
--- a/include/linux/args.h
+++ b/include/linux/args.h
@@ -6,9 +6,9 @@
/*
* How do these macros work?
*
- * In __COUNT_ARGS() _0 to _12 are just placeholders from the start
+ * In __COUNT_ARGS() _0 to _15 are just placeholders from the start
* in order to make sure _n is positioned over the correct number
- * from 12 to 0 (depending on X, which is a variadic argument list).
+ * from 15 to 0 (depending on X, which is a variadic argument list).
* They serve no purpose other than occupying a position. Since each
* macro parameter must have a distinct identifier, those identifiers
* are as good as any.
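The comment above describes the positional-placeholder trick that this hunk extends from 12 to 15 placeholders. As a minimal standalone sketch of the same technique (cut down to a 4-argument limit, with illustrative names rather than the kernel's exact definitions, and relying on the GNU ", ##X" extension just as args.h does), the idea can be exercised like this:

#include <stdio.h>

/*
 * Sketch only: _0.._4 merely occupy positions so that _n lands on the
 * right element of the reversed number list appended by COUNT_ARGS().
 */
#define __COUNT_ARGS(_0, _1, _2, _3, _4, _n, X...) _n
#define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 4, 3, 2, 1, 0)

int main(void)
{
	printf("%d %d %d\n",
	       COUNT_ARGS(),          /* 0: ", ##X" removes the leading comma */
	       COUNT_ARGS(a),         /* 1: 'a' is dropped during expansion   */
	       COUNT_ARGS(a, b, c));  /* 3 */
	return 0;
}

With one argument, __COUNT_ARGS() sees (, a, 4, 3, 2, 1, 0), so _n sits over 1; with three arguments it sits over 3, which is exactly the positioning the comment describes.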
diff --git a/include/linux/file.h b/include/linux/file.h
index cf389fde9bc2..27484b444d31 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -161,12 +161,10 @@ typedef struct fd_prepare class_fd_prepare_t;
/* Do not use directly. */
static inline void class_fd_prepare_destructor(const struct fd_prepare *fdf)
{
- if (unlikely(fdf->err)) {
- if (likely(fdf->__fd >= 0))
- put_unused_fd(fdf->__fd);
- if (unlikely(!IS_ERR_OR_NULL(fdf->__file)))
- fput(fdf->__file);
- }
+ if (unlikely(fdf->__fd >= 0))
+ put_unused_fd(fdf->__fd);
+ if (unlikely(!IS_ERR_OR_NULL(fdf->__file)))
+ fput(fdf->__file);
}
/* Do not use directly. */
@@ -230,7 +228,8 @@ static inline int class_fd_prepare_lock_err(const struct fd_prepare *fdf)
VFS_WARN_ON_ONCE(fdp->__fd < 0); \
VFS_WARN_ON_ONCE(IS_ERR_OR_NULL(fdp->__file)); \
fd_install(fdp->__fd, fdp->__file); \
- fdp->__fd; \
+ retain_and_null_ptr(fdp->__file); \
+ take_fd(fdp->__fd); \
})
/* Do not use directly. */
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index ae7f21aad0ac..a4d9f964dfde 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -369,14 +369,13 @@ enum split_type {
SPLIT_TYPE_NON_UNIFORM,
};
-bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins);
int __split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
unsigned int new_order);
int folio_split_unmapped(struct folio *folio, unsigned int new_order);
-int min_order_for_split(struct folio *folio);
+unsigned int min_order_for_split(struct folio *folio);
int split_folio_to_list(struct folio *folio, struct list_head *list);
-bool folio_split_supported(struct folio *folio, unsigned int new_order,
- enum split_type split_type, bool warns);
+int folio_check_splittable(struct folio *folio, unsigned int new_order,
+ enum split_type split_type);
int folio_split(struct folio *folio, unsigned int new_order, struct page *page,
struct list_head *list);
@@ -407,7 +406,7 @@ static inline int split_huge_page_to_order(struct page *page, unsigned int new_o
static inline int try_folio_split_to_order(struct folio *folio,
struct page *page, unsigned int new_order)
{
- if (!folio_split_supported(folio, new_order, SPLIT_TYPE_NON_UNIFORM, /* warns= */ false))
+ if (folio_check_splittable(folio, new_order, SPLIT_TYPE_NON_UNIFORM))
return split_huge_page_to_order(&folio->page, new_order);
return folio_split(folio, new_order, page, NULL);
}
@@ -631,10 +630,10 @@ static inline int split_huge_page(struct page *page)
return -EINVAL;
}
-static inline int min_order_for_split(struct folio *folio)
+static inline unsigned int min_order_for_split(struct folio *folio)
{
VM_WARN_ON_ONCE_FOLIO(1, folio);
- return -EINVAL;
+ return 0;
}
static inline int split_folio_to_list(struct folio *folio, struct list_head *list)
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 952d3c8dd6b7..62f81bbeb490 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -730,22 +730,6 @@ static inline void msi_device_domain_free_wired(struct irq_domain *domain, unsig
}
#endif
-static inline struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
- const struct irq_domain_ops *ops,
- void *host_data)
-{
- struct irq_domain_info info = {
- .fwnode = of_fwnode_handle(of_node),
- .hwirq_max = ~0U,
- .ops = ops,
- .host_data = host_data,
- };
- struct irq_domain *d;
-
- d = irq_domain_instantiate(&info);
- return IS_ERR(d) ? NULL : d;
-}
-
static inline struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
unsigned int size,
const struct irq_domain_ops *ops,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 7a1819c20643..15076261d0c2 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -438,7 +438,7 @@ enum {
#define VM_NOHUGEPAGE INIT_VM_FLAG(NOHUGEPAGE)
#define VM_MERGEABLE INIT_VM_FLAG(MERGEABLE)
#define VM_STACK INIT_VM_FLAG(STACK)
-#ifdef CONFIG_STACK_GROWS_UP
+#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK_EARLY INIT_VM_FLAG(STACK_EARLY)
#else
#define VM_STACK_EARLY VM_NONE
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 4398e027f450..75ef7c9f9307 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -2289,7 +2289,7 @@ void sparse_init(void);
#else
#define sparse_init() do {} while (0)
#define sparse_index_init(_sec, _nid) do {} while (0)
-#define sparse_vmemmap_init_nid_early(_nid, _use) do {} while (0)
+#define sparse_vmemmap_init_nid_early(_nid) do {} while (0)
#define sparse_vmemmap_init_nid_late(_nid) do {} while (0)
#define pfn_in_present_section pfn_valid
#define subsection_map_init(_pfn, _nr_pages) do {} while (0)
diff --git a/include/linux/rseq_entry.h b/include/linux/rseq_entry.h
index c92167ff8a7f..a36b472627de 100644
--- a/include/linux/rseq_entry.h
+++ b/include/linux/rseq_entry.h
@@ -596,7 +596,7 @@ static __always_inline void rseq_exit_to_user_mode_legacy(void)
void __rseq_debug_syscall_return(struct pt_regs *regs);
-static inline void rseq_debug_syscall_return(struct pt_regs *regs)
+static __always_inline void rseq_debug_syscall_return(struct pt_regs *regs)
{
if (static_branch_unlikely(&rseq_debug_enabled))
__rseq_debug_syscall_return(regs);
diff --git a/include/trace/events/ceph.h b/include/trace/events/ceph.h
new file mode 100644
index 000000000000..08cb0659fbfc
--- /dev/null
+++ b/include/trace/events/ceph.h
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Ceph filesystem support module tracepoints
+ *
+ * Copyright (C) 2025 IONOS SE. All Rights Reserved.
+ * Written by Max Kellermann (max.kellermann@ionos.com)
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ceph
+
+#if !defined(_TRACE_CEPH_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_CEPH_H
+
+#include <linux/tracepoint.h>
+
+#define ceph_mdsc_suspend_reasons \
+ EM(ceph_mdsc_suspend_reason_no_mdsmap, "no-mdsmap") \
+ EM(ceph_mdsc_suspend_reason_no_active_mds, "no-active-mds") \
+ EM(ceph_mdsc_suspend_reason_rejected, "rejected") \
+ E_(ceph_mdsc_suspend_reason_session, "session")
+
+#ifndef __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
+#define __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
+
+#undef EM
+#undef E_
+#define EM(a, b) a,
+#define E_(a, b) a
+
+enum ceph_mdsc_suspend_reason { ceph_mdsc_suspend_reasons } __mode(byte);
+
+#endif
+
+/*
+ * Export enum symbols via userspace.
+ */
+#undef EM
+#undef E_
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define E_(a, b) TRACE_DEFINE_ENUM(a);
+
+ceph_mdsc_suspend_reasons;
+
+/*
+ * Now redefine the EM() and E_() macros to map the enums to the strings that
+ * will be printed in the output.
+ */
+#undef EM
+#undef E_
+#define EM(a, b) { a, b },
+#define E_(a, b) { a, b }
+
+TRACE_EVENT(ceph_mdsc_submit_request,
+ TP_PROTO(struct ceph_mds_client *mdsc,
+ struct ceph_mds_request *req),
+
+ TP_ARGS(mdsc, req),
+
+ TP_STRUCT__entry(
+ __field(u64, tid)
+ __field(int, op)
+ __field(u64, ino)
+ __field(u64, snap)
+ ),
+
+ TP_fast_assign(
+ struct inode *inode;
+
+ __entry->tid = req->r_tid;
+ __entry->op = req->r_op;
+
+ inode = req->r_inode;
+ if (inode == NULL && req->r_dentry)
+ inode = d_inode(req->r_dentry);
+
+ if (inode) {
+ __entry->ino = ceph_ino(inode);
+ __entry->snap = ceph_snap(inode);
+ } else {
+ __entry->ino = __entry->snap = 0;
+ }
+ ),
+
+ TP_printk("R=%llu op=%s ino=%llx,%llx",
+ __entry->tid,
+ ceph_mds_op_name(__entry->op),
+ __entry->ino, __entry->snap)
+);
+
+TRACE_EVENT(ceph_mdsc_suspend_request,
+ TP_PROTO(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session,
+ struct ceph_mds_request *req,
+ enum ceph_mdsc_suspend_reason reason),
+
+ TP_ARGS(mdsc, session, req, reason),
+
+ TP_STRUCT__entry(
+ __field(u64, tid)
+ __field(int, op)
+ __field(int, mds)
+ __field(enum ceph_mdsc_suspend_reason, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->tid = req->r_tid;
+ __entry->op = req->r_op;
+ __entry->mds = session ? session->s_mds : -1;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("R=%llu op=%s reason=%s",
+ __entry->tid,
+ ceph_mds_op_name(__entry->op),
+ __print_symbolic(__entry->reason, ceph_mdsc_suspend_reasons))
+);
+
+TRACE_EVENT(ceph_mdsc_resume_request,
+ TP_PROTO(struct ceph_mds_client *mdsc,
+ struct ceph_mds_request *req),
+
+ TP_ARGS(mdsc, req),
+
+ TP_STRUCT__entry(
+ __field(u64, tid)
+ __field(int, op)
+ ),
+
+ TP_fast_assign(
+ __entry->tid = req->r_tid;
+ __entry->op = req->r_op;
+ ),
+
+ TP_printk("R=%llu op=%s",
+ __entry->tid,
+ ceph_mds_op_name(__entry->op))
+);
+
+TRACE_EVENT(ceph_mdsc_send_request,
+ TP_PROTO(struct ceph_mds_session *session,
+ struct ceph_mds_request *req),
+
+ TP_ARGS(session, req),
+
+ TP_STRUCT__entry(
+ __field(u64, tid)
+ __field(int, op)
+ __field(int, mds)
+ ),
+
+ TP_fast_assign(
+ __entry->tid = req->r_tid;
+ __entry->op = req->r_op;
+ __entry->mds = session->s_mds;
+ ),
+
+ TP_printk("R=%llu op=%s mds=%d",
+ __entry->tid,
+ ceph_mds_op_name(__entry->op),
+ __entry->mds)
+);
+
+TRACE_EVENT(ceph_mdsc_complete_request,
+ TP_PROTO(struct ceph_mds_client *mdsc,
+ struct ceph_mds_request *req),
+
+ TP_ARGS(mdsc, req),
+
+ TP_STRUCT__entry(
+ __field(u64, tid)
+ __field(int, op)
+ __field(int, err)
+ __field(unsigned long, latency_ns)
+ ),
+
+ TP_fast_assign(
+ __entry->tid = req->r_tid;
+ __entry->op = req->r_op;
+ __entry->err = req->r_err;
+ __entry->latency_ns = req->r_end_latency - req->r_start_latency;
+ ),
+
+ TP_printk("R=%llu op=%s err=%d latency_ns=%lu",
+ __entry->tid,
+ ceph_mds_op_name(__entry->op),
+ __entry->err,
+ __entry->latency_ns)
+);
+
+TRACE_EVENT(ceph_handle_caps,
+ TP_PROTO(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session,
+ int op,
+ const struct ceph_vino *vino,
+ struct ceph_inode_info *inode,
+ u32 seq, u32 mseq, u32 issue_seq),
+
+ TP_ARGS(mdsc, session, op, vino, inode, seq, mseq, issue_seq),
+
+ TP_STRUCT__entry(
+ __field(int, mds)
+ __field(int, op)
+ __field(u64, ino)
+ __field(u64, snap)
+ __field(u32, seq)
+ __field(u32, mseq)
+ __field(u32, issue_seq)
+ ),
+
+ TP_fast_assign(
+ __entry->mds = session->s_mds;
+ __entry->op = op;
+ __entry->ino = vino->ino;
+ __entry->snap = vino->snap;
+ __entry->seq = seq;
+ __entry->mseq = mseq;
+ __entry->issue_seq = issue_seq;
+ ),
+
+ TP_printk("mds=%d op=%s vino=%llx.%llx seq=%u iseq=%u mseq=%u",
+ __entry->mds,
+ ceph_cap_op_name(__entry->op),
+ __entry->ino,
+ __entry->snap,
+ __entry->seq,
+ __entry->issue_seq,
+ __entry->mseq)
+);
+
+#undef EM
+#undef E_
+#endif /* _TRACE_CEPH_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
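The EM()/E_() dance in the new include/trace/events/ceph.h above is the usual tracepoint X-macro pattern: the single ceph_mdsc_suspend_reasons list is expanded once into enum constants, once into TRACE_DEFINE_ENUM() so the values are visible to userspace tooling, and once into { value, "string" } pairs consumed by __print_symbolic(). A minimal userspace sketch of the same pattern, with illustrative names and a plain lookup table standing in for the tracing infrastructure, looks roughly like this:

#include <stdio.h>

#define suspend_reasons \
	EM(reason_no_mdsmap,     "no-mdsmap") \
	EM(reason_no_active_mds, "no-active-mds") \
	EM(reason_rejected,      "rejected") \
	E_(reason_session,       "session")

/* Expansion 1: enum constants (mirrors the __mode(byte) enum above). */
#define EM(a, b) a,
#define E_(a, b) a
enum suspend_reason { suspend_reasons };
#undef EM
#undef E_

/*
 * Expansion 2: { value, string } pairs, here feeding a lookup table
 * instead of __print_symbolic().
 */
#define EM(a, b) { a, b },
#define E_(a, b) { a, b }
static const struct {
	enum suspend_reason val;
	const char *name;
} reason_names[] = { suspend_reasons };
#undef EM
#undef E_

static const char *reason_name(enum suspend_reason r)
{
	for (size_t i = 0; i < sizeof(reason_names) / sizeof(reason_names[0]); i++)
		if (reason_names[i].val == r)
			return reason_names[i].name;
	return "unknown";
}

int main(void)
{
	printf("%s\n", reason_name(reason_rejected));	/* prints "rejected" */
	return 0;
}

Driving every expansion from one list is what keeps the enum, its exported values, and the strings printed in the trace output from drifting apart.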