path: root/kernel/bpf/dmabuf_iter.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2025 Google LLC */
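/*
 * BPF iterator for DMA buffers (struct dma_buf).
 *
 * Provides a seq_file based bpf_iter target named "dmabuf" as well as
 * open-coded iterator kfuncs (bpf_iter_dmabuf_new/next/destroy), both of
 * which walk the system-wide list of exported DMA buffers.
 */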
#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/dma-buf.h>
#include <linux/kernel.h>
#include <linux/seq_file.h>

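/*
 * Start a walk of the system-wide dmabuf list. The walk is a single pass:
 * a restart at a nonzero position terminates it. dma_buf_iter_begin()
 * returns the first buffer with a reference held, or NULL if the list is
 * empty.
 */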
static void *dmabuf_iter_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos)
		return NULL;

	return dma_buf_iter_begin();
}

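/*
 * dma_buf_iter_next() drops the reference on the current buffer and
 * returns the next one in the list with a reference held, or NULL at the
 * end of the list.
 */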
static void *dmabuf_iter_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct dma_buf *dmabuf = v;

	++*pos;

	return dma_buf_iter_next(dmabuf);
}

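/* Context passed to BPF programs attached to the dmabuf iterator. */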
struct bpf_iter__dmabuf {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct dma_buf *, dmabuf);
};

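/* Invoke the attached BPF program, if any, for a single dmabuf. */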
static int __dmabuf_seq_show(struct seq_file *seq, void *v, bool in_stop)
{
	struct bpf_iter_meta meta = {
		.seq = seq,
	};
	struct bpf_iter__dmabuf ctx = {
		.meta = &meta,
		.dmabuf = v,
	};
	struct bpf_prog *prog = bpf_iter_get_info(&meta, in_stop);

	if (prog)
		return bpf_iter_run_prog(prog, &ctx);

	return 0;
}

static int dmabuf_iter_seq_show(struct seq_file *seq, void *v)
{
	return __dmabuf_seq_show(seq, v, false);
}

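/* Drop the reference on the element the walk stopped at, if any. */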
static void dmabuf_iter_seq_stop(struct seq_file *seq, void *v)
{
	struct dma_buf *dmabuf = v;

	if (dmabuf)
		dma_buf_put(dmabuf);
}

static const struct seq_operations dmabuf_iter_seq_ops = {
	.start	= dmabuf_iter_seq_start,
	.next	= dmabuf_iter_seq_next,
	.stop	= dmabuf_iter_seq_stop,
	.show	= dmabuf_iter_seq_show,
};

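/* Printed for the iterator link's fd in /proc/<pid>/fdinfo. */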
static void bpf_iter_dmabuf_show_fdinfo(const struct bpf_iter_aux_info *aux,
					struct seq_file *seq)
{
	seq_puts(seq, "dmabuf iter\n");
}

static const struct bpf_iter_seq_info dmabuf_iter_seq_info = {
	.seq_ops		= &dmabuf_iter_seq_ops,
	.init_seq_private	= NULL,
	.fini_seq_private	= NULL,
	.seq_priv_size		= 0,
};

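/*
 * Registration info for the "dmabuf" bpf_iter target. ctx_arg_info marks
 * ctx->dmabuf as a possibly-NULL BTF pointer to struct dma_buf; its BTF id
 * is resolved at init time below.
 */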
static struct bpf_iter_reg bpf_dmabuf_reg_info = {
	.target			= "dmabuf",
	.feature		= BPF_ITER_RESCHED,
	.show_fdinfo		= bpf_iter_dmabuf_show_fdinfo,
	.ctx_arg_info_size	= 1,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__dmabuf, dmabuf),
		  PTR_TO_BTF_ID_OR_NULL },
	},
	.seq_info		= &dmabuf_iter_seq_info,
};

DEFINE_BPF_ITER_FUNC(dmabuf, struct bpf_iter_meta *meta, struct dma_buf *dmabuf)
BTF_ID_LIST_SINGLE(bpf_dmabuf_btf_id, struct, dma_buf)

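/* Resolve the BTF id of struct dma_buf and register the iterator target. */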
static int __init dmabuf_iter_init(void)
{
	bpf_dmabuf_reg_info.ctx_arg_info[0].btf_id = bpf_dmabuf_btf_id[0];
	return bpf_iter_reg_target(&bpf_dmabuf_reg_info);
}

late_initcall(dmabuf_iter_init);
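/*
 * Example (illustrative sketch, following the usual libbpf conventions for
 * iterator programs; names below are arbitrary): a BPF program attached to
 * this target receives one dmabuf per invocation and can emit text through
 * the seq_file, e.g.:
 *
 *	SEC("iter/dmabuf")
 *	int dump_dmabufs(struct bpf_iter__dmabuf *ctx)
 *	{
 *		struct seq_file *seq = ctx->meta->seq;
 *		struct dma_buf *dmabuf = ctx->dmabuf;
 *
 *		if (!dmabuf)
 *			return 0;
 *
 *		BPF_SEQ_PRINTF(seq, "%s %llu\n", dmabuf->exp_name,
 *			       (unsigned long long)dmabuf->size);
 *		return 0;
 *	}
 */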

struct bpf_iter_dmabuf {
	/*
	 * opaque iterator state; having __u64 here preserves correct
	 * alignment requirements in vmlinux.h, generated from BTF
	 */
	__u64 __opaque[1];
} __aligned(8);

/* Non-opaque version of bpf_iter_dmabuf */
struct bpf_iter_dmabuf_kern {
	struct dma_buf *dmabuf;
} __aligned(8);

__bpf_kfunc_start_defs();

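/* Initialize the iterator; the list walk starts lazily on the first _next(). */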
__bpf_kfunc int bpf_iter_dmabuf_new(struct bpf_iter_dmabuf *it)
{
	struct bpf_iter_dmabuf_kern *kit = (void *)it;

	BUILD_BUG_ON(sizeof(*kit) > sizeof(*it));
	BUILD_BUG_ON(__alignof__(*kit) != __alignof__(*it));

	kit->dmabuf = NULL;
	return 0;
}

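/*
 * Return the next dmabuf with a reference held, or NULL once the end of
 * the list is reached. The reference on the previously returned buffer is
 * dropped by dma_buf_iter_next().
 */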
__bpf_kfunc struct dma_buf *bpf_iter_dmabuf_next(struct bpf_iter_dmabuf *it)
{
	struct bpf_iter_dmabuf_kern *kit = (void *)it;

	if (kit->dmabuf)
		kit->dmabuf = dma_buf_iter_next(kit->dmabuf);
	else
		kit->dmabuf = dma_buf_iter_begin();

	return kit->dmabuf;
}

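/* Drop the reference on the buffer the iterator currently points at, if any. */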
__bpf_kfunc void bpf_iter_dmabuf_destroy(struct bpf_iter_dmabuf *it)
{
	struct bpf_iter_dmabuf_kern *kit = (void *)it;

	if (kit->dmabuf)
		dma_buf_put(kit->dmabuf);
}

__bpf_kfunc_end_defs();
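
/*
 * Example (illustrative sketch; availability depends on where these kfuncs
 * are registered for the program type in use): a BPF program can drive the
 * open-coded iterator directly, e.g.:
 *
 *	extern int bpf_iter_dmabuf_new(struct bpf_iter_dmabuf *it) __ksym;
 *	extern struct dma_buf *bpf_iter_dmabuf_next(struct bpf_iter_dmabuf *it) __ksym;
 *	extern void bpf_iter_dmabuf_destroy(struct bpf_iter_dmabuf *it) __ksym;
 *
 *	SEC("syscall")
 *	int sum_dmabuf_sizes(void *ctx)
 *	{
 *		struct bpf_iter_dmabuf it;
 *		struct dma_buf *d;
 *		__u64 total = 0;
 *
 *		bpf_iter_dmabuf_new(&it);
 *		while ((d = bpf_iter_dmabuf_next(&it)))
 *			total += d->size;
 *		bpf_iter_dmabuf_destroy(&it);
 *
 *		bpf_printk("total dmabuf bytes: %llu", total);
 *		return 0;
 *	}
 */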