summaryrefslogtreecommitdiff
path: root/fs/bcachefs/fast_list.c
blob: 2faec143eb31c04d27c2472f43d539c2d8a27833 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
// SPDX-License-Identifier: GPL-2.0

/*
 * Fast, unordered lists
 *
 * Supports add, remove, and iterate
 *
 * Underneath, they're a radix tree and an IDA, with a percpu buffer for slot
 * allocation and freeing.
 *
 * This means that adding, removing, and iterating over items is lockless,
 * except when refilling/emptying the percpu slot buffers.
 */

#include "fast_list.h"

/*
 * Per-cpu cache of free slot indices: batches IDA allocation and freeing so
 * the common add/remove path never touches the shared IDA.
 */
struct fast_list_pcpu {
	u32			nr;		/* number of valid entries[] */
	u32			entries[31];	/* cached free slot indices; 31 presumably keeps
						 * the struct at 128 bytes — TODO confirm sizing */
};

/*
 * Reserve one list slot: take an id from the IDA (range starts at 1, so 0
 * doubles as the failure sentinel) and ensure the radix tree has backing
 * memory for that index.
 *
 * Returns: the reserved slot index, or 0 on allocation failure.
 */
static int fast_list_alloc_idx(struct fast_list *l, gfp_t gfp)
{
	int slot = ida_alloc_range(&l->slots_allocated, 1, INT_MAX, gfp);

	if (unlikely(slot < 0))
		return 0;

	if (likely(genradix_ptr_alloc_inlined(&l->items, slot, gfp)))
		return slot;

	/* Radix tree allocation failed: hand the id back to the IDA */
	ida_free(&l->slots_allocated, slot);
	return 0;
}

/**
 * fast_list_get_idx - get a slot in a fast_list
 * @l:		list to get slot in
 *
 * This allocates a slot in the radix tree without storing to it, so that we can
 * take the potential memory allocation failure early and do the list add later
 * when we can't take an allocation failure.
 *
 * Returns: positive integer on success, -ENOMEM on failure
 */
int fast_list_get_idx(struct fast_list *l)
{
	unsigned long flags;
	int idx;
retry:
	local_irq_save(flags);
	struct fast_list_pcpu *lp = this_cpu_ptr(l->buffer);

	if (unlikely(!lp->nr)) {
		/*
		 * Percpu buffer is empty: refill from the IDA/radix tree.
		 * The GFP_KERNEL allocations may sleep, so irqs must be
		 * re-enabled around them — which also means we may migrate
		 * to a different CPU in the meantime.
		 */
		u32 entries[16], nr = 0;

		local_irq_restore(flags);
		while (nr < ARRAY_SIZE(entries) &&
		       (idx = fast_list_alloc_idx(l, GFP_KERNEL)))
			entries[nr++] = idx;
		local_irq_save(flags);

		/* Re-fetch: we may now be on a different CPU */
		lp = this_cpu_ptr(l->buffer);

		while (nr && lp->nr < ARRAY_SIZE(lp->entries))
			lp->entries[lp->nr++] = entries[--nr];

		if (unlikely(nr)) {
			/*
			 * This CPU's buffer filled up while irqs were
			 * enabled: return the leftover slots (with irqs on,
			 * since ida_free() takes the IDA lock) and retry.
			 */
			local_irq_restore(flags);
			while (nr)
				ida_free(&l->slots_allocated, entries[--nr]);
			goto retry;
		}

		if (unlikely(!lp->nr)) {
			/* fast_list_alloc_idx() produced nothing: OOM */
			local_irq_restore(flags);
			return -ENOMEM;
		}
	}

	idx = lp->entries[--lp->nr];
	local_irq_restore(flags);

	return idx;
}

/**
 * fast_list_add - add an item to a fast_list
 * @l:		list
 * @item:	item to add
 *
 * Allocates a slot in the radix tree and stores to it and then returns the
 * slot index, which must be passed to fast_list_remove().
 *
 * Returns: positive integer on success, -ENOMEM on failure
 */
int fast_list_add(struct fast_list *l, void *item)
{
	int slot = fast_list_get_idx(l);

	/* On success the slot is already backed, so this store can't fail */
	if (slot >= 0)
		*genradix_ptr_inlined(&l->items, slot) = item;

	return slot;
}

/**
 * fast_list_remove - remove an item from a fast_list
 * @l:		list
 * @idx:	item's slot index
 *
 * Zeroes out the slot in the radix tree and frees the slot for future
 * fast_list_add() operations.
 */
void fast_list_remove(struct fast_list *l, unsigned idx)
{
	u32 entries[16], nr = 0;
	unsigned long flags;

	/* idx 0 is never allocated; treat it as "no slot" */
	if (!idx)
		return;

	*genradix_ptr_inlined(&l->items, idx) = NULL;

	local_irq_save(flags);
	struct fast_list_pcpu *lp = this_cpu_ptr(l->buffer);

	/*
	 * Percpu buffer full: pull a batch out to return to the IDA.
	 * The ida_free() calls are deferred until irqs are restored,
	 * since ida_free() takes the IDA lock.
	 */
	if (unlikely(lp->nr == ARRAY_SIZE(lp->entries)))
		while (nr < ARRAY_SIZE(entries))
			entries[nr++] = lp->entries[--lp->nr];

	lp->entries[lp->nr++] = idx;
	local_irq_restore(flags);

	if (unlikely(nr))
		while (nr)
			ida_free(&l->slots_allocated, entries[--nr]);
}

/**
 * fast_list_exit - tear down a fast_list
 * @l:		list to tear down
 *
 * Frees the percpu slot buffer and all backing storage. Items still stored
 * in the radix tree are not freed — the list should already be empty.
 */
void fast_list_exit(struct fast_list *l)
{
	/* XXX: warn if list isn't empty */
	free_percpu(l->buffer);
	ida_destroy(&l->slots_allocated);
	genradix_free(&l->items);
}

/**
 * fast_list_init - initialize a fast_list
 * @l:		list to initialize
 *
 * Returns: 0 on success, -ENOMEM if the percpu slot buffer couldn't be
 * allocated
 */
int fast_list_init(struct fast_list *l)
{
	genradix_init(&l->items);
	ida_init(&l->slots_allocated);

	l->buffer = alloc_percpu(*l->buffer);
	return l->buffer ? 0 : -ENOMEM;
}