mm/hugetlb_internal.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Internal HugeTLB definitions.
 * (C) Nadia Yvette Chambers, April 2004
 */

#ifndef _LINUX_HUGETLB_INTERNAL_H
#define _LINUX_HUGETLB_INTERNAL_H

#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>

/*
 * Check if the hstate represents gigantic pages but gigantic page
 * runtime support is not available. This is a common condition used to
 * skip operations that cannot be performed on gigantic pages when runtime
 * support is disabled.
 */
static inline bool hstate_is_gigantic_no_runtime(struct hstate *h)
{
	return hstate_is_gigantic(h) && !gigantic_page_runtime_supported();
}
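
/*
 * Typical (illustrative) use of the check above: bail out before trying
 * to grow or shrink a gigantic pool when runtime support is missing, e.g.
 *
 *	if (hstate_is_gigantic_no_runtime(h))
 *		return -EINVAL;
 */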

/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */
static inline int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	nid = next_node_in(nid, *nodes_allowed);
	VM_BUG_ON(nid >= MAX_NUMNODES);

	return nid;
}

static inline int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	if (!node_isset(nid, *nodes_allowed))
		nid = next_node_allowed(nid, nodes_allowed);
	return nid;
}

/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advance the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static inline int hstate_next_node_to_alloc(int *next_node,
					    nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(*next_node, nodes_allowed);
	*next_node = next_node_allowed(nid, nodes_allowed);

	return nid;
}
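
/*
 * Worked example for the helpers above: with nodes_allowed = {0,2} and
 * *next_node == 1, get_valid_node_allowed() maps 1 to the next allowed
 * node, 2, so hstate_next_node_to_alloc() returns 2 and the saved cursor
 * wraps around to 0 for the following allocation.
 */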

/*
 * helper for remove_pool_hugetlb_folio() - return the previously saved
 * node ["this node"] from which to free a huge page.  Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static inline int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

	return nid;
}

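/*
 * Iterate over each node in @mask exactly once, in round-robin order.
 * The _to_alloc variant starts from and advances the caller-supplied
 * cursor @next_node; the _to_free variant uses h->next_nid_to_free.
 * @node is the current node id in the loop body and @nr_nodes counts
 * down the nodes remaining to visit.
 */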
#define for_each_node_mask_to_alloc(next_node, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_alloc(next_node, mask)) || 1);	\
		nr_nodes--)

#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
		nr_nodes--)
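
/*
 * Usage sketch (illustrative only: alloc_one_fresh_folio() is a
 * hypothetical helper, not declared in this header, and next_nid stands
 * for the caller's persistent round-robin cursor): try each allowed node
 * once and stop at the first node that yields a folio.
 *
 *	for_each_node_mask_to_alloc(&next_nid, nr_nodes, node, nodes_allowed) {
 *		folio = alloc_one_fresh_folio(h, node);
 *		if (folio)
 *			break;
 *	}
 */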

extern void remove_hugetlb_folio(struct hstate *h, struct folio *folio,
				 bool adjust_surplus);
extern void add_hugetlb_folio(struct hstate *h, struct folio *folio,
			      bool adjust_surplus);
extern void init_new_hugetlb_folio(struct folio *folio);
extern void prep_and_add_allocated_folios(struct hstate *h,
					  struct list_head *folio_list);
extern long demote_pool_huge_page(struct hstate *src,
				  nodemask_t *nodes_allowed,
				  unsigned long nr_to_demote);
extern ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
					   struct hstate *h, int nid,
					   unsigned long count, size_t len);

extern void hugetlb_sysfs_init(void) __init;

#ifdef CONFIG_SYSCTL
extern void hugetlb_sysctl_init(void);
#else
static inline void hugetlb_sysctl_init(void) { }
#endif

#endif /* _LINUX_HUGETLB_INTERNAL_H */