author     David S. Miller <davem@davemloft.net>    2018-02-21 15:37:37 -0500
committer  David S. Miller <davem@davemloft.net>    2018-02-21 15:37:37 -0500
commit     bf006d18b74172c3562486b5e354b42cb5bcb261 (patch)
tree       a7437cb2d9b04240d29325c384ad198c74406563 /kernel/bpf/arraymap.c
parent     6c4df17c7a529d460448cc8284b95a4ada37e3a3 (diff)
parent     b1a2ce825737b0165cc08e6f98f8c0ea1affdd60 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Daniel Borkmann says:
====================
pull-request: bpf 2018-02-20
The following pull-request contains BPF updates for your *net* tree.
The main changes are:
1) Fix a memory leak in LPM trie's map_free() callback function, where
the trie structure itself was never freed since the initial implementation.
A synchronize_rcu() was also needed in order to wait for outstanding
programs accessing the trie to complete, from Yonghong.
2) Fix sock_map_alloc()'s error path in order to correctly propagate
the -EINVAL error in case of too-large allocation requests. The bug
was introduced just recently when fixing close hooks via the ULP layer,
fix from Eric.
3) Do not use GFP_ATOMIC in __cpu_map_entry_alloc(). The reason is that
this will not work with the recent __ptr_ring_init_queue_alloc() conversion
to kvmalloc_array(), where that GFP flag is invalid in case of a fallback
to vmalloc(), from Jason.
4) Fix two recent syzkaller warnings: i) fix bpf_prog_array_copy_to_user()
when a prog query with a large number of ids was performed, where we'd
otherwise trigger a warning from the allocator side, and ii) fix a missing
mlock precharge on arraymaps, from Daniel.
5) Two fixes for bpftool in order to avoid breaking JSON output when used
in batch mode, from Quentin.
6) Move a pr_debug() in libbpf in order to avoid having an otherwise
uninitialized variable in bpf_program__reloc_text(), from Jeremy.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
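
The following sketches expand on items 1-3 of the summary above. Item 1 concerns
the LPM trie teardown path. The kernel-style sketch below is illustrative only,
with hypothetical identifiers rather than the actual kernel/bpf/lpm_trie.c code;
it shows the general shape of a map_free callback that first calls
synchronize_rcu() so outstanding programs finish, then frees the elements, and
finally frees the containing structure so it is not leaked.

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Illustrative sketch only: hypothetical structures, not the actual
 * kernel/bpf/lpm_trie.c code. The point is the ordering -- wait for RCU
 * readers, free the elements, then free the container itself so it is
 * not leaked on map teardown.
 */
struct example_elem {
        struct list_head node;
};

struct example_map {
        struct list_head elems;
};

static void example_map_free(struct example_map *map)
{
        struct example_elem *elem, *tmp;

        /* Wait for outstanding BPF programs (RCU readers) that may still
         * be walking the structure to complete.
         */
        synchronize_rcu();

        list_for_each_entry_safe(elem, tmp, &map->elems, node)
                kfree(elem);

        /* Free the containing structure as well -- the part that had been
         * leaked since the initial implementation, per item 1 above.
         */
        kfree(map);
}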
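Item 2 describes an error-propagation fix. The sketch below shows the general
pattern under assumed placeholder names (struct example_sock_map and
example_sock_map_alloc are inventions for illustration, not the actual
sock_map_alloc() code): the error code is set to -EINVAL before the size
sanity check, so an oversized request returns ERR_PTR(-EINVAL) rather than
an unrelated or stale value.

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Sketch of the error-propagation pattern; placeholder names, not the
 * actual sock_map_alloc(). err carries -EINVAL before the sanity checks,
 * so a too-large request propagates -EINVAL out of the error path.
 */
struct example_sock_map {
        u32 max_entries;
};

static struct example_sock_map *example_sock_map_alloc(u32 max_entries, u32 limit)
{
        struct example_sock_map *map;
        int err = -EINVAL;

        if (!max_entries || max_entries > limit)
                return ERR_PTR(err);

        err = -ENOMEM;
        map = kzalloc(sizeof(*map), GFP_KERNEL);
        if (!map)
                return ERR_PTR(err);

        map->max_entries = max_entries;
        return map;
}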
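Item 3 hinges on the fact that kvmalloc_array() may fall back to vmalloc(),
which can sleep and therefore cannot honor GFP_ATOMIC. A minimal sketch of the
corrected call shape follows, using a placeholder helper name rather than the
actual __cpu_map_entry_alloc():

#include <linux/mm.h>   /* kvmalloc_array(), kvfree() */

/* Sketch only: a placeholder helper, not the actual __cpu_map_entry_alloc().
 * kvmalloc_array() may fall back to vmalloc() for large requests, and
 * vmalloc() can sleep, so the flags must be GFP_KERNEL (process context)
 * rather than GFP_ATOMIC.
 */
static void **example_alloc_queue(size_t entries)
{
        return kvmalloc_array(entries, sizeof(void *),
                              GFP_KERNEL | __GFP_ZERO);
}

static void example_free_queue(void **queue)
{
        kvfree(queue);
}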
Diffstat (limited to 'kernel/bpf/arraymap.c')
-rw-r--r--   kernel/bpf/arraymap.c   28
1 file changed, 16 insertions(+), 12 deletions(-)
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index b1f66480135b..a364c408f25a 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -73,11 +73,11 @@ static int array_map_alloc_check(union bpf_attr *attr)
 static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 {
         bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
-        int numa_node = bpf_map_attr_numa_node(attr);
+        int ret, numa_node = bpf_map_attr_numa_node(attr);
         u32 elem_size, index_mask, max_entries;
         bool unpriv = !capable(CAP_SYS_ADMIN);
+        u64 cost, array_size, mask64;
         struct bpf_array *array;
-        u64 array_size, mask64;
 
         elem_size = round_up(attr->value_size, 8);
 
@@ -109,8 +109,19 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
                 array_size += (u64) max_entries * elem_size;
 
         /* make sure there is no u32 overflow later in round_up() */
-        if (array_size >= U32_MAX - PAGE_SIZE)
+        cost = array_size;
+        if (cost >= U32_MAX - PAGE_SIZE)
                 return ERR_PTR(-ENOMEM);
+        if (percpu) {
+                cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
+                if (cost >= U32_MAX - PAGE_SIZE)
+                        return ERR_PTR(-ENOMEM);
+        }
+        cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+
+        ret = bpf_map_precharge_memlock(cost);
+        if (ret < 0)
+                return ERR_PTR(ret);
 
         /* allocate all map elements and zero-initialize them */
         array = bpf_map_area_alloc(array_size, numa_node);
@@ -121,20 +132,13 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 
         /* copy mandatory map attributes */
         bpf_map_init_from_attr(&array->map, attr);
+        array->map.pages = cost;
         array->elem_size = elem_size;
 
-        if (!percpu)
-                goto out;
-
-        array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();
-
-        if (array_size >= U32_MAX - PAGE_SIZE ||
-            bpf_array_alloc_percpu(array)) {
+        if (percpu && bpf_array_alloc_percpu(array)) {
                 bpf_map_area_free(array);
                 return ERR_PTR(-ENOMEM);
         }
-out:
-        array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;
 
         return &array->map;
 }
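
The arraymap patch above moves memlock accounting in front of the allocation:
the total cost, including the per-CPU value area for BPF_MAP_TYPE_PERCPU_ARRAY,
is computed first, rounded up to whole pages, and charged via
bpf_map_precharge_memlock() before bpf_map_area_alloc() runs, with
array->map.pages set from that same page count. The stand-alone user-space
sketch below only reproduces the page-cost arithmetic with invented example
numbers; the map parameters and CPU count are assumptions, and the base-size
handling for per-CPU maps (pointer slots vs. inline values) is simplified.

/* Stand-alone sketch of the page-cost arithmetic from the patch above.
 * All example values are invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE  4096ULL
#define PAGE_SHIFT 12

/* power-of-two round_up(), same shape as the kernel macro */
static uint64_t round_up(uint64_t x, uint64_t align)
{
        return (x + align - 1) & ~(align - 1);
}

int main(void)
{
        /* hypothetical map parameters, not taken from the commit */
        uint64_t max_entries = 10000;
        uint64_t value_size  = 20;
        uint64_t ncpus       = 8;   /* stand-in for num_possible_cpus() */
        int      percpu      = 1;

        uint64_t elem_size = round_up(value_size, 8);   /* 24 bytes here */
        uint64_t cost = max_entries * elem_size;        /* base array area */

        /* per-CPU value storage is charged up front as well */
        if (percpu)
                cost += max_entries * elem_size * ncpus;

        /* round to whole pages; this count is what gets precharged against
         * RLIMIT_MEMLOCK before any allocation takes place */
        uint64_t pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

        printf("cost = %llu bytes -> %llu pages\n",
               (unsigned long long)cost, (unsigned long long)pages);
        return 0;
}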