author     Palmer Dabbelt <palmer@rivosinc.com>    2025-03-26 15:54:12 -0700
committer  Palmer Dabbelt <palmer@rivosinc.com>    2025-03-26 15:54:12 -0700
commit     df02351331671abb26788bc13f6d276e26ae068f (patch)
tree       4e1f8ee707cf6570f39feb47fc5bb151358ebd34 /kernel/bpf/syscall.c
parent     2014c95afecee3e76ca4a56956a936e23283f05b (diff)
parent     74f4bf9d15ad1d6862b828d486ed10ea0e874a23 (diff)
Merge tag 'riscv-mw1-6.15-rc1' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/alexghiti/linux into for-next
riscv patches for 6.15-rc1

* A bunch of fixes:
  - We were missing a secondary mmu notifier call when flushing the tlb,
    which is required for IOMMU
  - Fix ftrace panics by saving the registers as expected by ftrace
  - Fix a couple of stimecmp usages related to cpu hotplug
  - Fix a bunch of issues in the misaligned probing handling

* Perf improvements:
  - Introduce support for runtime constants, improving the performance of d_hash()
  - Add support for huge pfnmaps to improve tlb utilization
  - Use Zawrs to improve smp_cond_load8/16(), used by the queued spinlocks

* Hwprobe additions:
  - Add support for Zicntr and Zihpm
  - Add support for Zaamo and Zalrsc
  - Add support for the bfloat16 extensions
  - Add support for Zicbom (only enabling clean and flush, not inval, for
    security reasons)

* Misc:
  - Add a kernel parameter to bypass the misaligned speed probing, since
    we can't rely on Zicclsm

* tag 'riscv-mw1-6.15-rc1' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/alexghiti/linux: (1585 commits)
  riscv: Add runtime constant support
  riscv: Move nop definition to insn-def.h
  Documentation/kernel-parameters: Add riscv unaligned speed parameters
  riscv: Add parameter for skipping access speed tests
  riscv: Fix set up of vector cpu hotplug callback
  riscv: Fix set up of cpu hotplug callbacks
  riscv: Change check_unaligned_access_speed_all_cpus to void
  riscv: Fix check_unaligned_access_all_cpus
  riscv: Fix riscv_online_cpu_vec
  riscv: Annotate unaligned access init functions
  KVM: riscv: selftests: Add Zaamo/Zalrsc extensions to get-reg-list test
  RISC-V: KVM: Allow Zaamo/Zalrsc extensions for Guest/VM
  riscv: hwprobe: export Zaamo and Zalrsc extensions
  riscv: add parsing for Zaamo and Zalrsc extensions
  dt-bindings: riscv: add Zaamo and Zalrsc ISA extension description
  riscv: fgraph: Fix stack layout to match __arch_ftrace_regs argument of ftrace_return_to_handler
  riscv: fgraph: Select HAVE_FUNCTION_GRAPH_TRACER depends on HAVE_DYNAMIC_FTRACE_WITH_ARGS
  riscv: Fix missing __free_pages() in check_vector_unaligned_access()
  riscv: Fix the __riscv_copy_vec_words_unaligned implementation
  riscv: mm: Don't use %pK through printk
  ...
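The hwprobe additions above are consumed from userspace through the riscv_hwprobe(2) syscall. A minimal, hypothetical sketch of probing for the new atomic-extension bits follows; it assumes the uapi names RISCV_HWPROBE_EXT_ZAAMO and RISCV_HWPROBE_EXT_ZALRSC under the existing RISCV_HWPROBE_KEY_IMA_EXT_0 key, and it only builds on riscv with headers that include this series.

/* Hypothetical userspace sketch (not from this merge): query the newly
 * exported extension bits via riscv_hwprobe(2). The bit names below are
 * assumed from the series description.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/hwprobe.h>

int main(void)
{
	struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_IMA_EXT_0 };

	/* one key/value pair, all online cpus (cpusetsize 0, cpus NULL), no flags */
	if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0))
		return 1;

	printf("Zaamo:  %s\n", pair.value & RISCV_HWPROBE_EXT_ZAAMO  ? "yes" : "no");
	printf("Zalrsc: %s\n", pair.value & RISCV_HWPROBE_EXT_ZALRSC ? "yes" : "no");
	return 0;
}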
Diffstat (limited to 'kernel/bpf/syscall.c')
-rw-r--r--  kernel/bpf/syscall.c  43
1 file changed, 22 insertions, 21 deletions
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index c420edbfb7c8..e1e42e918ba7 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1035,7 +1035,7 @@ static const struct vm_operations_struct bpf_map_default_vmops = {
static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct bpf_map *map = filp->private_data;
- int err;
+ int err = 0;
if (!map->ops->map_mmap || !IS_ERR_OR_NULL(map->record))
return -ENOTSUPP;
@@ -1059,24 +1059,33 @@ static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
err = -EACCES;
goto out;
}
+ bpf_map_write_active_inc(map);
}
+out:
+ mutex_unlock(&map->freeze_mutex);
+ if (err)
+ return err;
/* set default open/close callbacks */
vma->vm_ops = &bpf_map_default_vmops;
vma->vm_private_data = map;
vm_flags_clear(vma, VM_MAYEXEC);
+ /* If mapping is read-only, then disallow potentially re-mapping with
+ * PROT_WRITE by dropping VM_MAYWRITE flag. This VM_MAYWRITE clearing
+ * means that as far as BPF map's memory-mapped VMAs are concerned,
+ * VM_WRITE and VM_MAYWRITE are equivalent, if one of them is set,
+ * both should be set, so we can forget about VM_MAYWRITE and always
+ * check just VM_WRITE
+ */
if (!(vma->vm_flags & VM_WRITE))
- /* disallow re-mapping with PROT_WRITE */
vm_flags_clear(vma, VM_MAYWRITE);
err = map->ops->map_mmap(map, vma);
- if (err)
- goto out;
+ if (err) {
+ if (vma->vm_flags & VM_WRITE)
+ bpf_map_write_active_dec(map);
+ }
- if (vma->vm_flags & VM_MAYWRITE)
- bpf_map_write_active_inc(map);
-out:
- mutex_unlock(&map->freeze_mutex);
return err;
}
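The comment added above spells out the invariant this rework relies on: for BPF map mappings, VM_WRITE and VM_MAYWRITE are kept in sync, so a mapping created without PROT_WRITE can never be upgraded to writable later. A rough userspace illustration (not part of the patch; uses the raw bpf(2) syscall to stay self-contained and needs CAP_BPF/root) might look like:

/* Hypothetical illustration: the kernel drops VM_MAYWRITE for a read-only
 * mapping of an mmapable array map, so mprotect(PROT_WRITE) is rejected. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

int main(void)
{
	union bpf_attr attr = {
		.map_type    = BPF_MAP_TYPE_ARRAY,
		.key_size    = 4,
		.value_size  = 4096,
		.max_entries = 1,
		.map_flags   = BPF_F_MMAPABLE,
	};
	int fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
	void *p;

	if (fd < 0)
		return 1;

	/* read-only mapping: VM_MAYWRITE is cleared for this VMA */
	p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	/* upgrading to writable is expected to fail with EACCES */
	if (mprotect(p, 4096, PROT_READ | PROT_WRITE))
		printf("mprotect(PROT_WRITE) rejected: %s\n", strerror(errno));
	return 0;
}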
@@ -1968,8 +1977,6 @@ int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
return err;
}
-#define MAP_LOOKUP_RETRIES 3
-
int generic_map_lookup_batch(struct bpf_map *map,
const union bpf_attr *attr,
union bpf_attr __user *uattr)
@@ -1979,8 +1986,8 @@ int generic_map_lookup_batch(struct bpf_map *map,
void __user *values = u64_to_user_ptr(attr->batch.values);
void __user *keys = u64_to_user_ptr(attr->batch.keys);
void *buf, *buf_prevkey, *prev_key, *key, *value;
- int err, retry = MAP_LOOKUP_RETRIES;
u32 value_size, cp, max_count;
+ int err;
if (attr->batch.elem_flags & ~BPF_F_LOCK)
return -EINVAL;
@@ -2026,14 +2033,8 @@ int generic_map_lookup_batch(struct bpf_map *map,
err = bpf_map_copy_value(map, key, value,
attr->batch.elem_flags);
- if (err == -ENOENT) {
- if (retry) {
- retry--;
- continue;
- }
- err = -EINTR;
- break;
- }
+ if (err == -ENOENT)
+ goto next_key;
if (err)
goto free_buf;
@@ -2048,12 +2049,12 @@ int generic_map_lookup_batch(struct bpf_map *map,
goto free_buf;
}
+ cp++;
+next_key:
if (!prev_key)
prev_key = buf_prevkey;
swap(prev_key, key);
- retry = MAP_LOOKUP_RETRIES;
- cp++;
cond_resched();
}
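For the generic_map_lookup_batch() change: instead of retrying up to MAP_LOOKUP_RETRIES times and then surfacing -EINTR when an element disappears between get_next_key and the lookup, the walk now simply skips that key. The userspace batched-walk loop is unchanged; a hedged sketch using libbpf's bpf_map_lookup_batch() wrapper (helper names from libbpf, not from this patch) is below, shown over an array map, which uses the generic batch path. The only user-visible difference of the patch is that keys vanishing mid-walk no longer turn into spurious -EINTR returns.

/* Hypothetical userspace sketch (libbpf wrappers, not part of the patch):
 * batched walk over a map served by generic_map_lookup_batch(). -ENOENT on
 * the final call means "end of map". Needs CAP_BPF/root.
 */
#include <stdio.h>
#include <bpf/bpf.h>

int main(void)
{
	int fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "demo",
				sizeof(__u32), sizeof(__u64), 64, NULL);
	__u32 keys[16], count;
	__u64 vals[16], out_batch;
	void *in_batch = NULL;
	int err;

	if (fd < 0)
		return 1;

	for (__u32 k = 0; k < 64; k++) {
		__u64 v = k * 10;
		bpf_map_update_elem(fd, &k, &v, BPF_ANY);
	}

	do {
		count = 16;	/* in: buffer capacity, out: elements copied */
		err = bpf_map_lookup_batch(fd, in_batch, &out_batch,
					   keys, vals, &count, NULL);
		for (__u32 i = 0; i < count; i++)
			printf("key %u -> %llu\n", keys[i],
			       (unsigned long long)vals[i]);
		in_batch = &out_batch;	/* resume from the returned cursor */
	} while (!err);

	return err == -ENOENT ? 0 : 1;	/* -ENOENT: walk completed */
}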