Diffstat (limited to 'security')
 security/Kconfig           | 21 ---------------------
 security/Kconfig.hardening | 33 +++++++++++++++++++++++++++++++++
 security/loadpin/Kconfig   |  2 +-
 security/yama/yama_lsm.c   |  9 ++-------
 4 files changed, 36 insertions(+), 29 deletions(-)
diff --git a/security/Kconfig b/security/Kconfig
index f10dbf15c294..536061cf33a9 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -164,27 +164,6 @@ config LSM_MMAP_MIN_ADDR
this low address space will need the permission specific to the
systems running LSM.
-config HARDENED_USERCOPY
- bool "Harden memory copies between kernel and userspace"
- imply STRICT_DEVMEM
- help
- This option checks for obviously wrong memory regions when
- copying memory to/from the kernel (via copy_to_user() and
- copy_from_user() functions) by rejecting memory ranges that
- are larger than the specified heap object, span multiple
- separately allocated pages, are not on the process stack,
- or are part of the kernel text. This prevents entire classes
- of heap overflow exploits and similar kernel memory exposures.
-
-config FORTIFY_SOURCE
- bool "Harden common str/mem functions against buffer overflows"
- depends on ARCH_HAS_FORTIFY_SOURCE
- # https://github.com/llvm/llvm-project/issues/53645
- depends on !CC_IS_CLANG || !X86_32
- help
- Detect overflows of buffers in common string and memory functions
- where the compiler can determine and validate the buffer sizes.
-
config STATIC_USERMODEHELPER
bool "Force all usermode helper calls through a single binary"
help
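
The two entries deleted above are not dropped: HARDENED_USERCOPY and FORTIFY_SOURCE reappear below under a new "Bounds checking" menu in Kconfig.hardening, with the help text carried over verbatim (FORTIFY_SOURCE additionally gains a CLANG_VERSION >= 160000 escape for the x86-32 Clang workaround). For reference, a minimal sketch of the bug class HARDENED_USERCOPY is aimed at; example_state and example_read() are hypothetical names, not code from this patch:

/* Hypothetical example (not from this patch): a usercopy whose length
 * can exceed the backing heap object.  With CONFIG_HARDENED_USERCOPY,
 * the copy_to_user() below is rejected once len runs past the 64-byte
 * kmalloc object, instead of disclosing adjacent slab memory.
 */
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

struct example_state {
	char data[64];
};

static int example_read(void __user *ubuf, size_t len)
{
	struct example_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
	int ret = 0;

	if (!s)
		return -ENOMEM;

	/* Buggy: len is never clamped to sizeof(s->data); hardened
	 * usercopy catches the oversized range at copy time. */
	if (copy_to_user(ubuf, s->data, len))
		ret = -EFAULT;

	kfree(s);
	return ret;
}
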
diff --git a/security/Kconfig.hardening b/security/Kconfig.hardening
index b56e001e0c6a..c17366ce8224 100644
--- a/security/Kconfig.hardening
+++ b/security/Kconfig.hardening
@@ -280,6 +280,39 @@ config ZERO_CALL_USED_REGS
endmenu
+menu "Bounds checking"
+
+config FORTIFY_SOURCE
+ bool "Harden common str/mem functions against buffer overflows"
+ depends on ARCH_HAS_FORTIFY_SOURCE
+ # https://github.com/llvm/llvm-project/issues/53645
+ depends on !X86_32 || !CC_IS_CLANG || CLANG_VERSION >= 160000
+ help
+ Detect overflows of buffers in common string and memory functions
+ where the compiler can determine and validate the buffer sizes.
+
+config HARDENED_USERCOPY
+ bool "Harden memory copies between kernel and userspace"
+ imply STRICT_DEVMEM
+ help
+ This option checks for obviously wrong memory regions when
+ copying memory to/from the kernel (via copy_to_user() and
+ copy_from_user() functions) by rejecting memory ranges that
+ are larger than the specified heap object, span multiple
+ separately allocated pages, are not on the process stack,
+ or are part of the kernel text. This prevents entire classes
+ of heap overflow exploits and similar kernel memory exposures.
+
+config HARDENED_USERCOPY_DEFAULT_ON
+ bool "Harden memory copies by default"
+ depends on HARDENED_USERCOPY
+ default HARDENED_USERCOPY
+ help
+ This has the effect of setting "hardened_usercopy=on" on the kernel
+ command line. This can be disabled with "hardened_usercopy=off".
+
+endmenu
+
menu "Hardening of kernel data structures"
config LIST_HARDENED
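
A similarly hedged sketch of what the relocated FORTIFY_SOURCE option catches; example_msg and example_fill() are made up for illustration and do not appear in the tree:

/* Hypothetical example (not from this patch): the destination size is
 * visible to the compiler, so the fortified memcpy() wrapper knows via
 * __builtin_object_size() that msg->tag holds 16 bytes and reports the
 * 128-byte copy instead of silently clobbering the 'flags' member.
 */
#include <linux/string.h>
#include <linux/types.h>

struct example_msg {
	char tag[16];
	u32 flags;
};

static void example_fill(struct example_msg *msg, const void *src)
{
	/* Buggy constant length: flagged when FORTIFY_SOURCE is enabled. */
	memcpy(msg->tag, src, 128);
}

The new HARDENED_USERCOPY_DEFAULT_ON option only selects the boot-time default for the runtime check; as its help text notes, it is equivalent to passing hardened_usercopy=on and can still be overridden with hardened_usercopy=off.
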
diff --git a/security/loadpin/Kconfig b/security/loadpin/Kconfig
index 848f8b4a6019..aef63d3e30df 100644
--- a/security/loadpin/Kconfig
+++ b/security/loadpin/Kconfig
@@ -16,7 +16,7 @@ config SECURITY_LOADPIN_ENFORCE
depends on SECURITY_LOADPIN
# Module compression breaks LoadPin unless modules are decompressed in
# the kernel.
- depends on !MODULES || (MODULE_COMPRESS_NONE || MODULE_DECOMPRESS)
+ depends on !MODULE_COMPRESS || MODULE_DECOMPRESS
help
If selected, LoadPin will enforce pinning at boot. If not
selected, it can be enabled at boot with the kernel parameter
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index 1971710620c1..3d064dd4e03f 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -222,7 +222,7 @@ static int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5)
{
int rc = -ENOSYS;
- struct task_struct *myself = current;
+ struct task_struct *myself;
switch (option) {
case PR_SET_PTRACER:
@@ -232,11 +232,7 @@ static int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
* leader checking is handled later when walking the ancestry
* at the time of PTRACE_ATTACH check.
*/
- rcu_read_lock();
- if (!thread_group_leader(myself))
- myself = rcu_dereference(myself->group_leader);
- get_task_struct(myself);
- rcu_read_unlock();
+ myself = current->group_leader;
if (arg2 == 0) {
yama_ptracer_del(NULL, myself);
@@ -255,7 +251,6 @@ static int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
}
}
- put_task_struct(myself);
break;
}
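
The dropped rcu_read_lock()/get_task_struct() pair existed only to pin the thread-group leader. A sketch of the reasoning the simplified code appears to rely on (own_group_leader() is a hypothetical helper, not part of the patch): the caller is itself a live thread of the group, so its leader's task_struct cannot be freed while this code runs.

/* Sketch (not from the patch) of the lifetime assumption behind using
 * current->group_leader directly.
 */
#include <linux/sched.h>

static struct task_struct *own_group_leader(void)
{
	/* No rcu_read_lock()/get_task_struct() needed here: current
	 * cannot exit while executing this code, and the group
	 * leader's task_struct is kept around for as long as any
	 * thread in the group is alive, so this pointer read is safe
	 * for the duration of the call.
	 */
	return current->group_leader;
}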