Diffstat (limited to 'security')
-rw-r--r-- security/Kconfig | 11
-rw-r--r-- security/Kconfig.hardening | 4
-rw-r--r-- security/Makefile | 2
-rw-r--r-- security/apparmor/lsm.c | 3
-rw-r--r-- security/commoncap.c | 64
-rw-r--r-- security/integrity/digsig.c | 2
-rw-r--r-- security/integrity/digsig_asymmetric.c | 30
-rw-r--r-- security/integrity/iint.c | 10
-rw-r--r-- security/integrity/ima/ima.h | 4
-rw-r--r-- security/integrity/ima/ima_appraise.c | 2
-rw-r--r-- security/integrity/ima/ima_kexec.c | 9
-rw-r--r-- security/integrity/ima/ima_main.c | 23
-rw-r--r-- security/integrity/ima/ima_policy.c | 2
-rw-r--r-- security/integrity/ima/ima_template.c | 4
-rw-r--r-- security/integrity/platform_certs/keyring_handler.c | 11
-rw-r--r-- security/integrity/platform_certs/load_uefi.c | 20
-rw-r--r-- security/keys/Kconfig | 3
-rw-r--r-- security/keys/trusted-keys/Makefile | 6
-rw-r--r-- security/keys/trusted-keys/tpm2key.asn1 | 11
-rw-r--r-- security/keys/trusted-keys/trusted_core.c | 360
-rw-r--r-- security/keys/trusted-keys/trusted_tee.c | 318
-rw-r--r-- security/keys/trusted-keys/trusted_tpm1.c | 401
-rw-r--r-- security/keys/trusted-keys/trusted_tpm2.c | 271
-rw-r--r-- security/landlock/Kconfig | 21
-rw-r--r-- security/landlock/Makefile | 4
-rw-r--r-- security/landlock/common.h | 20
-rw-r--r-- security/landlock/cred.c | 46
-rw-r--r-- security/landlock/cred.h | 58
-rw-r--r-- security/landlock/fs.c | 692
-rw-r--r-- security/landlock/fs.h | 70
-rw-r--r-- security/landlock/limits.h | 21
-rw-r--r-- security/landlock/object.c | 67
-rw-r--r-- security/landlock/object.h | 91
-rw-r--r-- security/landlock/ptrace.c | 120
-rw-r--r-- security/landlock/ptrace.h | 14
-rw-r--r-- security/landlock/ruleset.c | 473
-rw-r--r-- security/landlock/ruleset.h | 165
-rw-r--r-- security/landlock/setup.c | 40
-rw-r--r-- security/landlock/setup.h | 18
-rw-r--r-- security/landlock/syscalls.c | 451
-rw-r--r-- security/security.c | 71
-rw-r--r-- security/selinux/hooks.c | 228
-rw-r--r-- security/selinux/ima.c | 87
-rw-r--r-- security/selinux/include/classmap.h | 5
-rw-r--r-- security/selinux/include/ima.h | 6
-rw-r--r-- security/selinux/include/objsec.h | 6
-rw-r--r-- security/selinux/include/security.h | 17
-rw-r--r-- security/selinux/nlmsgtab.c | 5
-rw-r--r-- security/selinux/selinuxfs.c | 28
-rw-r--r-- security/selinux/ss/avtab.c | 101
-rw-r--r-- security/selinux/ss/avtab.h | 2
-rw-r--r-- security/selinux/ss/conditional.c | 12
-rw-r--r-- security/selinux/ss/hashtab.c | 2
-rw-r--r-- security/selinux/ss/services.c | 225
-rw-r--r-- security/selinux/ss/sidtab.c | 21
-rw-r--r-- security/selinux/ss/sidtab.h | 4
-rw-r--r-- security/smack/smack.h | 24
-rw-r--r-- security/smack/smack_lsm.c | 74
-rw-r--r-- security/tomoyo/network.c | 2
59 files changed, 4168 insertions, 694 deletions
diff --git a/security/Kconfig b/security/Kconfig
index 7561f6f99f1d..0ced7fd33e4d 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -238,6 +238,7 @@ source "security/loadpin/Kconfig"
source "security/yama/Kconfig"
source "security/safesetid/Kconfig"
source "security/lockdown/Kconfig"
+source "security/landlock/Kconfig"
source "security/integrity/Kconfig"
@@ -277,11 +278,11 @@ endchoice
config LSM
string "Ordered list of enabled LSMs"
- default "lockdown,yama,loadpin,safesetid,integrity,smack,selinux,tomoyo,apparmor,bpf" if DEFAULT_SECURITY_SMACK
- default "lockdown,yama,loadpin,safesetid,integrity,apparmor,selinux,smack,tomoyo,bpf" if DEFAULT_SECURITY_APPARMOR
- default "lockdown,yama,loadpin,safesetid,integrity,tomoyo,bpf" if DEFAULT_SECURITY_TOMOYO
- default "lockdown,yama,loadpin,safesetid,integrity,bpf" if DEFAULT_SECURITY_DAC
- default "lockdown,yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor,bpf"
+ default "landlock,lockdown,yama,loadpin,safesetid,integrity,smack,selinux,tomoyo,apparmor,bpf" if DEFAULT_SECURITY_SMACK
+ default "landlock,lockdown,yama,loadpin,safesetid,integrity,apparmor,selinux,smack,tomoyo,bpf" if DEFAULT_SECURITY_APPARMOR
+ default "landlock,lockdown,yama,loadpin,safesetid,integrity,tomoyo,bpf" if DEFAULT_SECURITY_TOMOYO
+ default "landlock,lockdown,yama,loadpin,safesetid,integrity,bpf" if DEFAULT_SECURITY_DAC
+ default "landlock,lockdown,yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor,bpf"
help
A comma-separated list of LSMs, in initialization order.
Any LSMs left off this list will be ignored. This can be
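
The ordering above only takes effect for LSMs that register themselves with the framework. As a minimal sketch of how a new LSM such as Landlock plugs into this list (the DEFINE_LSM interface is assumed from include/linux/lsm_hooks.h; the real registration lives in security/landlock/setup.c from this series and carries more fields):

/*
 * Hedged sketch, not the patch contents: an LSM exposes a struct lsm_info
 * via DEFINE_LSM(), and the CONFIG_LSM / "lsm=" ordering decides when its
 * init function runs relative to the other entries.
 */
#include <linux/lsm_hooks.h>

static int __init landlock_init(void)
{
	/* security_add_hooks() would be called here */
	return 0;
}

DEFINE_LSM(landlock) = {
	.name = "landlock",
	.init = landlock_init,
};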
diff --git a/security/Kconfig.hardening b/security/Kconfig.hardening
index 269967c4fc1b..a56c36470cb1 100644
--- a/security/Kconfig.hardening
+++ b/security/Kconfig.hardening
@@ -64,7 +64,7 @@ choice
config GCC_PLUGIN_STRUCTLEAK_BYREF
bool "zero-init structs passed by reference (strong)"
depends on GCC_PLUGINS
- depends on !(KASAN && KASAN_STACK=1)
+ depends on !(KASAN && KASAN_STACK)
select GCC_PLUGIN_STRUCTLEAK
help
Zero-initialize any structures on the stack that may
@@ -82,7 +82,7 @@ choice
config GCC_PLUGIN_STRUCTLEAK_BYREF_ALL
bool "zero-init anything passed by reference (very strong)"
depends on GCC_PLUGINS
- depends on !(KASAN && KASAN_STACK=1)
+ depends on !(KASAN && KASAN_STACK)
select GCC_PLUGIN_STRUCTLEAK
help
Zero-initialize any stack variables that may be passed
diff --git a/security/Makefile b/security/Makefile
index 3baf435de541..47e432900e24 100644
--- a/security/Makefile
+++ b/security/Makefile
@@ -13,6 +13,7 @@ subdir-$(CONFIG_SECURITY_LOADPIN) += loadpin
subdir-$(CONFIG_SECURITY_SAFESETID) += safesetid
subdir-$(CONFIG_SECURITY_LOCKDOWN_LSM) += lockdown
subdir-$(CONFIG_BPF_LSM) += bpf
+subdir-$(CONFIG_SECURITY_LANDLOCK) += landlock
# always enable default capabilities
obj-y += commoncap.o
@@ -32,6 +33,7 @@ obj-$(CONFIG_SECURITY_SAFESETID) += safesetid/
obj-$(CONFIG_SECURITY_LOCKDOWN_LSM) += lockdown/
obj-$(CONFIG_CGROUPS) += device_cgroup.o
obj-$(CONFIG_BPF_LSM) += bpf/
+obj-$(CONFIG_SECURITY_LANDLOCK) += landlock/
# Object integrity file lists
subdir-$(CONFIG_INTEGRITY) += integrity
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 240a53387e6b..f72406fe1bf2 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -1252,7 +1252,8 @@ static struct security_hook_list apparmor_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(task_free, apparmor_task_free),
LSM_HOOK_INIT(task_alloc, apparmor_task_alloc),
- LSM_HOOK_INIT(task_getsecid, apparmor_task_getsecid),
+ LSM_HOOK_INIT(task_getsecid_subj, apparmor_task_getsecid),
+ LSM_HOOK_INIT(task_getsecid_obj, apparmor_task_getsecid),
LSM_HOOK_INIT(task_setrlimit, apparmor_task_setrlimit),
LSM_HOOK_INIT(task_kill, apparmor_task_kill),
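
This rename follows the task_getsecid split introduced elsewhere in this series: one hook returns the subjective (acting) security ID of a task, the other the objective (targeted) one. A sketch of the pair as declared in include/linux/lsm_hook_defs.h (signatures assumed, since that header is outside this diff):

LSM_HOOK(void, LSM_RET_VOID, task_getsecid_subj,
	 struct task_struct *p, u32 *secid)
LSM_HOOK(void, LSM_RET_VOID, task_getsecid_obj,
	 struct task_struct *p, u32 *secid)

AppArmor wires the same handler to both; callers pick the variant that matches their question, which is why the IMA call sites below move to security_task_getsecid_subj(): they ask what current is allowed to do.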
diff --git a/security/commoncap.c b/security/commoncap.c
index 28f4d25480df..3f810d37b71b 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -50,7 +50,7 @@ static void warn_setuid_and_fcaps_mixed(const char *fname)
/**
* cap_capable - Determine whether a task has a particular effective capability
* @cred: The credentials to use
- * @ns: The user namespace in which we need the capability
+ * @targ_ns: The user namespace in which we need the capability
* @cap: The capability to check for
* @opts: Bitmask of options defined in include/linux/security.h
*
@@ -289,7 +289,7 @@ int cap_capset(struct cred *new,
* affects the security markings on that inode, and if it is, should
* inode_killpriv() be invoked or the change rejected.
*
- * Returns 1 if security.capability has a value, meaning inode_killpriv()
+ * Return: 1 if security.capability has a value, meaning inode_killpriv()
* is required, 0 otherwise, meaning inode_killpriv() is not required.
*/
int cap_inode_need_killpriv(struct dentry *dentry)
@@ -315,7 +315,7 @@ int cap_inode_need_killpriv(struct dentry *dentry)
* permissions. On non-idmapped mounts or if permission checking is to be
 * performed on the raw inode simply pass init_user_ns.
*
- * Returns 0 if successful, -ve on error.
+ * Return: 0 if successful, -ve on error.
*/
int cap_inode_killpriv(struct user_namespace *mnt_userns, struct dentry *dentry)
{
@@ -400,7 +400,7 @@ int cap_inode_getsecurity(struct user_namespace *mnt_userns,
&tmpbuf, size, GFP_NOFS);
dput(dentry);
- if (ret < 0)
+ if (ret < 0 || !tmpbuf)
return ret;
fs_ns = inode->i_sb->s_user_ns;
@@ -532,7 +532,7 @@ static bool validheader(size_t size, const struct vfs_cap_data *cap)
* permissions. On non-idmapped mounts or if permission checking is to be
 * performed on the raw inode simply pass init_user_ns.
*
- * If all is ok, we return the new size, on error return < 0.
+ * Return: On success, return the new size; on error, return < 0.
*/
int cap_convert_nscap(struct user_namespace *mnt_userns, struct dentry *dentry,
const void **ivalue, size_t size)
@@ -543,8 +543,7 @@ int cap_convert_nscap(struct user_namespace *mnt_userns, struct dentry *dentry,
__u32 magic, nsmagic;
struct inode *inode = d_backing_inode(dentry);
struct user_namespace *task_ns = current_user_ns(),
- *fs_ns = inode->i_sb->s_user_ns,
- *ancestor;
+ *fs_ns = inode->i_sb->s_user_ns;
kuid_t rootid;
size_t newsize;
@@ -567,15 +566,6 @@ int cap_convert_nscap(struct user_namespace *mnt_userns, struct dentry *dentry,
if (nsrootid == -1)
return -EINVAL;
- /*
- * Do not allow allow adding a v3 filesystem capability xattr
- * if the rootid field is ambiguous.
- */
- for (ancestor = task_ns->parent; ancestor; ancestor = ancestor->parent) {
- if (from_kuid(ancestor, rootid) == 0)
- return -EINVAL;
- }
-
newsize = sizeof(struct vfs_ns_cap_data);
nscap = kmalloc(newsize, GFP_ATOMIC);
if (!nscap)
@@ -891,7 +881,9 @@ static inline bool nonroot_raised_pE(struct cred *new, const struct cred *old,
*
* Set up the proposed credentials for a new execution context being
* constructed by execve(). The proposed creds in @bprm->cred is altered,
- * which won't take effect immediately. Returns 0 if successful, -ve on error.
+ * which won't take effect immediately.
+ *
+ * Return: 0 if successful, -ve on error.
*/
int cap_bprm_creds_from_file(struct linux_binprm *bprm, struct file *file)
{
@@ -1127,7 +1119,9 @@ static inline void cap_emulate_setxuid(struct cred *new, const struct cred *old)
* @flags: Indications of what has changed
*
* Fix up the results of setuid() call before the credential changes are
- * actually applied, returning 0 to grant the changes, -ve to deny them.
+ * actually applied.
+ *
+ * Return: 0 to grant the changes, -ve to deny them.
*/
int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags)
{
@@ -1197,7 +1191,9 @@ static int cap_safe_nice(struct task_struct *p)
* @p: The task to affect
*
 * Determine if the requested scheduler policy change is permitted for the
- * specified task, returning 0 if permission is granted, -ve if denied.
+ * specified task.
+ *
+ * Return: 0 if permission is granted, -ve if denied.
*/
int cap_task_setscheduler(struct task_struct *p)
{
@@ -1205,12 +1201,14 @@ int cap_task_setscheduler(struct task_struct *p)
}
/**
- * cap_task_ioprio - Detemine if I/O priority change is permitted
+ * cap_task_setioprio - Determine if I/O priority change is permitted
* @p: The task to affect
* @ioprio: The I/O priority to set
*
 * Determine if the requested I/O priority change is permitted for the specified
- * task, returning 0 if permission is granted, -ve if denied.
+ * task.
+ *
+ * Return: 0 if permission is granted, -ve if denied.
*/
int cap_task_setioprio(struct task_struct *p, int ioprio)
{
@@ -1218,12 +1216,14 @@ int cap_task_setioprio(struct task_struct *p, int ioprio)
}
/**
- * cap_task_ioprio - Detemine if task priority change is permitted
+ * cap_task_setnice - Determine if task priority change is permitted
* @p: The task to affect
* @nice: The nice value to set
*
 * Determine if the requested task priority change is permitted for the
- * specified task, returning 0 if permission is granted, -ve if denied.
+ * specified task.
+ *
+ * Return: 0 if permission is granted, -ve if denied.
*/
int cap_task_setnice(struct task_struct *p, int nice)
{
@@ -1253,12 +1253,15 @@ static int cap_prctl_drop(unsigned long cap)
/**
* cap_task_prctl - Implement process control functions for this security module
* @option: The process control function requested
- * @arg2, @arg3, @arg4, @arg5: The argument data for this function
+ * @arg2: The argument data for this function
+ * @arg3: The argument data for this function
+ * @arg4: The argument data for this function
+ * @arg5: The argument data for this function
*
* Allow process control functions (sys_prctl()) to alter capabilities; may
* also deny access to other functions not otherwise implemented here.
*
- * Returns 0 or +ve on success, -ENOSYS if this function is not implemented
+ * Return: 0 or +ve on success, -ENOSYS if this function is not implemented
* here, other -ve on error. If -ENOSYS is returned, sys_prctl() and other LSM
* modules will consider performing the function.
*/
@@ -1393,7 +1396,9 @@ int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
* @pages: The size of the mapping
*
* Determine whether the allocation of a new virtual mapping by the current
- * task is permitted, returning 1 if permission is granted, 0 if not.
+ * task is permitted.
+ *
+ * Return: 1 if permission is granted, 0 if not.
*/
int cap_vm_enough_memory(struct mm_struct *mm, long pages)
{
@@ -1406,14 +1411,15 @@ int cap_vm_enough_memory(struct mm_struct *mm, long pages)
return cap_sys_admin;
}
-/*
+/**
* cap_mmap_addr - check if able to map given addr
* @addr: address attempting to be mapped
*
* If the process is attempting to map memory below dac_mmap_min_addr they need
* CAP_SYS_RAWIO. The other parameters to this function are unused by the
- * capability security module. Returns 0 if this mapping should be allowed
- * -EPERM if not.
+ * capability security module.
+ *
+ * Return: 0 if this mapping should be allowed or -EPERM if not.
*/
int cap_mmap_addr(unsigned long addr)
{
diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c
index 250fb0836156..3b06a01bd0fd 100644
--- a/security/integrity/digsig.c
+++ b/security/integrity/digsig.c
@@ -111,6 +111,8 @@ static int __init __integrity_init_keyring(const unsigned int id,
} else {
if (id == INTEGRITY_KEYRING_PLATFORM)
set_platform_trusted_keys(keyring[id]);
+ if (id == INTEGRITY_KEYRING_IMA)
+ load_module_cert(keyring[id]);
}
return err;
diff --git a/security/integrity/digsig_asymmetric.c b/security/integrity/digsig_asymmetric.c
index a662024b4c70..23240d793b07 100644
--- a/security/integrity/digsig_asymmetric.c
+++ b/security/integrity/digsig_asymmetric.c
@@ -84,6 +84,7 @@ int asymmetric_verify(struct key *keyring, const char *sig,
{
struct public_key_signature pks;
struct signature_v2_hdr *hdr = (struct signature_v2_hdr *)sig;
+ const struct public_key *pk;
struct key *key;
int ret;
@@ -105,23 +106,20 @@ int asymmetric_verify(struct key *keyring, const char *sig,
memset(&pks, 0, sizeof(pks));
pks.hash_algo = hash_algo_name[hdr->hash_algo];
- switch (hdr->hash_algo) {
- case HASH_ALGO_STREEBOG_256:
- case HASH_ALGO_STREEBOG_512:
- /* EC-RDSA and Streebog should go together. */
- pks.pkey_algo = "ecrdsa";
- pks.encoding = "raw";
- break;
- case HASH_ALGO_SM3_256:
- /* SM2 and SM3 should go together. */
- pks.pkey_algo = "sm2";
- pks.encoding = "raw";
- break;
- default:
- pks.pkey_algo = "rsa";
+
+ pk = asymmetric_key_public_key(key);
+ pks.pkey_algo = pk->pkey_algo;
+ if (!strcmp(pk->pkey_algo, "rsa"))
pks.encoding = "pkcs1";
- break;
- }
+ else if (!strncmp(pk->pkey_algo, "ecdsa-", 6))
+ /* ecdsa-nist-p192 etc. */
+ pks.encoding = "x962";
+ else if (!strcmp(pk->pkey_algo, "ecrdsa") ||
+ !strcmp(pk->pkey_algo, "sm2"))
+ pks.encoding = "raw";
+ else
+ return -ENOPKG;
+
pks.digest = (u8 *)data;
pks.digest_size = datalen;
pks.s = hdr->sig;
diff --git a/security/integrity/iint.c b/security/integrity/iint.c
index 1d20003243c3..fca8a9409e4a 100644
--- a/security/integrity/iint.c
+++ b/security/integrity/iint.c
@@ -98,6 +98,14 @@ struct integrity_iint_cache *integrity_inode_get(struct inode *inode)
struct rb_node *node, *parent = NULL;
struct integrity_iint_cache *iint, *test_iint;
+ /*
+ * The integrity's "iint_cache" is initialized at security_init(),
+ * unless it is not included in the ordered list of LSMs enabled
+ * on the boot command line.
+ */
+ if (!iint_cache)
+ panic("%s: lsm=integrity required.\n", __func__);
+
iint = integrity_iint_find(inode);
if (iint)
return iint;
@@ -152,7 +160,7 @@ void integrity_inode_free(struct inode *inode)
static void init_once(void *foo)
{
- struct integrity_iint_cache *iint = foo;
+ struct integrity_iint_cache *iint = (struct integrity_iint_cache *) foo;
memset(iint, 0, sizeof(*iint));
iint->ima_file_status = INTEGRITY_UNKNOWN;
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index 8e8b5251550e..f0e448ed1f9f 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -24,10 +24,6 @@
#include "../integrity.h"
-#ifdef CONFIG_HAVE_IMA_KEXEC
-#include <asm/ima.h>
-#endif
-
enum ima_show_type { IMA_SHOW_BINARY, IMA_SHOW_BINARY_NO_FIELD_LEN,
IMA_SHOW_BINARY_OLD_STRING_FMT, IMA_SHOW_ASCII };
enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8, TPM_PCR10 = 10 };
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
index 565e33ff19d0..4e5eb0236278 100644
--- a/security/integrity/ima/ima_appraise.c
+++ b/security/integrity/ima/ima_appraise.c
@@ -76,7 +76,7 @@ int ima_must_appraise(struct user_namespace *mnt_userns, struct inode *inode,
if (!ima_appraise)
return 0;
- security_task_getsecid(current, &secid);
+ security_task_getsecid_subj(current, &secid);
return ima_match_policy(mnt_userns, inode, current_cred(), secid, func,
mask, IMA_APPRAISE | IMA_HASH, NULL, NULL, NULL);
}
diff --git a/security/integrity/ima/ima_kexec.c b/security/integrity/ima/ima_kexec.c
index e29bea3dd4cc..667887665823 100644
--- a/security/integrity/ima/ima_kexec.c
+++ b/security/integrity/ima/ima_kexec.c
@@ -10,6 +10,7 @@
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/kexec.h>
+#include <linux/of.h>
#include "ima.h"
#ifdef CONFIG_IMA_KEXEC
@@ -123,12 +124,8 @@ void ima_add_kexec_buffer(struct kimage *image)
return;
}
- ret = arch_ima_add_kexec_buffer(image, kbuf.mem, kexec_segment_size);
- if (ret) {
- pr_err("Error passing over kexec measurement buffer.\n");
- return;
- }
-
+ image->ima_buffer_addr = kbuf.mem;
+ image->ima_buffer_size = kexec_segment_size;
image->ima_buffer = kexec_buffer;
pr_debug("kexec measurement buffer for the loaded kernel at 0x%lx.\n",
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 9ef748ea829f..906c1d8e0b71 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -391,7 +391,7 @@ int ima_file_mmap(struct file *file, unsigned long prot)
u32 secid;
if (file && (prot & PROT_EXEC)) {
- security_task_getsecid(current, &secid);
+ security_task_getsecid_subj(current, &secid);
return process_measurement(file, current_cred(), secid, NULL,
0, MAY_EXEC, MMAP_CHECK);
}
@@ -429,7 +429,7 @@ int ima_file_mprotect(struct vm_area_struct *vma, unsigned long prot)
!(prot & PROT_EXEC) || (vma->vm_flags & VM_EXEC))
return 0;
- security_task_getsecid(current, &secid);
+ security_task_getsecid_subj(current, &secid);
inode = file_inode(vma->vm_file);
action = ima_get_action(file_mnt_user_ns(vma->vm_file), inode,
current_cred(), secid, MAY_EXEC, MMAP_CHECK,
@@ -470,7 +470,7 @@ int ima_bprm_check(struct linux_binprm *bprm)
int ret;
u32 secid;
- security_task_getsecid(current, &secid);
+ security_task_getsecid_subj(current, &secid);
ret = process_measurement(bprm->file, current_cred(), secid, NULL, 0,
MAY_EXEC, BPRM_CHECK);
if (ret)
@@ -482,7 +482,7 @@ int ima_bprm_check(struct linux_binprm *bprm)
}
/**
- * ima_path_check - based on policy, collect/store measurement.
+ * ima_file_check - based on policy, collect/store measurement.
* @file: pointer to the file to be measured
* @mask: contains MAY_READ, MAY_WRITE, MAY_EXEC or MAY_APPEND
*
@@ -495,7 +495,7 @@ int ima_file_check(struct file *file, int mask)
{
u32 secid;
- security_task_getsecid(current, &secid);
+ security_task_getsecid_subj(current, &secid);
return process_measurement(file, current_cred(), secid, NULL, 0,
mask & (MAY_READ | MAY_WRITE | MAY_EXEC |
MAY_APPEND), FILE_CHECK);
@@ -606,6 +606,9 @@ void ima_post_create_tmpfile(struct user_namespace *mnt_userns,
struct integrity_iint_cache *iint;
int must_appraise;
+ if (!ima_policy_flag || !S_ISREG(inode->i_mode))
+ return;
+
must_appraise = ima_must_appraise(mnt_userns, inode, MAY_ACCESS,
FILE_CHECK);
if (!must_appraise)
@@ -636,6 +639,9 @@ void ima_post_path_mknod(struct user_namespace *mnt_userns,
struct inode *inode = dentry->d_inode;
int must_appraise;
+ if (!ima_policy_flag || !S_ISREG(inode->i_mode))
+ return;
+
must_appraise = ima_must_appraise(mnt_userns, inode, MAY_ACCESS,
FILE_CHECK);
if (!must_appraise)
@@ -686,7 +692,7 @@ int ima_read_file(struct file *file, enum kernel_read_file_id read_id,
/* Read entire file for all partial reads. */
func = read_idmap[read_id] ?: FILE_CHECK;
- security_task_getsecid(current, &secid);
+ security_task_getsecid_subj(current, &secid);
return process_measurement(file, current_cred(), secid, NULL,
0, MAY_READ, func);
}
@@ -729,7 +735,7 @@ int ima_post_read_file(struct file *file, void *buf, loff_t size,
}
func = read_idmap[read_id] ?: FILE_CHECK;
- security_task_getsecid(current, &secid);
+ security_task_getsecid_subj(current, &secid);
return process_measurement(file, current_cred(), secid, buf, size,
MAY_READ, func);
}
@@ -780,6 +786,7 @@ int ima_load_data(enum kernel_load_data_id id, bool contents)
pr_err("impossible to appraise a module without a file descriptor. sig_enforce kernel parameter might help\n");
return -EACCES; /* INTEGRITY_UNKNOWN */
}
+ break;
default:
break;
}
@@ -872,7 +879,7 @@ void process_buffer_measurement(struct user_namespace *mnt_userns,
* buffer measurements.
*/
if (func) {
- security_task_getsecid(current, &secid);
+ security_task_getsecid_subj(current, &secid);
action = ima_get_action(mnt_userns, inode, current_cred(),
secid, 0, func, &pcr, &template,
func_data);
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index 4f8cb155e4fd..fd5d46e511f1 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -599,6 +599,7 @@ static bool ima_match_rules(struct ima_rule_entry *rule,
rc = ima_filter_rule_match(secid, rule->lsm[i].type,
Audit_equal,
rule->lsm[i].rule);
+ break;
default:
break;
}
@@ -836,6 +837,7 @@ void __init ima_init_policy(void)
add_rules(default_measurement_rules,
ARRAY_SIZE(default_measurement_rules),
IMA_DEFAULT_POLICY);
+ break;
default:
break;
}
diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c
index e22e510ae92d..4e081e650047 100644
--- a/security/integrity/ima/ima_template.c
+++ b/security/integrity/ima/ima_template.c
@@ -494,8 +494,8 @@ int ima_restore_measurement_list(loff_t size, void *buf)
}
}
- entry->pcr = !ima_canonical_fmt ? *(hdr[HDR_PCR].data) :
- le32_to_cpu(*(hdr[HDR_PCR].data));
+ entry->pcr = !ima_canonical_fmt ? *(u32 *)(hdr[HDR_PCR].data) :
+ le32_to_cpu(*(u32 *)(hdr[HDR_PCR].data));
ret = ima_restore_measurement_entry(entry);
if (ret < 0)
break;
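
The added casts matter because the measurement-list header fields carry a u8 *data pointer, so the old code dereferenced only the first byte of the serialized PCR value. A small illustration (the struct shape is assumed from security/integrity/integrity.h; the variable names are illustrative):

/* Illustrative only, not part of the patch: */
struct ima_field_data {
	u8 *data;
	u32 len;
};

u8 *data = pcr_field;			/* points at a 32-bit PCR value */
u32 wrong = *data;			/* old code: reads one byte */
u32 native = *(u32 *)data;		/* host-endian measurement list */
u32 canon = le32_to_cpu(*(u32 *)data);	/* canonical (little-endian) list */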
diff --git a/security/integrity/platform_certs/keyring_handler.c b/security/integrity/platform_certs/keyring_handler.c
index c5ba695c10e3..5604bd57c990 100644
--- a/security/integrity/platform_certs/keyring_handler.c
+++ b/security/integrity/platform_certs/keyring_handler.c
@@ -56,6 +56,15 @@ static __init void uefi_blacklist_binary(const char *source,
}
/*
+ * Add an X509 cert to the revocation list.
+ */
+static __init void uefi_revocation_list_x509(const char *source,
+ const void *data, size_t len)
+{
+ add_key_to_revocation_list(data, len);
+}
+
+/*
* Return the appropriate handler for particular signature list types found in
* the UEFI db and MokListRT tables.
*/
@@ -76,5 +85,7 @@ __init efi_element_handler_t get_handler_for_dbx(const efi_guid_t *sig_type)
return uefi_blacklist_x509_tbs;
if (efi_guidcmp(*sig_type, efi_cert_sha256_guid) == 0)
return uefi_blacklist_binary;
+ if (efi_guidcmp(*sig_type, efi_cert_x509_guid) == 0)
+ return uefi_revocation_list_x509;
return 0;
}
diff --git a/security/integrity/platform_certs/load_uefi.c b/security/integrity/platform_certs/load_uefi.c
index ee4b4c666854..f290f78c3f30 100644
--- a/security/integrity/platform_certs/load_uefi.c
+++ b/security/integrity/platform_certs/load_uefi.c
@@ -132,8 +132,9 @@ static int __init load_moklist_certs(void)
static int __init load_uefi_certs(void)
{
efi_guid_t secure_var = EFI_IMAGE_SECURITY_DATABASE_GUID;
- void *db = NULL, *dbx = NULL;
- unsigned long dbsize = 0, dbxsize = 0;
+ efi_guid_t mok_var = EFI_SHIM_LOCK_GUID;
+ void *db = NULL, *dbx = NULL, *mokx = NULL;
+ unsigned long dbsize = 0, dbxsize = 0, mokxsize = 0;
efi_status_t status;
int rc = 0;
@@ -175,6 +176,21 @@ static int __init load_uefi_certs(void)
kfree(dbx);
}
+ mokx = get_cert_list(L"MokListXRT", &mok_var, &mokxsize, &status);
+ if (!mokx) {
+ if (status == EFI_NOT_FOUND)
+ pr_debug("mokx variable wasn't found\n");
+ else
+ pr_info("Couldn't get mokx list\n");
+ } else {
+ rc = parse_efi_signature_list("UEFI:MokListXRT",
+ mokx, mokxsize,
+ get_handler_for_dbx);
+ if (rc)
+ pr_err("Couldn't parse mokx signatures %d\n", rc);
+ kfree(mokx);
+ }
+
/* Load the MokListRT certs */
rc = load_moklist_certs();
diff --git a/security/keys/Kconfig b/security/keys/Kconfig
index c161642a8484..64b81abd087e 100644
--- a/security/keys/Kconfig
+++ b/security/keys/Kconfig
@@ -75,6 +75,9 @@ config TRUSTED_KEYS
select CRYPTO_HMAC
select CRYPTO_SHA1
select CRYPTO_HASH_INFO
+ select ASN1_ENCODER
+ select OID_REGISTRY
+ select ASN1
help
This option provides support for creating, sealing, and unsealing
keys in the kernel. Trusted keys are random number symmetric keys,
diff --git a/security/keys/trusted-keys/Makefile b/security/keys/trusted-keys/Makefile
index 7b73cebbb378..feb8b6c3cc79 100644
--- a/security/keys/trusted-keys/Makefile
+++ b/security/keys/trusted-keys/Makefile
@@ -4,5 +4,11 @@
#
obj-$(CONFIG_TRUSTED_KEYS) += trusted.o
+trusted-y += trusted_core.o
trusted-y += trusted_tpm1.o
+
+$(obj)/trusted_tpm2.o: $(obj)/tpm2key.asn1.h
trusted-y += trusted_tpm2.o
+trusted-y += tpm2key.asn1.o
+
+trusted-$(CONFIG_TEE) += trusted_tee.o
diff --git a/security/keys/trusted-keys/tpm2key.asn1 b/security/keys/trusted-keys/tpm2key.asn1
new file mode 100644
index 000000000000..f57f869ad600
--- /dev/null
+++ b/security/keys/trusted-keys/tpm2key.asn1
@@ -0,0 +1,11 @@
+---
+--- ASN.1 for TPM 2.0 keys
+---
+
+TPMKey ::= SEQUENCE {
+ type OBJECT IDENTIFIER ({tpm2_key_type}),
+ emptyAuth [0] EXPLICIT BOOLEAN OPTIONAL,
+ parent INTEGER ({tpm2_key_parent}),
+ pubkey OCTET STRING ({tpm2_key_pub}),
+ privkey OCTET STRING ({tpm2_key_priv})
+ }
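
kbuild compiles this grammar into tpm2key.asn1.c/.h (hence the explicit dependency in the Makefile above), and each name in parentheses becomes an action callback fired while asn1_ber_decoder() walks a DER-encoded key blob. A minimal sketch of the expected consumer side in trusted_tpm2.c (callback signature per include/linux/asn1_ber_bytecode.h; the context struct here is illustrative):

#include <linux/asn1_decoder.h>
#include "tpm2key.asn1.h"

struct tpm2_key_context {	/* illustrative */
	u32 parent;
};

int tpm2_key_parent(void *context, size_t hdrlen, unsigned char tag,
		    const void *value, size_t vlen)
{
	struct tpm2_key_context *ctx = context;
	const u8 *v = value;
	size_t i;

	/* big-endian ASN.1 INTEGER -> host parent handle */
	ctx->parent = 0;
	for (i = 0; i < vlen; i++)
		ctx->parent = (ctx->parent << 8) | v[i];
	return 0;
}

The unseal path would then hand the whole blob to the generated decoder, e.g.:

	ret = asn1_ber_decoder(&tpm2key_decoder, &ctx, blob, blob_len);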
diff --git a/security/keys/trusted-keys/trusted_core.c b/security/keys/trusted-keys/trusted_core.c
new file mode 100644
index 000000000000..d5c891d8d353
--- /dev/null
+++ b/security/keys/trusted-keys/trusted_core.c
@@ -0,0 +1,360 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2010 IBM Corporation
+ * Copyright (c) 2019-2021, Linaro Limited
+ *
+ * See Documentation/security/keys/trusted-encrypted.rst
+ */
+
+#include <keys/user-type.h>
+#include <keys/trusted-type.h>
+#include <keys/trusted_tee.h>
+#include <keys/trusted_tpm.h>
+#include <linux/capability.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/key-type.h>
+#include <linux/module.h>
+#include <linux/parser.h>
+#include <linux/rcupdate.h>
+#include <linux/slab.h>
+#include <linux/static_call.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+
+static char *trusted_key_source;
+module_param_named(source, trusted_key_source, charp, 0);
+MODULE_PARM_DESC(source, "Select trusted keys source (tpm or tee)");
+
+static const struct trusted_key_source trusted_key_sources[] = {
+#if defined(CONFIG_TCG_TPM)
+ { "tpm", &trusted_key_tpm_ops },
+#endif
+#if defined(CONFIG_TEE)
+ { "tee", &trusted_key_tee_ops },
+#endif
+};
+
+DEFINE_STATIC_CALL_NULL(trusted_key_init, *trusted_key_sources[0].ops->init);
+DEFINE_STATIC_CALL_NULL(trusted_key_seal, *trusted_key_sources[0].ops->seal);
+DEFINE_STATIC_CALL_NULL(trusted_key_unseal,
+ *trusted_key_sources[0].ops->unseal);
+DEFINE_STATIC_CALL_NULL(trusted_key_get_random,
+ *trusted_key_sources[0].ops->get_random);
+DEFINE_STATIC_CALL_NULL(trusted_key_exit, *trusted_key_sources[0].ops->exit);
+static unsigned char migratable;
+
+enum {
+ Opt_err,
+ Opt_new, Opt_load, Opt_update,
+};
+
+static const match_table_t key_tokens = {
+ {Opt_new, "new"},
+ {Opt_load, "load"},
+ {Opt_update, "update"},
+ {Opt_err, NULL}
+};
+
+/*
+ * datablob_parse - parse the keyctl data and fill in the
+ * payload structure
+ *
+ * On success returns 0, otherwise -EINVAL.
+ */
+static int datablob_parse(char **datablob, struct trusted_key_payload *p)
+{
+ substring_t args[MAX_OPT_ARGS];
+ long keylen;
+ int ret = -EINVAL;
+ int key_cmd;
+ char *c;
+
+ /* main command */
+ c = strsep(datablob, " \t");
+ if (!c)
+ return -EINVAL;
+ key_cmd = match_token(c, key_tokens, args);
+ switch (key_cmd) {
+ case Opt_new:
+ /* first argument is key size */
+ c = strsep(datablob, " \t");
+ if (!c)
+ return -EINVAL;
+ ret = kstrtol(c, 10, &keylen);
+ if (ret < 0 || keylen < MIN_KEY_SIZE || keylen > MAX_KEY_SIZE)
+ return -EINVAL;
+ p->key_len = keylen;
+ ret = Opt_new;
+ break;
+ case Opt_load:
+ /* first argument is sealed blob */
+ c = strsep(datablob, " \t");
+ if (!c)
+ return -EINVAL;
+ p->blob_len = strlen(c) / 2;
+ if (p->blob_len > MAX_BLOB_SIZE)
+ return -EINVAL;
+ ret = hex2bin(p->blob, c, p->blob_len);
+ if (ret < 0)
+ return -EINVAL;
+ ret = Opt_load;
+ break;
+ case Opt_update:
+ ret = Opt_update;
+ break;
+ case Opt_err:
+ return -EINVAL;
+ }
+ return ret;
+}
+
+static struct trusted_key_payload *trusted_payload_alloc(struct key *key)
+{
+ struct trusted_key_payload *p = NULL;
+ int ret;
+
+ ret = key_payload_reserve(key, sizeof(*p));
+ if (ret < 0)
+ goto err;
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ goto err;
+
+ p->migratable = migratable;
+err:
+ return p;
+}
+
+/*
+ * trusted_instantiate - create a new trusted key
+ *
+ * Unseal an existing trusted blob or, for a new key, get a
+ * random key, then seal and create a trusted key-type key,
+ * adding it to the specified keyring.
+ *
+ * On success, return 0. Otherwise return errno.
+ */
+static int trusted_instantiate(struct key *key,
+ struct key_preparsed_payload *prep)
+{
+ struct trusted_key_payload *payload = NULL;
+ size_t datalen = prep->datalen;
+ char *datablob, *orig_datablob;
+ int ret = 0;
+ int key_cmd;
+ size_t key_len;
+
+ if (datalen <= 0 || datalen > 32767 || !prep->data)
+ return -EINVAL;
+
+ orig_datablob = datablob = kmalloc(datalen + 1, GFP_KERNEL);
+ if (!datablob)
+ return -ENOMEM;
+ memcpy(datablob, prep->data, datalen);
+ datablob[datalen] = '\0';
+
+ payload = trusted_payload_alloc(key);
+ if (!payload) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ key_cmd = datablob_parse(&datablob, payload);
+ if (key_cmd < 0) {
+ ret = key_cmd;
+ goto out;
+ }
+
+ dump_payload(payload);
+
+ switch (key_cmd) {
+ case Opt_load:
+ ret = static_call(trusted_key_unseal)(payload, datablob);
+ dump_payload(payload);
+ if (ret < 0)
+ pr_info("key_unseal failed (%d)\n", ret);
+ break;
+ case Opt_new:
+ key_len = payload->key_len;
+ ret = static_call(trusted_key_get_random)(payload->key,
+ key_len);
+ if (ret < 0)
+ goto out;
+
+ if (ret != key_len) {
+ pr_info("key_create failed (%d)\n", ret);
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = static_call(trusted_key_seal)(payload, datablob);
+ if (ret < 0)
+ pr_info("key_seal failed (%d)\n", ret);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+out:
+ kfree_sensitive(orig_datablob);
+ if (!ret)
+ rcu_assign_keypointer(key, payload);
+ else
+ kfree_sensitive(payload);
+ return ret;
+}
+
+static void trusted_rcu_free(struct rcu_head *rcu)
+{
+ struct trusted_key_payload *p;
+
+ p = container_of(rcu, struct trusted_key_payload, rcu);
+ kfree_sensitive(p);
+}
+
+/*
+ * trusted_update - reseal an existing key with new PCR values
+ */
+static int trusted_update(struct key *key, struct key_preparsed_payload *prep)
+{
+ struct trusted_key_payload *p;
+ struct trusted_key_payload *new_p;
+ size_t datalen = prep->datalen;
+ char *datablob, *orig_datablob;
+ int ret = 0;
+
+ if (key_is_negative(key))
+ return -ENOKEY;
+ p = key->payload.data[0];
+ if (!p->migratable)
+ return -EPERM;
+ if (datalen <= 0 || datalen > 32767 || !prep->data)
+ return -EINVAL;
+
+ orig_datablob = datablob = kmalloc(datalen + 1, GFP_KERNEL);
+ if (!datablob)
+ return -ENOMEM;
+
+ new_p = trusted_payload_alloc(key);
+ if (!new_p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(datablob, prep->data, datalen);
+ datablob[datalen] = '\0';
+ ret = datablob_parse(&datablob, new_p);
+ if (ret != Opt_update) {
+ ret = -EINVAL;
+ kfree_sensitive(new_p);
+ goto out;
+ }
+
+ /* copy old key values, and reseal with new pcrs */
+ new_p->migratable = p->migratable;
+ new_p->key_len = p->key_len;
+ memcpy(new_p->key, p->key, p->key_len);
+ dump_payload(p);
+ dump_payload(new_p);
+
+ ret = static_call(trusted_key_seal)(new_p, datablob);
+ if (ret < 0) {
+ pr_info("key_seal failed (%d)\n", ret);
+ kfree_sensitive(new_p);
+ goto out;
+ }
+
+ rcu_assign_keypointer(key, new_p);
+ call_rcu(&p->rcu, trusted_rcu_free);
+out:
+ kfree_sensitive(orig_datablob);
+ return ret;
+}
+
+/*
+ * trusted_read - copy the sealed blob data to userspace in hex.
+ * On success, return to userspace the trusted key datablob size.
+ */
+static long trusted_read(const struct key *key, char *buffer,
+ size_t buflen)
+{
+ const struct trusted_key_payload *p;
+ char *bufp;
+ int i;
+
+ p = dereference_key_locked(key);
+ if (!p)
+ return -EINVAL;
+
+ if (buffer && buflen >= 2 * p->blob_len) {
+ bufp = buffer;
+ for (i = 0; i < p->blob_len; i++)
+ bufp = hex_byte_pack(bufp, p->blob[i]);
+ }
+ return 2 * p->blob_len;
+}
+
+/*
+ * trusted_destroy - clear and free the key's payload
+ */
+static void trusted_destroy(struct key *key)
+{
+ kfree_sensitive(key->payload.data[0]);
+}
+
+struct key_type key_type_trusted = {
+ .name = "trusted",
+ .instantiate = trusted_instantiate,
+ .update = trusted_update,
+ .destroy = trusted_destroy,
+ .describe = user_describe,
+ .read = trusted_read,
+};
+EXPORT_SYMBOL_GPL(key_type_trusted);
+
+static int __init init_trusted(void)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < ARRAY_SIZE(trusted_key_sources); i++) {
+ if (trusted_key_source &&
+ strncmp(trusted_key_source, trusted_key_sources[i].name,
+ strlen(trusted_key_sources[i].name)))
+ continue;
+
+ static_call_update(trusted_key_init,
+ trusted_key_sources[i].ops->init);
+ static_call_update(trusted_key_seal,
+ trusted_key_sources[i].ops->seal);
+ static_call_update(trusted_key_unseal,
+ trusted_key_sources[i].ops->unseal);
+ static_call_update(trusted_key_get_random,
+ trusted_key_sources[i].ops->get_random);
+ static_call_update(trusted_key_exit,
+ trusted_key_sources[i].ops->exit);
+ migratable = trusted_key_sources[i].ops->migratable;
+
+ ret = static_call(trusted_key_init)();
+ if (!ret)
+ break;
+ }
+
+ /*
+ * encrypted_keys.ko depends on successful load of this module even if
+ * trusted key implementation is not found.
+ */
+ if (ret == -ENODEV)
+ return 0;
+
+ return ret;
+}
+
+static void __exit cleanup_trusted(void)
+{
+ static_call(trusted_key_exit)();
+}
+
+late_initcall(init_trusted);
+module_exit(cleanup_trusted);
+
+MODULE_LICENSE("GPL");
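
The static_call indirection keeps this core free of backend #ifdefs: a trust source only has to provide a struct trusted_key_ops and an entry in trusted_key_sources[], and it can be pinned at boot via the module parameter declared above (trusted.source=tpm or trusted.source=tee). A sketch of what a hypothetical extra backend would supply (the "foo" names are purely illustrative; the ops layout matches the tpm and tee implementations in this series):

static int trusted_foo_init(void)
{
	/* probe the hardware; failure makes init_trusted() try the
	 * next entry in trusted_key_sources[] */
	return -ENODEV;
}

static int trusted_foo_seal(struct trusted_key_payload *p, char *datablob)
{
	return -EOPNOTSUPP;	/* fill p->blob / p->blob_len on success */
}

static int trusted_foo_unseal(struct trusted_key_payload *p, char *datablob)
{
	return -EOPNOTSUPP;	/* fill p->key / p->key_len on success */
}

static int trusted_foo_get_random(unsigned char *key, size_t key_len)
{
	return -EOPNOTSUPP;	/* return the number of bytes generated */
}

static void trusted_foo_exit(void)
{
}

struct trusted_key_ops trusted_key_foo_ops = {
	.migratable = 0,	/* key material never leaves the hardware */
	.init = trusted_foo_init,
	.seal = trusted_foo_seal,
	.unseal = trusted_foo_unseal,
	.get_random = trusted_foo_get_random,
	.exit = trusted_foo_exit,
};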
diff --git a/security/keys/trusted-keys/trusted_tee.c b/security/keys/trusted-keys/trusted_tee.c
new file mode 100644
index 000000000000..2ce66c199e1d
--- /dev/null
+++ b/security/keys/trusted-keys/trusted_tee.c
@@ -0,0 +1,318 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019-2021 Linaro Ltd.
+ *
+ * Author:
+ * Sumit Garg <sumit.garg@linaro.org>
+ */
+
+#include <linux/err.h>
+#include <linux/key-type.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/tee_drv.h>
+#include <linux/uuid.h>
+
+#include <keys/trusted_tee.h>
+
+#define DRIVER_NAME "trusted-key-tee"
+
+/*
+ * Get random data for symmetric key
+ *
+ * [out] memref[0] Random data
+ */
+#define TA_CMD_GET_RANDOM 0x0
+
+/*
+ * Seal trusted key using hardware unique key
+ *
+ * [in] memref[0] Plain key
+ * [out] memref[1] Sealed key datablob
+ */
+#define TA_CMD_SEAL 0x1
+
+/*
+ * Unseal trusted key using hardware unique key
+ *
+ * [in] memref[0] Sealed key datablob
+ * [out] memref[1] Plain key
+ */
+#define TA_CMD_UNSEAL 0x2
+
+/**
+ * struct trusted_key_tee_private - TEE Trusted key private data
+ * @dev: TEE based Trusted key device.
+ * @ctx: TEE context handler.
+ * @session_id: Trusted key TA session identifier.
+ * @shm_pool: Memory pool shared with TEE device.
+ */
+struct trusted_key_tee_private {
+ struct device *dev;
+ struct tee_context *ctx;
+ u32 session_id;
+ struct tee_shm *shm_pool;
+};
+
+static struct trusted_key_tee_private pvt_data;
+
+/*
+ * Have the TEE seal(encrypt) the symmetric key
+ */
+static int trusted_tee_seal(struct trusted_key_payload *p, char *datablob)
+{
+ int ret;
+ struct tee_ioctl_invoke_arg inv_arg;
+ struct tee_param param[4];
+ struct tee_shm *reg_shm_in = NULL, *reg_shm_out = NULL;
+
+ memset(&inv_arg, 0, sizeof(inv_arg));
+ memset(&param, 0, sizeof(param));
+
+ reg_shm_in = tee_shm_register(pvt_data.ctx, (unsigned long)p->key,
+ p->key_len, TEE_SHM_DMA_BUF |
+ TEE_SHM_KERNEL_MAPPED);
+ if (IS_ERR(reg_shm_in)) {
+ dev_err(pvt_data.dev, "key shm register failed\n");
+ return PTR_ERR(reg_shm_in);
+ }
+
+ reg_shm_out = tee_shm_register(pvt_data.ctx, (unsigned long)p->blob,
+ sizeof(p->blob), TEE_SHM_DMA_BUF |
+ TEE_SHM_KERNEL_MAPPED);
+ if (IS_ERR(reg_shm_out)) {
+ dev_err(pvt_data.dev, "blob shm register failed\n");
+ ret = PTR_ERR(reg_shm_out);
+ goto out;
+ }
+
+ inv_arg.func = TA_CMD_SEAL;
+ inv_arg.session = pvt_data.session_id;
+ inv_arg.num_params = 4;
+
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+ param[0].u.memref.shm = reg_shm_in;
+ param[0].u.memref.size = p->key_len;
+ param[0].u.memref.shm_offs = 0;
+ param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+ param[1].u.memref.shm = reg_shm_out;
+ param[1].u.memref.size = sizeof(p->blob);
+ param[1].u.memref.shm_offs = 0;
+
+ ret = tee_client_invoke_func(pvt_data.ctx, &inv_arg, param);
+ if ((ret < 0) || (inv_arg.ret != 0)) {
+ dev_err(pvt_data.dev, "TA_CMD_SEAL invoke err: %x\n",
+ inv_arg.ret);
+ ret = -EFAULT;
+ } else {
+ p->blob_len = param[1].u.memref.size;
+ }
+
+out:
+ if (reg_shm_out)
+ tee_shm_free(reg_shm_out);
+ if (reg_shm_in)
+ tee_shm_free(reg_shm_in);
+
+ return ret;
+}
+
+/*
+ * Have the TEE unseal(decrypt) the symmetric key
+ */
+static int trusted_tee_unseal(struct trusted_key_payload *p, char *datablob)
+{
+ int ret;
+ struct tee_ioctl_invoke_arg inv_arg;
+ struct tee_param param[4];
+ struct tee_shm *reg_shm_in = NULL, *reg_shm_out = NULL;
+
+ memset(&inv_arg, 0, sizeof(inv_arg));
+ memset(&param, 0, sizeof(param));
+
+ reg_shm_in = tee_shm_register(pvt_data.ctx, (unsigned long)p->blob,
+ p->blob_len, TEE_SHM_DMA_BUF |
+ TEE_SHM_KERNEL_MAPPED);
+ if (IS_ERR(reg_shm_in)) {
+ dev_err(pvt_data.dev, "blob shm register failed\n");
+ return PTR_ERR(reg_shm_in);
+ }
+
+ reg_shm_out = tee_shm_register(pvt_data.ctx, (unsigned long)p->key,
+ sizeof(p->key), TEE_SHM_DMA_BUF |
+ TEE_SHM_KERNEL_MAPPED);
+ if (IS_ERR(reg_shm_out)) {
+ dev_err(pvt_data.dev, "key shm register failed\n");
+ ret = PTR_ERR(reg_shm_out);
+ goto out;
+ }
+
+ inv_arg.func = TA_CMD_UNSEAL;
+ inv_arg.session = pvt_data.session_id;
+ inv_arg.num_params = 4;
+
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+ param[0].u.memref.shm = reg_shm_in;
+ param[0].u.memref.size = p->blob_len;
+ param[0].u.memref.shm_offs = 0;
+ param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+ param[1].u.memref.shm = reg_shm_out;
+ param[1].u.memref.size = sizeof(p->key);
+ param[1].u.memref.shm_offs = 0;
+
+ ret = tee_client_invoke_func(pvt_data.ctx, &inv_arg, param);
+ if ((ret < 0) || (inv_arg.ret != 0)) {
+ dev_err(pvt_data.dev, "TA_CMD_UNSEAL invoke err: %x\n",
+ inv_arg.ret);
+ ret = -EFAULT;
+ } else {
+ p->key_len = param[1].u.memref.size;
+ }
+
+out:
+ if (reg_shm_out)
+ tee_shm_free(reg_shm_out);
+ if (reg_shm_in)
+ tee_shm_free(reg_shm_in);
+
+ return ret;
+}
+
+/*
+ * Have the TEE generate random symmetric key
+ */
+static int trusted_tee_get_random(unsigned char *key, size_t key_len)
+{
+ int ret;
+ struct tee_ioctl_invoke_arg inv_arg;
+ struct tee_param param[4];
+ struct tee_shm *reg_shm = NULL;
+
+ memset(&inv_arg, 0, sizeof(inv_arg));
+ memset(&param, 0, sizeof(param));
+
+ reg_shm = tee_shm_register(pvt_data.ctx, (unsigned long)key, key_len,
+ TEE_SHM_DMA_BUF | TEE_SHM_KERNEL_MAPPED);
+ if (IS_ERR(reg_shm)) {
+ dev_err(pvt_data.dev, "key shm register failed\n");
+ return PTR_ERR(reg_shm);
+ }
+
+ inv_arg.func = TA_CMD_GET_RANDOM;
+ inv_arg.session = pvt_data.session_id;
+ inv_arg.num_params = 4;
+
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+ param[0].u.memref.shm = reg_shm;
+ param[0].u.memref.size = key_len;
+ param[0].u.memref.shm_offs = 0;
+
+ ret = tee_client_invoke_func(pvt_data.ctx, &inv_arg, param);
+ if ((ret < 0) || (inv_arg.ret != 0)) {
+ dev_err(pvt_data.dev, "TA_CMD_GET_RANDOM invoke err: %x\n",
+ inv_arg.ret);
+ ret = -EFAULT;
+ } else {
+ ret = param[0].u.memref.size;
+ }
+
+ tee_shm_free(reg_shm);
+
+ return ret;
+}
+
+static int optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data)
+{
+ if (ver->impl_id == TEE_IMPL_ID_OPTEE)
+ return 1;
+ else
+ return 0;
+}
+
+static int trusted_key_probe(struct device *dev)
+{
+ struct tee_client_device *rng_device = to_tee_client_device(dev);
+ int ret;
+ struct tee_ioctl_open_session_arg sess_arg;
+
+ memset(&sess_arg, 0, sizeof(sess_arg));
+
+ pvt_data.ctx = tee_client_open_context(NULL, optee_ctx_match, NULL,
+ NULL);
+ if (IS_ERR(pvt_data.ctx))
+ return -ENODEV;
+
+ memcpy(sess_arg.uuid, rng_device->id.uuid.b, TEE_IOCTL_UUID_LEN);
+ sess_arg.clnt_login = TEE_IOCTL_LOGIN_REE_KERNEL;
+ sess_arg.num_params = 0;
+
+ ret = tee_client_open_session(pvt_data.ctx, &sess_arg, NULL);
+ if ((ret < 0) || (sess_arg.ret != 0)) {
+ dev_err(dev, "tee_client_open_session failed, err: %x\n",
+ sess_arg.ret);
+ ret = -EINVAL;
+ goto out_ctx;
+ }
+ pvt_data.session_id = sess_arg.session;
+
+ ret = register_key_type(&key_type_trusted);
+ if (ret < 0)
+ goto out_sess;
+
+ pvt_data.dev = dev;
+
+ return 0;
+
+out_sess:
+ tee_client_close_session(pvt_data.ctx, pvt_data.session_id);
+out_ctx:
+ tee_client_close_context(pvt_data.ctx);
+
+ return ret;
+}
+
+static int trusted_key_remove(struct device *dev)
+{
+ unregister_key_type(&key_type_trusted);
+ tee_client_close_session(pvt_data.ctx, pvt_data.session_id);
+ tee_client_close_context(pvt_data.ctx);
+
+ return 0;
+}
+
+static const struct tee_client_device_id trusted_key_id_table[] = {
+ {UUID_INIT(0xf04a0fe7, 0x1f5d, 0x4b9b,
+ 0xab, 0xf7, 0x61, 0x9b, 0x85, 0xb4, 0xce, 0x8c)},
+ {}
+};
+MODULE_DEVICE_TABLE(tee, trusted_key_id_table);
+
+static struct tee_client_driver trusted_key_driver = {
+ .id_table = trusted_key_id_table,
+ .driver = {
+ .name = DRIVER_NAME,
+ .bus = &tee_bus_type,
+ .probe = trusted_key_probe,
+ .remove = trusted_key_remove,
+ },
+};
+
+static int trusted_tee_init(void)
+{
+ return driver_register(&trusted_key_driver.driver);
+}
+
+static void trusted_tee_exit(void)
+{
+ driver_unregister(&trusted_key_driver.driver);
+}
+
+struct trusted_key_ops trusted_key_tee_ops = {
+ .migratable = 0, /* non-migratable */
+ .init = trusted_tee_init,
+ .seal = trusted_tee_seal,
+ .unseal = trusted_tee_unseal,
+ .get_random = trusted_tee_get_random,
+ .exit = trusted_tee_exit,
+};
diff --git a/security/keys/trusted-keys/trusted_tpm1.c b/security/keys/trusted-keys/trusted_tpm1.c
index 493eb91ed017..469394550801 100644
--- a/security/keys/trusted-keys/trusted_tpm1.c
+++ b/security/keys/trusted-keys/trusted_tpm1.c
@@ -1,29 +1,22 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2010 IBM Corporation
- *
- * Author:
- * David Safford <safford@us.ibm.com>
+ * Copyright (c) 2019-2021, Linaro Limited
*
* See Documentation/security/keys/trusted-encrypted.rst
*/
#include <crypto/hash_info.h>
-#include <linux/uaccess.h>
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/parser.h>
#include <linux/string.h>
#include <linux/err.h>
-#include <keys/user-type.h>
#include <keys/trusted-type.h>
#include <linux/key-type.h>
-#include <linux/rcupdate.h>
#include <linux/crypto.h>
#include <crypto/hash.h>
#include <crypto/sha1.h>
-#include <linux/capability.h>
#include <linux/tpm.h>
#include <linux/tpm_command.h>
@@ -63,7 +56,7 @@ static int TSS_sha1(const unsigned char *data, unsigned int datalen,
sdesc = init_sdesc(hashalg);
if (IS_ERR(sdesc)) {
- pr_info("trusted_key: can't alloc %s\n", hash_alg);
+ pr_info("can't alloc %s\n", hash_alg);
return PTR_ERR(sdesc);
}
@@ -83,7 +76,7 @@ static int TSS_rawhmac(unsigned char *digest, const unsigned char *key,
sdesc = init_sdesc(hmacalg);
if (IS_ERR(sdesc)) {
- pr_info("trusted_key: can't alloc %s\n", hmac_alg);
+ pr_info("can't alloc %s\n", hmac_alg);
return PTR_ERR(sdesc);
}
@@ -136,7 +129,7 @@ int TSS_authhmac(unsigned char *digest, const unsigned char *key,
sdesc = init_sdesc(hashalg);
if (IS_ERR(sdesc)) {
- pr_info("trusted_key: can't alloc %s\n", hash_alg);
+ pr_info("can't alloc %s\n", hash_alg);
return PTR_ERR(sdesc);
}
@@ -212,7 +205,7 @@ int TSS_checkhmac1(unsigned char *buffer,
sdesc = init_sdesc(hashalg);
if (IS_ERR(sdesc)) {
- pr_info("trusted_key: can't alloc %s\n", hash_alg);
+ pr_info("can't alloc %s\n", hash_alg);
return PTR_ERR(sdesc);
}
ret = crypto_shash_init(&sdesc->shash);
@@ -305,7 +298,7 @@ static int TSS_checkhmac2(unsigned char *buffer,
sdesc = init_sdesc(hashalg);
if (IS_ERR(sdesc)) {
- pr_info("trusted_key: can't alloc %s\n", hash_alg);
+ pr_info("can't alloc %s\n", hash_alg);
return PTR_ERR(sdesc);
}
ret = crypto_shash_init(&sdesc->shash);
@@ -597,12 +590,12 @@ static int tpm_unseal(struct tpm_buf *tb,
/* sessions for unsealing key and data */
ret = oiap(tb, &authhandle1, enonce1);
if (ret < 0) {
- pr_info("trusted_key: oiap failed (%d)\n", ret);
+ pr_info("oiap failed (%d)\n", ret);
return ret;
}
ret = oiap(tb, &authhandle2, enonce2);
if (ret < 0) {
- pr_info("trusted_key: oiap failed (%d)\n", ret);
+ pr_info("oiap failed (%d)\n", ret);
return ret;
}
@@ -612,7 +605,7 @@ static int tpm_unseal(struct tpm_buf *tb,
return ret;
if (ret != TPM_NONCE_SIZE) {
- pr_info("trusted_key: tpm_get_random failed (%d)\n", ret);
+ pr_info("tpm_get_random failed (%d)\n", ret);
return -EIO;
}
ret = TSS_authhmac(authdata1, keyauth, TPM_NONCE_SIZE,
@@ -641,7 +634,7 @@ static int tpm_unseal(struct tpm_buf *tb,
ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
if (ret < 0) {
- pr_info("trusted_key: authhmac failed (%d)\n", ret);
+ pr_info("authhmac failed (%d)\n", ret);
return ret;
}
@@ -653,7 +646,7 @@ static int tpm_unseal(struct tpm_buf *tb,
*datalen, TPM_DATA_OFFSET + sizeof(uint32_t), 0,
0);
if (ret < 0) {
- pr_info("trusted_key: TSS_checkhmac2 failed (%d)\n", ret);
+ pr_info("TSS_checkhmac2 failed (%d)\n", ret);
return ret;
}
memcpy(data, tb->data + TPM_DATA_OFFSET + sizeof(uint32_t), *datalen);
@@ -680,7 +673,7 @@ static int key_seal(struct trusted_key_payload *p,
p->key, p->key_len + 1, p->blob, &p->blob_len,
o->blobauth, o->pcrinfo, o->pcrinfo_len);
if (ret < 0)
- pr_info("trusted_key: srkseal failed (%d)\n", ret);
+ pr_info("srkseal failed (%d)\n", ret);
tpm_buf_destroy(&tb);
return ret;
@@ -702,7 +695,7 @@ static int key_unseal(struct trusted_key_payload *p,
ret = tpm_unseal(&tb, o->keyhandle, o->keyauth, p->blob, p->blob_len,
o->blobauth, p->key, &p->key_len);
if (ret < 0)
- pr_info("trusted_key: srkunseal failed (%d)\n", ret);
+ pr_info("srkunseal failed (%d)\n", ret);
else
/* pull migratable flag out of sealed key */
p->migratable = p->key[--p->key_len];
@@ -713,7 +706,6 @@ static int key_unseal(struct trusted_key_payload *p,
enum {
Opt_err,
- Opt_new, Opt_load, Opt_update,
Opt_keyhandle, Opt_keyauth, Opt_blobauth,
Opt_pcrinfo, Opt_pcrlock, Opt_migratable,
Opt_hash,
@@ -722,9 +714,6 @@ enum {
};
static const match_table_t key_tokens = {
- {Opt_new, "new"},
- {Opt_load, "load"},
- {Opt_update, "update"},
{Opt_keyhandle, "keyhandle=%s"},
{Opt_keyauth, "keyauth=%s"},
{Opt_blobauth, "blobauth=%s"},
@@ -758,6 +747,9 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
opt->hash = tpm2 ? HASH_ALGO_SHA256 : HASH_ALGO_SHA1;
+ if (!c)
+ return 0;
+
while ((p = strsep(&c, " \t"))) {
if (*p == '\0' || *p == ' ' || *p == '\t')
continue;
@@ -791,13 +783,33 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
return -EINVAL;
break;
case Opt_blobauth:
- if (strlen(args[0].from) != 2 * SHA1_DIGEST_SIZE)
- return -EINVAL;
- res = hex2bin(opt->blobauth, args[0].from,
- SHA1_DIGEST_SIZE);
- if (res < 0)
- return -EINVAL;
+ /*
+ * TPM 1.2 authorizations are sha1 hashes passed in as
+ * hex strings. TPM 2.0 authorizations are simple
+ * passwords (although it can take a hash as well)
+ */
+ opt->blobauth_len = strlen(args[0].from);
+
+ if (opt->blobauth_len == 2 * TPM_DIGEST_SIZE) {
+ res = hex2bin(opt->blobauth, args[0].from,
+ TPM_DIGEST_SIZE);
+ if (res < 0)
+ return -EINVAL;
+
+ opt->blobauth_len = TPM_DIGEST_SIZE;
+ break;
+ }
+
+ if (tpm2 && opt->blobauth_len <= sizeof(opt->blobauth)) {
+ memcpy(opt->blobauth, args[0].from,
+ opt->blobauth_len);
+ break;
+ }
+
+ return -EINVAL;
+
break;
+
case Opt_migratable:
if (*args[0].from == '0')
pay->migratable = 0;
@@ -822,7 +834,7 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
if (i == HASH_ALGO__LAST)
return -EINVAL;
if (!tpm2 && i != HASH_ALGO_SHA1) {
- pr_info("trusted_key: TPM 1.x only supports SHA-1.\n");
+ pr_info("TPM 1.x only supports SHA-1.\n");
return -EINVAL;
}
break;
@@ -851,71 +863,6 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
return 0;
}
-/*
- * datablob_parse - parse the keyctl data and fill in the
- * payload and options structures
- *
- * On success returns 0, otherwise -EINVAL.
- */
-static int datablob_parse(char *datablob, struct trusted_key_payload *p,
- struct trusted_key_options *o)
-{
- substring_t args[MAX_OPT_ARGS];
- long keylen;
- int ret = -EINVAL;
- int key_cmd;
- char *c;
-
- /* main command */
- c = strsep(&datablob, " \t");
- if (!c)
- return -EINVAL;
- key_cmd = match_token(c, key_tokens, args);
- switch (key_cmd) {
- case Opt_new:
- /* first argument is key size */
- c = strsep(&datablob, " \t");
- if (!c)
- return -EINVAL;
- ret = kstrtol(c, 10, &keylen);
- if (ret < 0 || keylen < MIN_KEY_SIZE || keylen > MAX_KEY_SIZE)
- return -EINVAL;
- p->key_len = keylen;
- ret = getoptions(datablob, p, o);
- if (ret < 0)
- return ret;
- ret = Opt_new;
- break;
- case Opt_load:
- /* first argument is sealed blob */
- c = strsep(&datablob, " \t");
- if (!c)
- return -EINVAL;
- p->blob_len = strlen(c) / 2;
- if (p->blob_len > MAX_BLOB_SIZE)
- return -EINVAL;
- ret = hex2bin(p->blob, c, p->blob_len);
- if (ret < 0)
- return -EINVAL;
- ret = getoptions(datablob, p, o);
- if (ret < 0)
- return ret;
- ret = Opt_load;
- break;
- case Opt_update:
- /* all arguments are options */
- ret = getoptions(datablob, p, o);
- if (ret < 0)
- return ret;
- ret = Opt_update;
- break;
- case Opt_err:
- return -EINVAL;
- break;
- }
- return ret;
-}
-
static struct trusted_key_options *trusted_options_alloc(void)
{
struct trusted_key_options *options;
@@ -936,252 +883,99 @@ static struct trusted_key_options *trusted_options_alloc(void)
return options;
}
-static struct trusted_key_payload *trusted_payload_alloc(struct key *key)
+static int trusted_tpm_seal(struct trusted_key_payload *p, char *datablob)
{
- struct trusted_key_payload *p = NULL;
- int ret;
-
- ret = key_payload_reserve(key, sizeof *p);
- if (ret < 0)
- return p;
- p = kzalloc(sizeof *p, GFP_KERNEL);
- if (p)
- p->migratable = 1; /* migratable by default */
- return p;
-}
-
-/*
- * trusted_instantiate - create a new trusted key
- *
- * Unseal an existing trusted blob or, for a new key, get a
- * random key, then seal and create a trusted key-type key,
- * adding it to the specified keyring.
- *
- * On success, return 0. Otherwise return errno.
- */
-static int trusted_instantiate(struct key *key,
- struct key_preparsed_payload *prep)
-{
- struct trusted_key_payload *payload = NULL;
struct trusted_key_options *options = NULL;
- size_t datalen = prep->datalen;
- char *datablob;
int ret = 0;
- int key_cmd;
- size_t key_len;
int tpm2;
tpm2 = tpm_is_tpm2(chip);
if (tpm2 < 0)
return tpm2;
- if (datalen <= 0 || datalen > 32767 || !prep->data)
- return -EINVAL;
-
- datablob = kmalloc(datalen + 1, GFP_KERNEL);
- if (!datablob)
+ options = trusted_options_alloc();
+ if (!options)
return -ENOMEM;
- memcpy(datablob, prep->data, datalen);
- datablob[datalen] = '\0';
- options = trusted_options_alloc();
- if (!options) {
- ret = -ENOMEM;
- goto out;
- }
- payload = trusted_payload_alloc(key);
- if (!payload) {
- ret = -ENOMEM;
+ ret = getoptions(datablob, p, options);
+ if (ret < 0)
goto out;
- }
+ dump_options(options);
- key_cmd = datablob_parse(datablob, payload, options);
- if (key_cmd < 0) {
- ret = key_cmd;
+ if (!options->keyhandle && !tpm2) {
+ ret = -EINVAL;
goto out;
}
- if (!options->keyhandle) {
- ret = -EINVAL;
+ if (tpm2)
+ ret = tpm2_seal_trusted(chip, p, options);
+ else
+ ret = key_seal(p, options);
+ if (ret < 0) {
+ pr_info("key_seal failed (%d)\n", ret);
goto out;
}
- dump_payload(payload);
- dump_options(options);
-
- switch (key_cmd) {
- case Opt_load:
- if (tpm2)
- ret = tpm2_unseal_trusted(chip, payload, options);
- else
- ret = key_unseal(payload, options);
- dump_payload(payload);
- dump_options(options);
- if (ret < 0)
- pr_info("trusted_key: key_unseal failed (%d)\n", ret);
- break;
- case Opt_new:
- key_len = payload->key_len;
- ret = tpm_get_random(chip, payload->key, key_len);
- if (ret < 0)
- goto out;
-
- if (ret != key_len) {
- pr_info("trusted_key: key_create failed (%d)\n", ret);
- ret = -EIO;
+ if (options->pcrlock) {
+ ret = pcrlock(options->pcrlock);
+ if (ret < 0) {
+ pr_info("pcrlock failed (%d)\n", ret);
goto out;
}
- if (tpm2)
- ret = tpm2_seal_trusted(chip, payload, options);
- else
- ret = key_seal(payload, options);
- if (ret < 0)
- pr_info("trusted_key: key_seal failed (%d)\n", ret);
- break;
- default:
- ret = -EINVAL;
- goto out;
}
- if (!ret && options->pcrlock)
- ret = pcrlock(options->pcrlock);
out:
- kfree_sensitive(datablob);
kfree_sensitive(options);
- if (!ret)
- rcu_assign_keypointer(key, payload);
- else
- kfree_sensitive(payload);
return ret;
}
-static void trusted_rcu_free(struct rcu_head *rcu)
+static int trusted_tpm_unseal(struct trusted_key_payload *p, char *datablob)
{
- struct trusted_key_payload *p;
-
- p = container_of(rcu, struct trusted_key_payload, rcu);
- kfree_sensitive(p);
-}
-
-/*
- * trusted_update - reseal an existing key with new PCR values
- */
-static int trusted_update(struct key *key, struct key_preparsed_payload *prep)
-{
- struct trusted_key_payload *p;
- struct trusted_key_payload *new_p;
- struct trusted_key_options *new_o;
- size_t datalen = prep->datalen;
- char *datablob;
+ struct trusted_key_options *options = NULL;
int ret = 0;
+ int tpm2;
- if (key_is_negative(key))
- return -ENOKEY;
- p = key->payload.data[0];
- if (!p->migratable)
- return -EPERM;
- if (datalen <= 0 || datalen > 32767 || !prep->data)
- return -EINVAL;
+ tpm2 = tpm_is_tpm2(chip);
+ if (tpm2 < 0)
+ return tpm2;
- datablob = kmalloc(datalen + 1, GFP_KERNEL);
- if (!datablob)
+ options = trusted_options_alloc();
+ if (!options)
return -ENOMEM;
- new_o = trusted_options_alloc();
- if (!new_o) {
- ret = -ENOMEM;
- goto out;
- }
- new_p = trusted_payload_alloc(key);
- if (!new_p) {
- ret = -ENOMEM;
- goto out;
- }
- memcpy(datablob, prep->data, datalen);
- datablob[datalen] = '\0';
- ret = datablob_parse(datablob, new_p, new_o);
- if (ret != Opt_update) {
- ret = -EINVAL;
- kfree_sensitive(new_p);
+ ret = getoptions(datablob, p, options);
+ if (ret < 0)
goto out;
- }
+ dump_options(options);
- if (!new_o->keyhandle) {
+ if (!options->keyhandle && !tpm2) {
ret = -EINVAL;
- kfree_sensitive(new_p);
goto out;
}
- /* copy old key values, and reseal with new pcrs */
- new_p->migratable = p->migratable;
- new_p->key_len = p->key_len;
- memcpy(new_p->key, p->key, p->key_len);
- dump_payload(p);
- dump_payload(new_p);
+ if (tpm2)
+ ret = tpm2_unseal_trusted(chip, p, options);
+ else
+ ret = key_unseal(p, options);
+ if (ret < 0)
+ pr_info("key_unseal failed (%d)\n", ret);
- ret = key_seal(new_p, new_o);
- if (ret < 0) {
- pr_info("trusted_key: key_seal failed (%d)\n", ret);
- kfree_sensitive(new_p);
- goto out;
- }
- if (new_o->pcrlock) {
- ret = pcrlock(new_o->pcrlock);
+ if (options->pcrlock) {
+ ret = pcrlock(options->pcrlock);
if (ret < 0) {
- pr_info("trusted_key: pcrlock failed (%d)\n", ret);
- kfree_sensitive(new_p);
+ pr_info("pcrlock failed (%d)\n", ret);
goto out;
}
}
- rcu_assign_keypointer(key, new_p);
- call_rcu(&p->rcu, trusted_rcu_free);
out:
- kfree_sensitive(datablob);
- kfree_sensitive(new_o);
+ kfree_sensitive(options);
return ret;
}
-/*
- * trusted_read - copy the sealed blob data to userspace in hex.
- * On success, return to userspace the trusted key datablob size.
- */
-static long trusted_read(const struct key *key, char *buffer,
- size_t buflen)
-{
- const struct trusted_key_payload *p;
- char *bufp;
- int i;
-
- p = dereference_key_locked(key);
- if (!p)
- return -EINVAL;
-
- if (buffer && buflen >= 2 * p->blob_len) {
- bufp = buffer;
- for (i = 0; i < p->blob_len; i++)
- bufp = hex_byte_pack(bufp, p->blob[i]);
- }
- return 2 * p->blob_len;
-}
-
-/*
- * trusted_destroy - clear and free the key's payload
- */
-static void trusted_destroy(struct key *key)
+static int trusted_tpm_get_random(unsigned char *key, size_t key_len)
{
- kfree_sensitive(key->payload.data[0]);
+ return tpm_get_random(chip, key, key_len);
}
-struct key_type key_type_trusted = {
- .name = "trusted",
- .instantiate = trusted_instantiate,
- .update = trusted_update,
- .destroy = trusted_destroy,
- .describe = user_describe,
- .read = trusted_read,
-};
-
-EXPORT_SYMBOL_GPL(key_type_trusted);
-
static void trusted_shash_release(void)
{
if (hashalg)
@@ -1196,14 +990,14 @@ static int __init trusted_shash_alloc(void)
hmacalg = crypto_alloc_shash(hmac_alg, 0, 0);
if (IS_ERR(hmacalg)) {
- pr_info("trusted_key: could not allocate crypto %s\n",
+ pr_info("could not allocate crypto %s\n",
hmac_alg);
return PTR_ERR(hmacalg);
}
hashalg = crypto_alloc_shash(hash_alg, 0, 0);
if (IS_ERR(hashalg)) {
- pr_info("trusted_key: could not allocate crypto %s\n",
+ pr_info("could not allocate crypto %s\n",
hash_alg);
ret = PTR_ERR(hashalg);
goto hashalg_fail;
@@ -1231,16 +1025,13 @@ static int __init init_digests(void)
return 0;
}
-static int __init init_trusted(void)
+static int __init trusted_tpm_init(void)
{
int ret;
- /* encrypted_keys.ko depends on successful load of this module even if
- * TPM is not used.
- */
chip = tpm_default_chip();
if (!chip)
- return 0;
+ return -ENODEV;
ret = init_digests();
if (ret < 0)
@@ -1261,7 +1052,7 @@ err_put:
return ret;
}
-static void __exit cleanup_trusted(void)
+static void trusted_tpm_exit(void)
{
if (chip) {
put_device(&chip->dev);
@@ -1271,7 +1062,11 @@ static void __exit cleanup_trusted(void)
}
}
-late_initcall(init_trusted);
-module_exit(cleanup_trusted);
-
-MODULE_LICENSE("GPL");
+struct trusted_key_ops trusted_key_tpm_ops = {
+ .migratable = 1, /* migratable by default */
+ .init = trusted_tpm_init,
+ .seal = trusted_tpm_seal,
+ .unseal = trusted_tpm_unseal,
+ .get_random = trusted_tpm_get_random,
+ .exit = trusted_tpm_exit,
+};
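For readers following the refactor: everything TPM-specific now sits behind this ops table, which the trusted-keys core selects at boot. Below is a minimal, self-contained userspace sketch of the same dispatch pattern; backend_ops and the demo_* functions are hypothetical illustrations of the vtable idiom, not kernel API.

#include <stdio.h>
#include <stddef.h>

/* Hypothetical analogue of struct trusted_key_ops: one vtable per backend. */
struct backend_ops {
	int migratable;
	int (*init)(void);
	int (*seal)(const unsigned char *key, size_t len);
	void (*exit)(void);
};

static int demo_init(void)
{
	puts("backend ready");
	return 0;
}

static int demo_seal(const unsigned char *key, size_t len)
{
	(void)key;	/* a real backend would wrap the key in a sealed blob */
	printf("sealing %zu key bytes\n", len);
	return 0;
}

static void demo_exit(void)
{
	puts("backend released");
}

static const struct backend_ops demo_ops = {
	.migratable = 1,
	.init = demo_init,
	.seal = demo_seal,
	.exit = demo_exit,
};

int main(void)
{
	const struct backend_ops *ops = &demo_ops;	/* picked at init time */
	const unsigned char key[32] = { 0 };

	if (ops->init())
		return 1;
	ops->seal(key, sizeof(key));
	ops->exit();
	return 0;
}

The core only ever calls through the pointer, so adding a new backend (as the TEE one does) never touches the callers.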
diff --git a/security/keys/trusted-keys/trusted_tpm2.c b/security/keys/trusted-keys/trusted_tpm2.c
index e2a0ed5d02f0..617fabd4d913 100644
--- a/security/keys/trusted-keys/trusted_tpm2.c
+++ b/security/keys/trusted-keys/trusted_tpm2.c
@@ -4,6 +4,8 @@
* Copyright (C) 2014 Intel Corporation
*/
+#include <linux/asn1_encoder.h>
+#include <linux/oid_registry.h>
#include <linux/string.h>
#include <linux/err.h>
#include <linux/tpm.h>
@@ -12,6 +14,10 @@
#include <keys/trusted-type.h>
#include <keys/trusted_tpm.h>
+#include <asm/unaligned.h>
+
+#include "tpm2key.asn1.h"
+
static struct tpm2_hash tpm2_hash_map[] = {
{HASH_ALGO_SHA1, TPM_ALG_SHA1},
{HASH_ALGO_SHA256, TPM_ALG_SHA256},
@@ -20,6 +26,165 @@ static struct tpm2_hash tpm2_hash_map[] = {
{HASH_ALGO_SM3_256, TPM_ALG_SM3_256},
};
+static u32 tpm2key_oid[] = { 2, 23, 133, 10, 1, 5 };
+
+static int tpm2_key_encode(struct trusted_key_payload *payload,
+ struct trusted_key_options *options,
+ u8 *src, u32 len)
+{
+ const int SCRATCH_SIZE = PAGE_SIZE;
+ u8 *scratch = kmalloc(SCRATCH_SIZE, GFP_KERNEL);
+ u8 *work = scratch, *work1;
+ u8 *end_work = scratch + SCRATCH_SIZE;
+ u8 *priv, *pub;
+ u16 priv_len, pub_len;
+
+ priv_len = get_unaligned_be16(src) + 2;
+ priv = src;
+
+ src += priv_len;
+
+ pub_len = get_unaligned_be16(src) + 2;
+ pub = src;
+
+ if (!scratch)
+ return -ENOMEM;
+
+ work = asn1_encode_oid(work, end_work, tpm2key_oid,
+ asn1_oid_len(tpm2key_oid));
+
+ if (options->blobauth_len == 0) {
+ unsigned char bool[3], *w = bool;
+ /* tag 0 is emptyAuth */
+ w = asn1_encode_boolean(w, w + sizeof(bool), true);
+ if (WARN(IS_ERR(w), "BUG: Boolean failed to encode"))
+ return PTR_ERR(w);
+ work = asn1_encode_tag(work, end_work, 0, bool, w - bool);
+ }
+
+ /*
+	 * Assume both octet strings will encode to a 2-byte definite length
+	 *
+	 * Note: For a well-behaved TPM, this warning should never
+	 * trigger, so if it does there's something nefarious going on.
+ */
+ if (WARN(work - scratch + pub_len + priv_len + 14 > SCRATCH_SIZE,
+ "BUG: scratch buffer is too small"))
+ return -EINVAL;
+
+ work = asn1_encode_integer(work, end_work, options->keyhandle);
+ work = asn1_encode_octet_string(work, end_work, pub, pub_len);
+ work = asn1_encode_octet_string(work, end_work, priv, priv_len);
+
+ work1 = payload->blob;
+ work1 = asn1_encode_sequence(work1, work1 + sizeof(payload->blob),
+ scratch, work - scratch);
+ if (WARN(IS_ERR(work1), "BUG: ASN.1 encoder failed"))
+ return PTR_ERR(work1);
+
+ return work1 - payload->blob;
+}
+
+struct tpm2_key_context {
+ u32 parent;
+ const u8 *pub;
+ u32 pub_len;
+ const u8 *priv;
+ u32 priv_len;
+};
+
+static int tpm2_key_decode(struct trusted_key_payload *payload,
+ struct trusted_key_options *options,
+ u8 **buf)
+{
+ int ret;
+ struct tpm2_key_context ctx;
+ u8 *blob;
+
+ memset(&ctx, 0, sizeof(ctx));
+
+ ret = asn1_ber_decoder(&tpm2key_decoder, &ctx, payload->blob,
+ payload->blob_len);
+ if (ret < 0)
+ return ret;
+
+ if (ctx.priv_len + ctx.pub_len > MAX_BLOB_SIZE)
+ return -EINVAL;
+
+ blob = kmalloc(ctx.priv_len + ctx.pub_len + 4, GFP_KERNEL);
+ if (!blob)
+ return -ENOMEM;
+
+ *buf = blob;
+ options->keyhandle = ctx.parent;
+
+ memcpy(blob, ctx.priv, ctx.priv_len);
+ blob += ctx.priv_len;
+
+ memcpy(blob, ctx.pub, ctx.pub_len);
+
+ return 0;
+}
+
+int tpm2_key_parent(void *context, size_t hdrlen,
+ unsigned char tag,
+ const void *value, size_t vlen)
+{
+ struct tpm2_key_context *ctx = context;
+ const u8 *v = value;
+ int i;
+
+ ctx->parent = 0;
+ for (i = 0; i < vlen; i++) {
+ ctx->parent <<= 8;
+ ctx->parent |= v[i];
+ }
+
+ return 0;
+}
+
+int tpm2_key_type(void *context, size_t hdrlen,
+ unsigned char tag,
+ const void *value, size_t vlen)
+{
+ enum OID oid = look_up_OID(value, vlen);
+
+ if (oid != OID_TPMSealedData) {
+ char buffer[50];
+
+ sprint_oid(value, vlen, buffer, sizeof(buffer));
+ pr_debug("OID is \"%s\" which is not TPMSealedData\n",
+ buffer);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int tpm2_key_pub(void *context, size_t hdrlen,
+ unsigned char tag,
+ const void *value, size_t vlen)
+{
+ struct tpm2_key_context *ctx = context;
+
+ ctx->pub = value;
+ ctx->pub_len = vlen;
+
+ return 0;
+}
+
+int tpm2_key_priv(void *context, size_t hdrlen,
+ unsigned char tag,
+ const void *value, size_t vlen)
+{
+ struct tpm2_key_context *ctx = context;
+
+ ctx->priv = value;
+ ctx->priv_len = vlen;
+
+ return 0;
+}
+
/**
* tpm_buf_append_auth() - append TPMS_AUTH_COMMAND to the buffer.
*
@@ -63,9 +228,10 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
struct trusted_key_payload *payload,
struct trusted_key_options *options)
{
- unsigned int blob_len;
+ int blob_len = 0;
struct tpm_buf buf;
u32 hash;
+ u32 flags;
int i;
int rc;
@@ -79,7 +245,10 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
if (i == ARRAY_SIZE(tpm2_hash_map))
return -EINVAL;
- rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_CREATE);
+ if (!options->keyhandle)
+ return -EINVAL;
+
+ rc = tpm_try_get_ops(chip);
if (rc)
return rc;
@@ -97,29 +266,32 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
TPM_DIGEST_SIZE);
/* sensitive */
- tpm_buf_append_u16(&buf, 4 + TPM_DIGEST_SIZE + payload->key_len + 1);
+ tpm_buf_append_u16(&buf, 4 + options->blobauth_len + payload->key_len);
+
+ tpm_buf_append_u16(&buf, options->blobauth_len);
+ if (options->blobauth_len)
+ tpm_buf_append(&buf, options->blobauth, options->blobauth_len);
- tpm_buf_append_u16(&buf, TPM_DIGEST_SIZE);
- tpm_buf_append(&buf, options->blobauth, TPM_DIGEST_SIZE);
- tpm_buf_append_u16(&buf, payload->key_len + 1);
+ tpm_buf_append_u16(&buf, payload->key_len);
tpm_buf_append(&buf, payload->key, payload->key_len);
- tpm_buf_append_u8(&buf, payload->migratable);
/* public */
tpm_buf_append_u16(&buf, 14 + options->policydigest_len);
tpm_buf_append_u16(&buf, TPM_ALG_KEYEDHASH);
tpm_buf_append_u16(&buf, hash);
+ /* key properties */
+ flags = 0;
+ flags |= options->policydigest_len ? 0 : TPM2_OA_USER_WITH_AUTH;
+ flags |= payload->migratable ? (TPM2_OA_FIXED_TPM |
+ TPM2_OA_FIXED_PARENT) : 0;
+ tpm_buf_append_u32(&buf, flags);
+
/* policy */
- if (options->policydigest_len) {
- tpm_buf_append_u32(&buf, 0);
- tpm_buf_append_u16(&buf, options->policydigest_len);
+ tpm_buf_append_u16(&buf, options->policydigest_len);
+ if (options->policydigest_len)
tpm_buf_append(&buf, options->policydigest,
options->policydigest_len);
- } else {
- tpm_buf_append_u32(&buf, TPM2_OA_USER_WITH_AUTH);
- tpm_buf_append_u16(&buf, 0);
- }
/* public parameters */
tpm_buf_append_u16(&buf, TPM_ALG_NULL);
@@ -150,8 +322,9 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
goto out;
}
- memcpy(payload->blob, &buf.data[TPM_HEADER_SIZE + 4], blob_len);
- payload->blob_len = blob_len;
+ blob_len = tpm2_key_encode(payload, options,
+ &buf.data[TPM_HEADER_SIZE + 4],
+ blob_len);
out:
tpm_buf_destroy(&buf);
@@ -162,6 +335,10 @@ out:
else
rc = -EPERM;
}
+ if (blob_len < 0)
+ return blob_len;
+
+ payload->blob_len = blob_len;
tpm_put_ops(chip);
return rc;
@@ -189,13 +366,45 @@ static int tpm2_load_cmd(struct tpm_chip *chip,
unsigned int private_len;
unsigned int public_len;
unsigned int blob_len;
+ u8 *blob, *pub;
int rc;
+ u32 attrs;
+
+ rc = tpm2_key_decode(payload, options, &blob);
+ if (rc) {
+ /* old form */
+ blob = payload->blob;
+ payload->old_format = 1;
+ }
+
+ /* new format carries keyhandle but old format doesn't */
+ if (!options->keyhandle)
+ return -EINVAL;
- private_len = be16_to_cpup((__be16 *) &payload->blob[0]);
- if (private_len > (payload->blob_len - 2))
+ /* must be big enough for at least the two be16 size counts */
+ if (payload->blob_len < 4)
+ return -EINVAL;
+
+ private_len = get_unaligned_be16(blob);
+
+ /* must be big enough for following public_len */
+ if (private_len + 2 + 2 > (payload->blob_len))
+ return -E2BIG;
+
+ public_len = get_unaligned_be16(blob + 2 + private_len);
+ if (private_len + 2 + public_len + 2 > payload->blob_len)
return -E2BIG;
- public_len = be16_to_cpup((__be16 *) &payload->blob[2 + private_len]);
+ pub = blob + 2 + private_len + 2;
+ /* key attributes are always at offset 4 */
+ attrs = get_unaligned_be32(pub + 4);
+
+ if ((attrs & (TPM2_OA_FIXED_TPM | TPM2_OA_FIXED_PARENT)) ==
+ (TPM2_OA_FIXED_TPM | TPM2_OA_FIXED_PARENT))
+ payload->migratable = 0;
+ else
+ payload->migratable = 1;
+
blob_len = private_len + public_len + 4;
if (blob_len > payload->blob_len)
return -E2BIG;
@@ -211,7 +420,7 @@ static int tpm2_load_cmd(struct tpm_chip *chip,
options->keyauth /* hmac */,
TPM_DIGEST_SIZE);
- tpm_buf_append(&buf, payload->blob, blob_len);
+ tpm_buf_append(&buf, blob, blob_len);
if (buf.flags & TPM_BUF_OVERFLOW) {
rc = -E2BIG;
@@ -224,6 +433,8 @@ static int tpm2_load_cmd(struct tpm_chip *chip,
(__be32 *) &buf.data[TPM_HEADER_SIZE]);
out:
+ if (blob != payload->blob)
+ kfree(blob);
tpm_buf_destroy(&buf);
if (rc > 0)
@@ -265,7 +476,7 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
NULL /* nonce */, 0,
TPM2_SA_CONTINUE_SESSION,
options->blobauth /* hmac */,
- TPM_DIGEST_SIZE);
+ options->blobauth_len);
rc = tpm_transmit_cmd(chip, &buf, 6, "unsealing");
if (rc > 0)
@@ -274,7 +485,7 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
if (!rc) {
data_len = be16_to_cpup(
(__be16 *) &buf.data[TPM_HEADER_SIZE + 4]);
- if (data_len < MIN_KEY_SIZE || data_len > MAX_KEY_SIZE + 1) {
+ if (data_len < MIN_KEY_SIZE || data_len > MAX_KEY_SIZE) {
rc = -EFAULT;
goto out;
}
@@ -285,9 +496,19 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
}
data = &buf.data[TPM_HEADER_SIZE + 6];
- memcpy(payload->key, data, data_len - 1);
- payload->key_len = data_len - 1;
- payload->migratable = data[data_len - 1];
+ if (payload->old_format) {
+ /* migratable flag is at the end of the key */
+ memcpy(payload->key, data, data_len - 1);
+ payload->key_len = data_len - 1;
+ payload->migratable = data[data_len - 1];
+ } else {
+ /*
+ * migratable flag already collected from key
+ * attributes
+ */
+ memcpy(payload->key, data, data_len);
+ payload->key_len = data_len;
+ }
}
out:
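tpm2_load_cmd() above treats the raw blob as two big-endian, 16-bit size-prefixed fields (the private part followed by the public part) and bounds-checks each length before trusting it. A minimal userspace sketch of that parsing discipline; parse_blob and get_be16 are hypothetical helpers, not kernel code.

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

static uint16_t get_be16(const uint8_t *p)
{
	return (uint16_t)((p[0] << 8) | p[1]);
}

/* Returns 0 on success, -1 if a length field would overrun the buffer. */
static int parse_blob(const uint8_t *blob, size_t blob_len,
		      size_t *priv_len, size_t *pub_len)
{
	if (blob_len < 4)			/* the two be16 size counts */
		return -1;
	*priv_len = get_be16(blob);
	if (*priv_len + 2 + 2 > blob_len)	/* room for the pub size */
		return -1;
	*pub_len = get_be16(blob + 2 + *priv_len);
	if (*priv_len + 2 + *pub_len + 2 > blob_len)
		return -1;
	return 0;
}

int main(void)
{
	const uint8_t blob[] = { 0x00, 0x02, 0xaa, 0xbb,	/* private */
				 0x00, 0x01, 0xcc };		/* public */
	size_t priv_len, pub_len;

	if (parse_blob(blob, sizeof(blob), &priv_len, &pub_len))
		return 1;
	printf("private: %zu bytes, public: %zu bytes\n", priv_len, pub_len);
	return 0;
}

Each length is validated before it is used as an offset for the next field, which is exactly why the kernel code checks private_len before reading public_len.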
diff --git a/security/landlock/Kconfig b/security/landlock/Kconfig
new file mode 100644
index 000000000000..8e33c4e8ffb8
--- /dev/null
+++ b/security/landlock/Kconfig
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config SECURITY_LANDLOCK
+ bool "Landlock support"
+ depends on SECURITY && !ARCH_EPHEMERAL_INODES
+ select SECURITY_PATH
+ help
+ Landlock is a sandboxing mechanism that enables processes to restrict
+ themselves (and their future children) by gradually enforcing
+ tailored access control policies. A Landlock security policy is a
+ set of access rights (e.g. open a file in read-only, make a
+	  directory, etc.) tied to a file hierarchy. Such a policy can be
+	  configured and enforced by any process for itself using the
+ dedicated system calls: landlock_create_ruleset(),
+ landlock_add_rule(), and landlock_restrict_self().
+
+ See Documentation/userspace-api/landlock.rst for further information.
+
+ If you are unsure how to answer this question, answer N. Otherwise,
+ you should also prepend "landlock," to the content of CONFIG_LSM to
+ enable Landlock at boot time.
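Concretely, following the help text above, a kernel configuration that builds Landlock and stacks it first could look like this; the exact LSM ordering is only an illustration, distributions pick their own:

CONFIG_SECURITY_LANDLOCK=y
CONFIG_LSM="landlock,lockdown,yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor,bpf"

The same list can also be supplied at boot with the lsm= kernel command-line parameter, which overrides CONFIG_LSM.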
diff --git a/security/landlock/Makefile b/security/landlock/Makefile
new file mode 100644
index 000000000000..7bbd2f413b3e
--- /dev/null
+++ b/security/landlock/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_SECURITY_LANDLOCK) := landlock.o
+
+landlock-y := setup.o syscalls.o object.o ruleset.o \
+ cred.o ptrace.o fs.o
diff --git a/security/landlock/common.h b/security/landlock/common.h
new file mode 100644
index 000000000000..5dc0fe15707d
--- /dev/null
+++ b/security/landlock/common.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Landlock LSM - Common constants and helpers
+ *
+ * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2018-2020 ANSSI
+ */
+
+#ifndef _SECURITY_LANDLOCK_COMMON_H
+#define _SECURITY_LANDLOCK_COMMON_H
+
+#define LANDLOCK_NAME "landlock"
+
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) LANDLOCK_NAME ": " fmt
+
+#endif /* _SECURITY_LANDLOCK_COMMON_H */
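The sole purpose of the pr_fmt redefinition above is to prefix every subsequent pr_*() message with "landlock: ". A standalone sketch of the same macro trick, with printf standing in for printk (the pr_info wrapper here is a simplified illustration):

#include <stdio.h>

#define LANDLOCK_NAME "landlock"

/* Must be defined before any pr_info() use, mirroring common.h. */
#define pr_fmt(fmt) LANDLOCK_NAME ": " fmt
#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_info("Up and running.\n");	/* prints "landlock: Up and running." */
	return 0;
}

Because pr_fmt() is expanded at each call site, every translation unit that includes this header gets the prefix for free.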
diff --git a/security/landlock/cred.c b/security/landlock/cred.c
new file mode 100644
index 000000000000..6725af24c684
--- /dev/null
+++ b/security/landlock/cred.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Landlock LSM - Credential hooks
+ *
+ * Copyright © 2017-2020 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2018-2020 ANSSI
+ */
+
+#include <linux/cred.h>
+#include <linux/lsm_hooks.h>
+
+#include "common.h"
+#include "cred.h"
+#include "ruleset.h"
+#include "setup.h"
+
+static int hook_cred_prepare(struct cred *const new,
+ const struct cred *const old, const gfp_t gfp)
+{
+ struct landlock_ruleset *const old_dom = landlock_cred(old)->domain;
+
+ if (old_dom) {
+ landlock_get_ruleset(old_dom);
+ landlock_cred(new)->domain = old_dom;
+ }
+ return 0;
+}
+
+static void hook_cred_free(struct cred *const cred)
+{
+ struct landlock_ruleset *const dom = landlock_cred(cred)->domain;
+
+ if (dom)
+ landlock_put_ruleset_deferred(dom);
+}
+
+static struct security_hook_list landlock_hooks[] __lsm_ro_after_init = {
+ LSM_HOOK_INIT(cred_prepare, hook_cred_prepare),
+ LSM_HOOK_INIT(cred_free, hook_cred_free),
+};
+
+__init void landlock_add_cred_hooks(void)
+{
+ security_add_hooks(landlock_hooks, ARRAY_SIZE(landlock_hooks),
+ LANDLOCK_NAME);
+}
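hook_cred_prepare() and hook_cred_free() implement a copy-on-fork lifetime rule: a new credential shares its parent's domain and merely bumps a reference count, and the last put frees the domain. A self-contained sketch of that rule, where domain_get()/domain_put() are hypothetical stand-ins for landlock_get_ruleset()/landlock_put_ruleset_deferred():

#include <stdio.h>
#include <stdlib.h>

struct domain {
	int usage;	/* stands in for refcount_t */
};

static struct domain *domain_get(struct domain *dom)
{
	if (dom)
		dom->usage++;
	return dom;
}

static void domain_put(struct domain *dom)
{
	if (dom && --dom->usage == 0) {
		puts("domain freed");
		free(dom);
	}
}

struct cred {
	struct domain *domain;
};

/* Mirrors hook_cred_prepare(): the child shares the parent's domain. */
static void cred_prepare(struct cred *new, const struct cred *old)
{
	new->domain = domain_get(old->domain);
}

int main(void)
{
	struct cred parent = { .domain = calloc(1, sizeof(struct domain)) };
	struct cred child;

	parent.domain->usage = 1;
	cred_prepare(&child, &parent);
	domain_put(child.domain);	/* child exits */
	domain_put(parent.domain);	/* parent exits: "domain freed" */
	return 0;
}

Sharing rather than copying is what makes the domain immutable after enforcement: no task can mutate a ruleset that its children already reference.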
diff --git a/security/landlock/cred.h b/security/landlock/cred.h
new file mode 100644
index 000000000000..5f99d3decade
--- /dev/null
+++ b/security/landlock/cred.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Landlock LSM - Credential hooks
+ *
+ * Copyright © 2019-2020 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2019-2020 ANSSI
+ */
+
+#ifndef _SECURITY_LANDLOCK_CRED_H
+#define _SECURITY_LANDLOCK_CRED_H
+
+#include <linux/cred.h>
+#include <linux/init.h>
+#include <linux/rcupdate.h>
+
+#include "ruleset.h"
+#include "setup.h"
+
+struct landlock_cred_security {
+ struct landlock_ruleset *domain;
+};
+
+static inline struct landlock_cred_security *landlock_cred(
+ const struct cred *cred)
+{
+ return cred->security + landlock_blob_sizes.lbs_cred;
+}
+
+static inline const struct landlock_ruleset *landlock_get_current_domain(void)
+{
+ return landlock_cred(current_cred())->domain;
+}
+
+/*
+ * The call needs to come from an RCU read-side critical section.
+ */
+static inline const struct landlock_ruleset *landlock_get_task_domain(
+ const struct task_struct *const task)
+{
+ return landlock_cred(__task_cred(task))->domain;
+}
+
+static inline bool landlocked(const struct task_struct *const task)
+{
+ bool has_dom;
+
+ if (task == current)
+ return !!landlock_get_current_domain();
+
+ rcu_read_lock();
+ has_dom = !!landlock_get_task_domain(task);
+ rcu_read_unlock();
+ return has_dom;
+}
+
+__init void landlock_add_cred_hooks(void);
+
+#endif /* _SECURITY_LANDLOCK_CRED_H */
diff --git a/security/landlock/fs.c b/security/landlock/fs.c
new file mode 100644
index 000000000000..97b8e421f617
--- /dev/null
+++ b/security/landlock/fs.c
@@ -0,0 +1,692 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Landlock LSM - Filesystem management and hooks
+ *
+ * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2018-2020 ANSSI
+ */
+
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/bits.h>
+#include <linux/compiler_types.h>
+#include <linux/dcache.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/limits.h>
+#include <linux/list.h>
+#include <linux/lsm_hooks.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <linux/path.h>
+#include <linux/rcupdate.h>
+#include <linux/spinlock.h>
+#include <linux/stat.h>
+#include <linux/types.h>
+#include <linux/wait_bit.h>
+#include <linux/workqueue.h>
+#include <uapi/linux/landlock.h>
+
+#include "common.h"
+#include "cred.h"
+#include "fs.h"
+#include "limits.h"
+#include "object.h"
+#include "ruleset.h"
+#include "setup.h"
+
+/* Underlying object management */
+
+static void release_inode(struct landlock_object *const object)
+ __releases(object->lock)
+{
+ struct inode *const inode = object->underobj;
+ struct super_block *sb;
+
+ if (!inode) {
+ spin_unlock(&object->lock);
+ return;
+ }
+
+ /*
+ * Protects against concurrent use by hook_sb_delete() of the reference
+ * to the underlying inode.
+ */
+ object->underobj = NULL;
+ /*
+ * Makes sure that if the filesystem is concurrently unmounted,
+ * hook_sb_delete() will wait for us to finish iput().
+ */
+ sb = inode->i_sb;
+ atomic_long_inc(&landlock_superblock(sb)->inode_refs);
+ spin_unlock(&object->lock);
+ /*
+ * Because object->underobj was not NULL, hook_sb_delete() and
+ * get_inode_object() guarantee that it is safe to reset
+ * landlock_inode(inode)->object while it is not NULL. It is therefore
+ * not necessary to lock inode->i_lock.
+ */
+ rcu_assign_pointer(landlock_inode(inode)->object, NULL);
+ /*
+ * Now, new rules can safely be tied to @inode with get_inode_object().
+ */
+
+ iput(inode);
+ if (atomic_long_dec_and_test(&landlock_superblock(sb)->inode_refs))
+ wake_up_var(&landlock_superblock(sb)->inode_refs);
+}
+
+static const struct landlock_object_underops landlock_fs_underops = {
+ .release = release_inode
+};
+
+/* Ruleset management */
+
+static struct landlock_object *get_inode_object(struct inode *const inode)
+{
+ struct landlock_object *object, *new_object;
+ struct landlock_inode_security *inode_sec = landlock_inode(inode);
+
+ rcu_read_lock();
+retry:
+ object = rcu_dereference(inode_sec->object);
+ if (object) {
+ if (likely(refcount_inc_not_zero(&object->usage))) {
+ rcu_read_unlock();
+ return object;
+ }
+ /*
+ * We are racing with release_inode(), the object is going
+ * away. Wait for release_inode(), then retry.
+ */
+ spin_lock(&object->lock);
+ spin_unlock(&object->lock);
+ goto retry;
+ }
+ rcu_read_unlock();
+
+ /*
+ * If there is no object tied to @inode, then create a new one (without
+ * holding any locks).
+ */
+ new_object = landlock_create_object(&landlock_fs_underops, inode);
+ if (IS_ERR(new_object))
+ return new_object;
+
+ /*
+ * Protects against concurrent calls to get_inode_object() or
+ * hook_sb_delete().
+ */
+ spin_lock(&inode->i_lock);
+ if (unlikely(rcu_access_pointer(inode_sec->object))) {
+ /* Someone else just created the object, bail out and retry. */
+ spin_unlock(&inode->i_lock);
+ kfree(new_object);
+
+ rcu_read_lock();
+ goto retry;
+ }
+
+ /*
+ * @inode will be released by hook_sb_delete() on its superblock
+ * shutdown, or by release_inode() when no more ruleset references the
+ * related object.
+ */
+ ihold(inode);
+ rcu_assign_pointer(inode_sec->object, new_object);
+ spin_unlock(&inode->i_lock);
+ return new_object;
+}
+
+/* All access rights that can be tied to files. */
+#define ACCESS_FILE ( \
+ LANDLOCK_ACCESS_FS_EXECUTE | \
+ LANDLOCK_ACCESS_FS_WRITE_FILE | \
+ LANDLOCK_ACCESS_FS_READ_FILE)
+
+/*
+ * @path: Should have been checked by get_path_from_fd().
+ */
+int landlock_append_fs_rule(struct landlock_ruleset *const ruleset,
+ const struct path *const path, u32 access_rights)
+{
+ int err;
+ struct landlock_object *object;
+
+ /* Files only get access rights that make sense. */
+ if (!d_is_dir(path->dentry) && (access_rights | ACCESS_FILE) !=
+ ACCESS_FILE)
+ return -EINVAL;
+ if (WARN_ON_ONCE(ruleset->num_layers != 1))
+ return -EINVAL;
+
+ /* Transforms relative access rights to absolute ones. */
+ access_rights |= LANDLOCK_MASK_ACCESS_FS & ~ruleset->fs_access_masks[0];
+ object = get_inode_object(d_backing_inode(path->dentry));
+ if (IS_ERR(object))
+ return PTR_ERR(object);
+ mutex_lock(&ruleset->lock);
+ err = landlock_insert_rule(ruleset, object, access_rights);
+ mutex_unlock(&ruleset->lock);
+ /*
+ * No need to check for an error because landlock_insert_rule()
+ * increments the refcount for the new object if needed.
+ */
+ landlock_put_object(object);
+ return err;
+}
+
+/* Access-control management */
+
+static inline u64 unmask_layers(
+ const struct landlock_ruleset *const domain,
+ const struct path *const path, const u32 access_request,
+ u64 layer_mask)
+{
+ const struct landlock_rule *rule;
+ const struct inode *inode;
+ size_t i;
+
+ if (d_is_negative(path->dentry))
+		/* Ignores nonexistent leaves. */
+ return layer_mask;
+ inode = d_backing_inode(path->dentry);
+ rcu_read_lock();
+ rule = landlock_find_rule(domain,
+ rcu_dereference(landlock_inode(inode)->object));
+ rcu_read_unlock();
+ if (!rule)
+ return layer_mask;
+
+ /*
+ * An access is granted if, for each policy layer, at least one rule
+ * encountered on the pathwalk grants the requested accesses,
+ * regardless of their position in the layer stack. We must then check
+ * the remaining layers for each inode, from the first added layer to
+ * the last one.
+ */
+ for (i = 0; i < rule->num_layers; i++) {
+ const struct landlock_layer *const layer = &rule->layers[i];
+ const u64 layer_level = BIT_ULL(layer->level - 1);
+
+ /* Checks that the layer grants access to the full request. */
+ if ((layer->access & access_request) == access_request) {
+ layer_mask &= ~layer_level;
+
+ if (layer_mask == 0)
+ return layer_mask;
+ }
+ }
+ return layer_mask;
+}
+
+static int check_access_path(const struct landlock_ruleset *const domain,
+ const struct path *const path, u32 access_request)
+{
+ bool allowed = false;
+ struct path walker_path;
+ u64 layer_mask;
+ size_t i;
+
+ /* Make sure all layers can be checked. */
+ BUILD_BUG_ON(BITS_PER_TYPE(layer_mask) < LANDLOCK_MAX_NUM_LAYERS);
+
+ if (!access_request)
+ return 0;
+ if (WARN_ON_ONCE(!domain || !path))
+ return 0;
+ /*
+ * Allows access to pseudo filesystems that will never be mountable
+ * (e.g. sockfs, pipefs), but can still be reachable through
+ * /proc/<pid>/fd/<file-descriptor> .
+ */
+ if ((path->dentry->d_sb->s_flags & SB_NOUSER) ||
+ (d_is_positive(path->dentry) &&
+ unlikely(IS_PRIVATE(d_backing_inode(path->dentry)))))
+ return 0;
+ if (WARN_ON_ONCE(domain->num_layers < 1))
+ return -EACCES;
+
+ /* Saves all layers handling a subset of requested accesses. */
+ layer_mask = 0;
+ for (i = 0; i < domain->num_layers; i++) {
+ if (domain->fs_access_masks[i] & access_request)
+ layer_mask |= BIT_ULL(i);
+ }
+ /* An access request not handled by the domain is allowed. */
+ if (layer_mask == 0)
+ return 0;
+
+ walker_path = *path;
+ path_get(&walker_path);
+ /*
+	 * We need to walk through the whole hierarchy so as not to miss any
+	 * relevant restriction.
+ */
+ while (true) {
+ struct dentry *parent_dentry;
+
+ layer_mask = unmask_layers(domain, &walker_path,
+ access_request, layer_mask);
+ if (layer_mask == 0) {
+ /* Stops when a rule from each layer grants access. */
+ allowed = true;
+ break;
+ }
+
+jump_up:
+ if (walker_path.dentry == walker_path.mnt->mnt_root) {
+ if (follow_up(&walker_path)) {
+ /* Ignores hidden mount points. */
+ goto jump_up;
+ } else {
+ /*
+ * Stops at the real root. Denies access
+ * because not all layers have granted access.
+ */
+ allowed = false;
+ break;
+ }
+ }
+ if (unlikely(IS_ROOT(walker_path.dentry))) {
+ /*
+ * Stops at disconnected root directories. Only allows
+ * access to internal filesystems (e.g. nsfs, which is
+ * reachable through /proc/<pid>/ns/<namespace>).
+ */
+ allowed = !!(walker_path.mnt->mnt_flags & MNT_INTERNAL);
+ break;
+ }
+ parent_dentry = dget_parent(walker_path.dentry);
+ dput(walker_path.dentry);
+ walker_path.dentry = parent_dentry;
+ }
+ path_put(&walker_path);
+ return allowed ? 0 : -EACCES;
+}
+
+static inline int current_check_access_path(const struct path *const path,
+ const u32 access_request)
+{
+ const struct landlock_ruleset *const dom =
+ landlock_get_current_domain();
+
+ if (!dom)
+ return 0;
+ return check_access_path(dom, path, access_request);
+}
+
+/* Inode hooks */
+
+static void hook_inode_free_security(struct inode *const inode)
+{
+ /*
+ * All inodes must already have been untied from their object by
+ * release_inode() or hook_sb_delete().
+ */
+ WARN_ON_ONCE(landlock_inode(inode)->object);
+}
+
+/* Super-block hooks */
+
+/*
+ * Release the inodes used in a security policy.
+ *
+ * Cf. fsnotify_unmount_inodes() and invalidate_inodes()
+ */
+static void hook_sb_delete(struct super_block *const sb)
+{
+ struct inode *inode, *prev_inode = NULL;
+
+ if (!landlock_initialized)
+ return;
+
+ spin_lock(&sb->s_inode_list_lock);
+ list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
+ struct landlock_object *object;
+
+ /* Only handles referenced inodes. */
+ if (!atomic_read(&inode->i_count))
+ continue;
+
+ /*
+		 * Protects against concurrent modification of the inode
+		 * (e.g. from get_inode_object()).
+ */
+ spin_lock(&inode->i_lock);
+ /*
+ * Checks I_FREEING and I_WILL_FREE to protect against a race
+ * condition when release_inode() just called iput(), which
+ * could lead to a NULL dereference of inode->security or a
+ * second call to iput() for the same Landlock object. Also
+		 * checks I_NEW as such an inode cannot be tied to an object.
+ */
+ if (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW)) {
+ spin_unlock(&inode->i_lock);
+ continue;
+ }
+
+ rcu_read_lock();
+ object = rcu_dereference(landlock_inode(inode)->object);
+ if (!object) {
+ rcu_read_unlock();
+ spin_unlock(&inode->i_lock);
+ continue;
+ }
+ /* Keeps a reference to this inode until the next loop walk. */
+ __iget(inode);
+ spin_unlock(&inode->i_lock);
+
+ /*
+ * If there is no concurrent release_inode() ongoing, then we
+ * are in charge of calling iput() on this inode, otherwise we
+ * will just wait for it to finish.
+ */
+ spin_lock(&object->lock);
+ if (object->underobj == inode) {
+ object->underobj = NULL;
+ spin_unlock(&object->lock);
+ rcu_read_unlock();
+
+ /*
+ * Because object->underobj was not NULL,
+ * release_inode() and get_inode_object() guarantee
+ * that it is safe to reset
+ * landlock_inode(inode)->object while it is not NULL.
+ * It is therefore not necessary to lock inode->i_lock.
+ */
+ rcu_assign_pointer(landlock_inode(inode)->object, NULL);
+ /*
+ * At this point, we own the ihold() reference that was
+ * originally set up by get_inode_object() and the
+ * __iget() reference that we just set in this loop
+ * walk. Therefore the following call to iput() will
+			 * not sleep nor drop the inode because there are now
+			 * at least two references to it.
+ */
+ iput(inode);
+ } else {
+ spin_unlock(&object->lock);
+ rcu_read_unlock();
+ }
+
+ if (prev_inode) {
+ /*
+ * At this point, we still own the __iget() reference
+ * that we just set in this loop walk. Therefore we
+ * can drop the list lock and know that the inode won't
+ * disappear from under us until the next loop walk.
+ */
+ spin_unlock(&sb->s_inode_list_lock);
+ /*
+ * We can now actually put the inode reference from the
+ * previous loop walk, which is not needed anymore.
+ */
+ iput(prev_inode);
+ cond_resched();
+ spin_lock(&sb->s_inode_list_lock);
+ }
+ prev_inode = inode;
+ }
+ spin_unlock(&sb->s_inode_list_lock);
+
+ /* Puts the inode reference from the last loop walk, if any. */
+ if (prev_inode)
+ iput(prev_inode);
+ /* Waits for pending iput() in release_inode(). */
+ wait_var_event(&landlock_superblock(sb)->inode_refs, !atomic_long_read(
+ &landlock_superblock(sb)->inode_refs));
+}
+
+/*
+ * Because a Landlock security policy is defined according to the filesystem
+ * topology (i.e. the mount namespace), changing it may grant access to files
+ * not previously allowed.
+ *
+ * To make it simple, deny any filesystem topology modification by landlocked
+ * processes. Non-landlocked processes may still change the namespace of a
+ * landlocked process, but this kind of threat must be handled by a system-wide
+ * access-control security policy.
+ *
+ * This could be lifted in the future if Landlock can safely handle mount
+ * namespace updates requested by a landlocked process. Indeed, we could
+ * update the current domain (which is currently read-only) by taking into
+ * account the accesses of the source and the destination of a new mount point.
+ * However, it would also require to make all the child domains dynamically
+ * inherit these new constraints. Anyway, for backward compatibility reasons,
+ * a dedicated user space option would be required (e.g. as a ruleset flag).
+ */
+static int hook_sb_mount(const char *const dev_name,
+ const struct path *const path, const char *const type,
+ const unsigned long flags, void *const data)
+{
+ if (!landlock_get_current_domain())
+ return 0;
+ return -EPERM;
+}
+
+static int hook_move_mount(const struct path *const from_path,
+ const struct path *const to_path)
+{
+ if (!landlock_get_current_domain())
+ return 0;
+ return -EPERM;
+}
+
+/*
+ * Removing a mount point may reveal a previously hidden file hierarchy, which
+ * may then grant access to files, which may have previously been forbidden.
+ */
+static int hook_sb_umount(struct vfsmount *const mnt, const int flags)
+{
+ if (!landlock_get_current_domain())
+ return 0;
+ return -EPERM;
+}
+
+static int hook_sb_remount(struct super_block *const sb, void *const mnt_opts)
+{
+ if (!landlock_get_current_domain())
+ return 0;
+ return -EPERM;
+}
+
+/*
+ * pivot_root(2), like mount(2), changes the current mount namespace. It must
+ * then be forbidden for a landlocked process.
+ *
+ * However, chroot(2) may be allowed because it only changes the relative root
+ * directory of the current process. Moreover, it can be used to restrict the
+ * view of the filesystem.
+ */
+static int hook_sb_pivotroot(const struct path *const old_path,
+ const struct path *const new_path)
+{
+ if (!landlock_get_current_domain())
+ return 0;
+ return -EPERM;
+}
+
+/* Path hooks */
+
+static inline u32 get_mode_access(const umode_t mode)
+{
+ switch (mode & S_IFMT) {
+ case S_IFLNK:
+ return LANDLOCK_ACCESS_FS_MAKE_SYM;
+ case 0:
+ /* A zero mode translates to S_IFREG. */
+ case S_IFREG:
+ return LANDLOCK_ACCESS_FS_MAKE_REG;
+ case S_IFDIR:
+ return LANDLOCK_ACCESS_FS_MAKE_DIR;
+ case S_IFCHR:
+ return LANDLOCK_ACCESS_FS_MAKE_CHAR;
+ case S_IFBLK:
+ return LANDLOCK_ACCESS_FS_MAKE_BLOCK;
+ case S_IFIFO:
+ return LANDLOCK_ACCESS_FS_MAKE_FIFO;
+ case S_IFSOCK:
+ return LANDLOCK_ACCESS_FS_MAKE_SOCK;
+ default:
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+}
+
+/*
+ * Creating multiple links or renaming may lead to privilege escalations if not
+ * handled properly. Indeed, we must be sure that the source doesn't gain more
+ * privileges by being accessible from the destination. This is getting more
+ * complex when dealing with multiple layers. The whole picture can be seen as
+ * a multilayer partial ordering problem. A future version of Landlock will
+ * deal with that.
+ */
+static int hook_path_link(struct dentry *const old_dentry,
+ const struct path *const new_dir,
+ struct dentry *const new_dentry)
+{
+ const struct landlock_ruleset *const dom =
+ landlock_get_current_domain();
+
+ if (!dom)
+ return 0;
+ /* The mount points are the same for old and new paths, cf. EXDEV. */
+ if (old_dentry->d_parent != new_dir->dentry)
+ /* Gracefully forbids reparenting. */
+ return -EXDEV;
+ if (unlikely(d_is_negative(old_dentry)))
+ return -ENOENT;
+ return check_access_path(dom, new_dir,
+ get_mode_access(d_backing_inode(old_dentry)->i_mode));
+}
+
+static inline u32 maybe_remove(const struct dentry *const dentry)
+{
+ if (d_is_negative(dentry))
+ return 0;
+ return d_is_dir(dentry) ? LANDLOCK_ACCESS_FS_REMOVE_DIR :
+ LANDLOCK_ACCESS_FS_REMOVE_FILE;
+}
+
+static int hook_path_rename(const struct path *const old_dir,
+ struct dentry *const old_dentry,
+ const struct path *const new_dir,
+ struct dentry *const new_dentry)
+{
+ const struct landlock_ruleset *const dom =
+ landlock_get_current_domain();
+
+ if (!dom)
+ return 0;
+ /* The mount points are the same for old and new paths, cf. EXDEV. */
+ if (old_dir->dentry != new_dir->dentry)
+ /* Gracefully forbids reparenting. */
+ return -EXDEV;
+ if (unlikely(d_is_negative(old_dentry)))
+ return -ENOENT;
+ /* RENAME_EXCHANGE is handled because directories are the same. */
+ return check_access_path(dom, old_dir, maybe_remove(old_dentry) |
+ maybe_remove(new_dentry) |
+ get_mode_access(d_backing_inode(old_dentry)->i_mode));
+}
+
+static int hook_path_mkdir(const struct path *const dir,
+ struct dentry *const dentry, const umode_t mode)
+{
+ return current_check_access_path(dir, LANDLOCK_ACCESS_FS_MAKE_DIR);
+}
+
+static int hook_path_mknod(const struct path *const dir,
+ struct dentry *const dentry, const umode_t mode,
+ const unsigned int dev)
+{
+ const struct landlock_ruleset *const dom =
+ landlock_get_current_domain();
+
+ if (!dom)
+ return 0;
+ return check_access_path(dom, dir, get_mode_access(mode));
+}
+
+static int hook_path_symlink(const struct path *const dir,
+ struct dentry *const dentry, const char *const old_name)
+{
+ return current_check_access_path(dir, LANDLOCK_ACCESS_FS_MAKE_SYM);
+}
+
+static int hook_path_unlink(const struct path *const dir,
+ struct dentry *const dentry)
+{
+ return current_check_access_path(dir, LANDLOCK_ACCESS_FS_REMOVE_FILE);
+}
+
+static int hook_path_rmdir(const struct path *const dir,
+ struct dentry *const dentry)
+{
+ return current_check_access_path(dir, LANDLOCK_ACCESS_FS_REMOVE_DIR);
+}
+
+/* File hooks */
+
+static inline u32 get_file_access(const struct file *const file)
+{
+ u32 access = 0;
+
+ if (file->f_mode & FMODE_READ) {
+ /* A directory can only be opened in read mode. */
+ if (S_ISDIR(file_inode(file)->i_mode))
+ return LANDLOCK_ACCESS_FS_READ_DIR;
+ access = LANDLOCK_ACCESS_FS_READ_FILE;
+ }
+ if (file->f_mode & FMODE_WRITE)
+ access |= LANDLOCK_ACCESS_FS_WRITE_FILE;
+ /* __FMODE_EXEC is indeed part of f_flags, not f_mode. */
+ if (file->f_flags & __FMODE_EXEC)
+ access |= LANDLOCK_ACCESS_FS_EXECUTE;
+ return access;
+}
+
+static int hook_file_open(struct file *const file)
+{
+ const struct landlock_ruleset *const dom =
+ landlock_get_current_domain();
+
+ if (!dom)
+ return 0;
+ /*
+ * Because a file may be opened with O_PATH, get_file_access() may
+ * return 0. This case will be handled with a future Landlock
+ * evolution.
+ */
+ return check_access_path(dom, &file->f_path, get_file_access(file));
+}
+
+static struct security_hook_list landlock_hooks[] __lsm_ro_after_init = {
+ LSM_HOOK_INIT(inode_free_security, hook_inode_free_security),
+
+ LSM_HOOK_INIT(sb_delete, hook_sb_delete),
+ LSM_HOOK_INIT(sb_mount, hook_sb_mount),
+ LSM_HOOK_INIT(move_mount, hook_move_mount),
+ LSM_HOOK_INIT(sb_umount, hook_sb_umount),
+ LSM_HOOK_INIT(sb_remount, hook_sb_remount),
+ LSM_HOOK_INIT(sb_pivotroot, hook_sb_pivotroot),
+
+ LSM_HOOK_INIT(path_link, hook_path_link),
+ LSM_HOOK_INIT(path_rename, hook_path_rename),
+ LSM_HOOK_INIT(path_mkdir, hook_path_mkdir),
+ LSM_HOOK_INIT(path_mknod, hook_path_mknod),
+ LSM_HOOK_INIT(path_symlink, hook_path_symlink),
+ LSM_HOOK_INIT(path_unlink, hook_path_unlink),
+ LSM_HOOK_INIT(path_rmdir, hook_path_rmdir),
+
+ LSM_HOOK_INIT(file_open, hook_file_open),
+};
+
+__init void landlock_add_fs_hooks(void)
+{
+ security_add_hooks(landlock_hooks, ARRAY_SIZE(landlock_hooks),
+ LANDLOCK_NAME);
+}
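The heart of this file is check_access_path(): seed a bitmask with one bit per layer that handles the requested access, walk from the leaf toward the root, and clear a layer's bit as soon as one rule of that layer on the path grants the whole request; access is allowed only if the mask reaches zero. A compact userspace sketch of that algorithm follows, where struct node and its grants array are hypothetical simplifications of the dentry walk and the rb-tree rule lookup:

#include <stdint.h>
#include <stdio.h>

#define MAX_LAYERS 64

struct node {
	const struct node *parent;	/* NULL at the root */
	uint32_t grants[MAX_LAYERS];	/* per-layer access granted here */
};

/* Returns 1 if every interested layer grants @request on the path. */
static int check_access(const struct node *leaf, uint32_t request,
			const uint32_t *handled, int num_layers)
{
	uint64_t mask = 0;
	const struct node *walker;
	int i;

	for (i = 0; i < num_layers; i++)
		if (handled[i] & request)
			mask |= 1ULL << i;
	if (!mask)		/* unhandled accesses are allowed */
		return 1;
	for (walker = leaf; walker; walker = walker->parent) {
		for (i = 0; i < num_layers; i++)
			if ((walker->grants[i] & request) == request)
				mask &= ~(1ULL << i);
		if (!mask)	/* every layer granted the full request */
			return 1;
	}
	return 0;		/* reached the root with layers unsatisfied */
}

int main(void)
{
	struct node root = { 0 };
	struct node dir = { .parent = &root };
	uint32_t handled[1] = { 0x1 };	/* layer 0 handles "read" */

	dir.grants[0] = 0x1;		/* layer 0 grants read on dir */
	printf("%s\n", check_access(&dir, 0x1, handled, 1) ?
	       "allowed" : "denied");
	return 0;
}

The 64-bit mask is also why LANDLOCK_MAX_NUM_LAYERS is 64, a constraint the kernel asserts with BUILD_BUG_ON().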
diff --git a/security/landlock/fs.h b/security/landlock/fs.h
new file mode 100644
index 000000000000..187284b421c9
--- /dev/null
+++ b/security/landlock/fs.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Landlock LSM - Filesystem management and hooks
+ *
+ * Copyright © 2017-2020 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2018-2020 ANSSI
+ */
+
+#ifndef _SECURITY_LANDLOCK_FS_H
+#define _SECURITY_LANDLOCK_FS_H
+
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/rcupdate.h>
+
+#include "ruleset.h"
+#include "setup.h"
+
+/**
+ * struct landlock_inode_security - Inode security blob
+ *
+ * Enables referencing a &struct landlock_object tied to an inode (i.e. the
+ * underlying object).
+ */
+struct landlock_inode_security {
+ /**
+ * @object: Weak pointer to an allocated object. All assignments of a
+ * new object are protected by the underlying inode->i_lock. However,
+ * atomically disassociating @object from the inode is only protected
+ * by @object->lock, from the time @object's usage refcount drops to
+ * zero to the time this pointer is nulled out (cf. release_inode() and
+ * hook_sb_delete()). Indeed, such disassociation doesn't require
+ * inode->i_lock thanks to the careful rcu_access_pointer() check
+ * performed by get_inode_object().
+ */
+ struct landlock_object __rcu *object;
+};
+
+/**
+ * struct landlock_superblock_security - Superblock security blob
+ *
+ * Enable hook_sb_delete() to wait for concurrent calls to release_inode().
+ */
+struct landlock_superblock_security {
+ /**
+ * @inode_refs: Number of pending inodes (from this superblock) that
+ * are being released by release_inode().
+ * Cf. struct super_block->s_fsnotify_inode_refs .
+ */
+ atomic_long_t inode_refs;
+};
+
+static inline struct landlock_inode_security *landlock_inode(
+ const struct inode *const inode)
+{
+ return inode->i_security + landlock_blob_sizes.lbs_inode;
+}
+
+static inline struct landlock_superblock_security *landlock_superblock(
+ const struct super_block *const superblock)
+{
+ return superblock->s_security + landlock_blob_sizes.lbs_superblock;
+}
+
+__init void landlock_add_fs_hooks(void);
+
+int landlock_append_fs_rule(struct landlock_ruleset *const ruleset,
+ const struct path *const path, u32 access_hierarchy);
+
+#endif /* _SECURITY_LANDLOCK_FS_H */
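landlock_inode() and landlock_superblock() follow the stacked-LSM blob convention: all LSMs share a single security allocation per object, and each LSM indexes into it with a byte offset assigned at boot (landlock_blob_sizes here). A standalone sketch of that layout; the two demo LSM state structs and assign_offsets() are hypothetical:

#include <stdio.h>
#include <stdlib.h>

struct lsm_a_state { int flag; };
struct lsm_b_state { long counter; };

/* Offsets assigned once at "boot", one per LSM, like lsm_blob_sizes. */
static size_t lsm_a_offset, lsm_b_offset, blob_size;

static void assign_offsets(void)
{
	lsm_a_offset = blob_size;
	blob_size += sizeof(struct lsm_a_state);
	lsm_b_offset = blob_size;
	blob_size += sizeof(struct lsm_b_state);
}

/* Analogue of landlock_inode(): pointer arithmetic into the shared blob. */
static struct lsm_b_state *lsm_b(void *security_blob)
{
	return (struct lsm_b_state *)((char *)security_blob + lsm_b_offset);
}

int main(void)
{
	void *blob;

	assign_offsets();
	blob = calloc(1, blob_size);	/* one allocation for all LSMs */
	lsm_b(blob)->counter = 42;
	printf("lsm_b counter: %ld\n", lsm_b(blob)->counter);
	free(blob);
	return 0;
}

One allocation per inode keeps stacked LSMs from each hanging their own pointer off the object, at the cost of fixed offsets decided at boot.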
diff --git a/security/landlock/limits.h b/security/landlock/limits.h
new file mode 100644
index 000000000000..2a0a1095ee27
--- /dev/null
+++ b/security/landlock/limits.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Landlock LSM - Limits for different components
+ *
+ * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2018-2020 ANSSI
+ */
+
+#ifndef _SECURITY_LANDLOCK_LIMITS_H
+#define _SECURITY_LANDLOCK_LIMITS_H
+
+#include <linux/limits.h>
+#include <uapi/linux/landlock.h>
+
+#define LANDLOCK_MAX_NUM_LAYERS 64
+#define LANDLOCK_MAX_NUM_RULES U32_MAX
+
+#define LANDLOCK_LAST_ACCESS_FS LANDLOCK_ACCESS_FS_MAKE_SYM
+#define LANDLOCK_MASK_ACCESS_FS ((LANDLOCK_LAST_ACCESS_FS << 1) - 1)
+
+#endif /* _SECURITY_LANDLOCK_LIMITS_H */
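LANDLOCK_MASK_ACCESS_FS relies on the access-rights bits being contiguous from bit 0: if the last right is the highest set bit, then ((last << 1) - 1) sets that bit and every bit below it. A quick check of the identity; taking 1 << 12 as the last access bit is an assumption for illustration:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint64_t last = UINT64_C(1) << 12;	/* highest access bit */
	const uint64_t mask = (last << 1) - 1;		/* bits 12..0 all set */

	assert(mask == 0x1fff);
	return 0;
}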
diff --git a/security/landlock/object.c b/security/landlock/object.c
new file mode 100644
index 000000000000..d674fdf9ff04
--- /dev/null
+++ b/security/landlock/object.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Landlock LSM - Object management
+ *
+ * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2018-2020 ANSSI
+ */
+
+#include <linux/bug.h>
+#include <linux/compiler_types.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/rcupdate.h>
+#include <linux/refcount.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "object.h"
+
+struct landlock_object *landlock_create_object(
+ const struct landlock_object_underops *const underops,
+ void *const underobj)
+{
+ struct landlock_object *new_object;
+
+ if (WARN_ON_ONCE(!underops || !underobj))
+ return ERR_PTR(-ENOENT);
+ new_object = kzalloc(sizeof(*new_object), GFP_KERNEL_ACCOUNT);
+ if (!new_object)
+ return ERR_PTR(-ENOMEM);
+ refcount_set(&new_object->usage, 1);
+ spin_lock_init(&new_object->lock);
+ new_object->underops = underops;
+ new_object->underobj = underobj;
+ return new_object;
+}
+
+/*
+ * The caller must own the object (i.e. thanks to object->usage) to safely put
+ * it.
+ */
+void landlock_put_object(struct landlock_object *const object)
+{
+ /*
+ * The call to @object->underops->release(object) might sleep, e.g.
+ * because of iput().
+ */
+ might_sleep();
+ if (!object)
+ return;
+
+ /*
+ * If the @object's refcount cannot drop to zero, we can just decrement
+ * the refcount without holding a lock. Otherwise, the decrement must
+ * happen under @object->lock for synchronization with things like
+ * get_inode_object().
+ */
+ if (refcount_dec_and_lock(&object->usage, &object->lock)) {
+ __acquire(&object->lock);
+ /*
+ * With @object->lock initially held, remove the reference from
+ * @object->underobj to @object (if it still exists).
+ */
+ object->underops->release(object);
+ kfree_rcu(object, rcu_free);
+ }
+}
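landlock_put_object() hinges on refcount_dec_and_lock(): only the final decrement takes @object->lock, so the thread that wins teardown can detach the weak back-reference atomically with the count reaching zero. A userspace sketch of the same pattern built from a mutex and a plain counter; it is coarser than the kernel primitive (which only contends the lock on the 1 -> 0 transition), and free() stands in for the deferred kfree_rcu():

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct object {
	int usage;
	pthread_mutex_t lock;
	void *underobj;		/* weak back-reference, may be cleared */
};

/* Coarse analogue of refcount_dec_and_lock(): returns 1 with the lock
 * held when the count hits zero, 0 otherwise. */
static int dec_and_lock(struct object *obj)
{
	pthread_mutex_lock(&obj->lock);
	if (--obj->usage == 0)
		return 1;	/* caller unlocks after teardown */
	pthread_mutex_unlock(&obj->lock);
	return 0;
}

static void put_object(struct object *obj)
{
	if (dec_and_lock(obj)) {
		obj->underobj = NULL;	/* detach while still locked */
		pthread_mutex_unlock(&obj->lock);
		free(obj);		/* the kernel defers this via RCU */
	}
}

int main(void)
{
	struct object *obj = calloc(1, sizeof(*obj));

	pthread_mutex_init(&obj->lock, NULL);
	obj->usage = 2;
	put_object(obj);	/* still referenced elsewhere */
	put_object(obj);	/* last put: detach and free */
	puts("done");
	return 0;
}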
diff --git a/security/landlock/object.h b/security/landlock/object.h
new file mode 100644
index 000000000000..3f80674c6c8d
--- /dev/null
+++ b/security/landlock/object.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Landlock LSM - Object management
+ *
+ * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2018-2020 ANSSI
+ */
+
+#ifndef _SECURITY_LANDLOCK_OBJECT_H
+#define _SECURITY_LANDLOCK_OBJECT_H
+
+#include <linux/compiler_types.h>
+#include <linux/refcount.h>
+#include <linux/spinlock.h>
+
+struct landlock_object;
+
+/**
+ * struct landlock_object_underops - Operations on an underlying object
+ */
+struct landlock_object_underops {
+ /**
+ * @release: Releases the underlying object (e.g. iput() for an inode).
+ */
+ void (*release)(struct landlock_object *const object)
+ __releases(object->lock);
+};
+
+/**
+ * struct landlock_object - Security blob tied to a kernel object
+ *
+ * The goal of this structure is to make it possible to tie a set of ephemeral
+ * access rights (pertaining to different domains) to a kernel object (e.g. an
+ * inode) in a safe way. This implies handling concurrent use and modification.
+ *
+ * The lifetime of a &struct landlock_object depends on the rules referring to
+ * it.
+ */
+struct landlock_object {
+ /**
+ * @usage: This counter is used to tie an object to the rules matching
+ * it or to keep it alive while adding a new rule. If this counter
+ * reaches zero, this struct must not be modified, but this counter can
+ * still be read from within an RCU read-side critical section. When
+ * adding a new rule to an object with a usage counter of zero, we must
+ * wait until the pointer to this object is set to NULL (or recycled).
+ */
+ refcount_t usage;
+ /**
+ * @lock: Protects against concurrent modifications. This lock must be
+ * held from the time @usage drops to zero until any weak references
+ * from @underobj to this object have been cleaned up.
+ *
+ * Lock ordering: inode->i_lock nests inside this.
+ */
+ spinlock_t lock;
+ /**
+ * @underobj: Used when cleaning up an object and to mark an object as
+ * tied to its underlying kernel structure. This pointer is protected
+	 * by @lock. Cf. hook_sb_delete() and release_inode().
+ */
+ void *underobj;
+ union {
+ /**
+ * @rcu_free: Enables lockless use of @usage, @lock and
+ * @underobj from within an RCU read-side critical section.
+ * @rcu_free and @underops are only used by
+ * landlock_put_object().
+ */
+ struct rcu_head rcu_free;
+ /**
+ * @underops: Enables landlock_put_object() to release the
+ * underlying object (e.g. inode).
+ */
+ const struct landlock_object_underops *underops;
+ };
+};
+
+struct landlock_object *landlock_create_object(
+ const struct landlock_object_underops *const underops,
+ void *const underobj);
+
+void landlock_put_object(struct landlock_object *const object);
+
+static inline void landlock_get_object(struct landlock_object *const object)
+{
+ if (object)
+ refcount_inc(&object->usage);
+}
+
+#endif /* _SECURITY_LANDLOCK_OBJECT_H */
diff --git a/security/landlock/ptrace.c b/security/landlock/ptrace.c
new file mode 100644
index 000000000000..f55b82446de2
--- /dev/null
+++ b/security/landlock/ptrace.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Landlock LSM - Ptrace hooks
+ *
+ * Copyright © 2017-2020 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2019-2020 ANSSI
+ */
+
+#include <asm/current.h>
+#include <linux/cred.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/lsm_hooks.h>
+#include <linux/rcupdate.h>
+#include <linux/sched.h>
+
+#include "common.h"
+#include "cred.h"
+#include "ptrace.h"
+#include "ruleset.h"
+#include "setup.h"
+
+/**
+ * domain_scope_le - Checks domain ordering for scoped ptrace
+ *
+ * @parent: Parent domain.
+ * @child: Potential child of @parent.
+ *
+ * Checks if the @parent domain is less than or equal to (i.e. an ancestor,
+ * which means a subset of) the @child domain.
+ */
+static bool domain_scope_le(const struct landlock_ruleset *const parent,
+ const struct landlock_ruleset *const child)
+{
+ const struct landlock_hierarchy *walker;
+
+ if (!parent)
+ return true;
+ if (!child)
+ return false;
+ for (walker = child->hierarchy; walker; walker = walker->parent) {
+ if (walker == parent->hierarchy)
+ /* @parent is in the scoped hierarchy of @child. */
+ return true;
+ }
+ /* There is no relationship between @parent and @child. */
+ return false;
+}
+
+static bool task_is_scoped(const struct task_struct *const parent,
+ const struct task_struct *const child)
+{
+ bool is_scoped;
+ const struct landlock_ruleset *dom_parent, *dom_child;
+
+ rcu_read_lock();
+ dom_parent = landlock_get_task_domain(parent);
+ dom_child = landlock_get_task_domain(child);
+ is_scoped = domain_scope_le(dom_parent, dom_child);
+ rcu_read_unlock();
+ return is_scoped;
+}
+
+static int task_ptrace(const struct task_struct *const parent,
+ const struct task_struct *const child)
+{
+ /* Quick return for non-landlocked tasks. */
+ if (!landlocked(parent))
+ return 0;
+ if (task_is_scoped(parent, child))
+ return 0;
+ return -EPERM;
+}
+
+/**
+ * hook_ptrace_access_check - Determines whether the current process may access
+ * another
+ *
+ * @child: Process to be accessed.
+ * @mode: Mode of attachment.
+ *
+ * If the current task has Landlock rules, then the child must have at least
+ * the same rules, otherwise access is denied.
+ *
+ * Determines whether a process may access another, returning 0 if permission
+ * is granted, -errno if denied.
+ */
+static int hook_ptrace_access_check(struct task_struct *const child,
+ const unsigned int mode)
+{
+ return task_ptrace(current, child);
+}
+
+/**
+ * hook_ptrace_traceme - Determines whether another process may trace the
+ * current one
+ *
+ * @parent: Task proposed to be the tracer.
+ *
+ * If the parent has Landlock rules, then the current task must have the same
+ * or more rules, otherwise the request is denied.
+ *
+ * Determines whether the nominated task is permitted to trace the current
+ * process, returning 0 if permission is granted, -errno if denied.
+ */
+static int hook_ptrace_traceme(struct task_struct *const parent)
+{
+ return task_ptrace(parent, current);
+}
+
+static struct security_hook_list landlock_hooks[] __lsm_ro_after_init = {
+ LSM_HOOK_INIT(ptrace_access_check, hook_ptrace_access_check),
+ LSM_HOOK_INIT(ptrace_traceme, hook_ptrace_traceme),
+};
+
+__init void landlock_add_ptrace_hooks(void)
+{
+ security_add_hooks(landlock_hooks, ARRAY_SIZE(landlock_hooks),
+ LANDLOCK_NAME);
+}
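domain_scope_le() reduces Landlock's ptrace policy to an ancestry test on the hierarchy chain: a tracer may act on a tracee only if the tracer's domain is an ancestor of, or equal to, the tracee's. A standalone sketch of that walk, where struct hierarchy is a simplified stand-in for struct landlock_hierarchy:

#include <stdio.h>

struct hierarchy {
	const struct hierarchy *parent;	/* NULL for the first domain */
};

/* Returns 1 if @parent is an ancestor of (or equal to) @child. */
static int scope_le(const struct hierarchy *parent,
		    const struct hierarchy *child)
{
	const struct hierarchy *walker;

	if (!parent)		/* an unrestricted tracer may trace anyone */
		return 1;
	for (walker = child; walker; walker = walker->parent)
		if (walker == parent)
			return 1;
	return 0;
}

int main(void)
{
	struct hierarchy grandparent = { 0 };
	struct hierarchy parent = { .parent = &grandparent };
	struct hierarchy child = { .parent = &parent };

	printf("parent->child: %d\n", scope_le(&parent, &child));	/* 1 */
	printf("child->parent: %d\n", scope_le(&child, &parent));	/* 0 */
	return 0;
}

Because every landlock_restrict_self() appends a node to the chain, a more-confined child can never trace its less-confined ancestor.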
diff --git a/security/landlock/ptrace.h b/security/landlock/ptrace.h
new file mode 100644
index 000000000000..265b220ae3bf
--- /dev/null
+++ b/security/landlock/ptrace.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Landlock LSM - Ptrace hooks
+ *
+ * Copyright © 2017-2019 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2019 ANSSI
+ */
+
+#ifndef _SECURITY_LANDLOCK_PTRACE_H
+#define _SECURITY_LANDLOCK_PTRACE_H
+
+__init void landlock_add_ptrace_hooks(void);
+
+#endif /* _SECURITY_LANDLOCK_PTRACE_H */
diff --git a/security/landlock/ruleset.c b/security/landlock/ruleset.c
new file mode 100644
index 000000000000..ec72b9262bf3
--- /dev/null
+++ b/security/landlock/ruleset.c
@@ -0,0 +1,473 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Landlock LSM - Ruleset management
+ *
+ * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2018-2020 ANSSI
+ */
+
+#include <linux/bits.h>
+#include <linux/bug.h>
+#include <linux/compiler_types.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/lockdep.h>
+#include <linux/overflow.h>
+#include <linux/rbtree.h>
+#include <linux/refcount.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include "limits.h"
+#include "object.h"
+#include "ruleset.h"
+
+static struct landlock_ruleset *create_ruleset(const u32 num_layers)
+{
+ struct landlock_ruleset *new_ruleset;
+
+ new_ruleset = kzalloc(struct_size(new_ruleset, fs_access_masks,
+ num_layers), GFP_KERNEL_ACCOUNT);
+ if (!new_ruleset)
+ return ERR_PTR(-ENOMEM);
+ refcount_set(&new_ruleset->usage, 1);
+ mutex_init(&new_ruleset->lock);
+ new_ruleset->root = RB_ROOT;
+ new_ruleset->num_layers = num_layers;
+ /*
+ * hierarchy = NULL
+ * num_rules = 0
+ * fs_access_masks[] = 0
+ */
+ return new_ruleset;
+}
+
+struct landlock_ruleset *landlock_create_ruleset(const u32 fs_access_mask)
+{
+ struct landlock_ruleset *new_ruleset;
+
+ /* Informs about useless ruleset. */
+ if (!fs_access_mask)
+ return ERR_PTR(-ENOMSG);
+ new_ruleset = create_ruleset(1);
+ if (!IS_ERR(new_ruleset))
+ new_ruleset->fs_access_masks[0] = fs_access_mask;
+ return new_ruleset;
+}
+
+static void build_check_rule(void)
+{
+ const struct landlock_rule rule = {
+ .num_layers = ~0,
+ };
+
+ BUILD_BUG_ON(rule.num_layers < LANDLOCK_MAX_NUM_LAYERS);
+}
+
+static struct landlock_rule *create_rule(
+ struct landlock_object *const object,
+ const struct landlock_layer (*const layers)[],
+ const u32 num_layers,
+ const struct landlock_layer *const new_layer)
+{
+ struct landlock_rule *new_rule;
+ u32 new_num_layers;
+
+ build_check_rule();
+ if (new_layer) {
+ /* Should already be checked by landlock_merge_ruleset(). */
+ if (WARN_ON_ONCE(num_layers >= LANDLOCK_MAX_NUM_LAYERS))
+ return ERR_PTR(-E2BIG);
+ new_num_layers = num_layers + 1;
+ } else {
+ new_num_layers = num_layers;
+ }
+ new_rule = kzalloc(struct_size(new_rule, layers, new_num_layers),
+ GFP_KERNEL_ACCOUNT);
+ if (!new_rule)
+ return ERR_PTR(-ENOMEM);
+ RB_CLEAR_NODE(&new_rule->node);
+ landlock_get_object(object);
+ new_rule->object = object;
+ new_rule->num_layers = new_num_layers;
+ /* Copies the original layer stack. */
+ memcpy(new_rule->layers, layers,
+ flex_array_size(new_rule, layers, num_layers));
+ if (new_layer)
+ /* Adds a copy of @new_layer on the layer stack. */
+ new_rule->layers[new_rule->num_layers - 1] = *new_layer;
+ return new_rule;
+}
+
+static void free_rule(struct landlock_rule *const rule)
+{
+ might_sleep();
+ if (!rule)
+ return;
+ landlock_put_object(rule->object);
+ kfree(rule);
+}
+
+static void build_check_ruleset(void)
+{
+ const struct landlock_ruleset ruleset = {
+ .num_rules = ~0,
+ .num_layers = ~0,
+ };
+ typeof(ruleset.fs_access_masks[0]) fs_access_mask = ~0;
+
+ BUILD_BUG_ON(ruleset.num_rules < LANDLOCK_MAX_NUM_RULES);
+ BUILD_BUG_ON(ruleset.num_layers < LANDLOCK_MAX_NUM_LAYERS);
+ BUILD_BUG_ON(fs_access_mask < LANDLOCK_MASK_ACCESS_FS);
+}
+
+/**
+ * insert_rule - Create and insert a rule in a ruleset
+ *
+ * @ruleset: The ruleset to be updated.
+ * @object: The object to build the new rule with. The underlying kernel
+ * object must be held by the caller.
+ * @layers: One or multiple layers to be copied into the new rule.
+ * @num_layers: The number of @layers entries.
+ *
+ * When user space requests to add a new rule to a ruleset, @layers only
+ * contains one entry and this entry is not assigned to any level. In this
+ * case, the new rule will extend @ruleset, similarly to a boolean OR between
+ * access rights.
+ *
+ * When merging a ruleset in a domain, or copying a domain, @layers will be
+ * added to @ruleset as new constraints, similarly to a boolean AND between
+ * access rights.
+ */
+static int insert_rule(struct landlock_ruleset *const ruleset,
+ struct landlock_object *const object,
+ const struct landlock_layer (*const layers)[],
+ size_t num_layers)
+{
+ struct rb_node **walker_node;
+ struct rb_node *parent_node = NULL;
+ struct landlock_rule *new_rule;
+
+ might_sleep();
+ lockdep_assert_held(&ruleset->lock);
+ if (WARN_ON_ONCE(!object || !layers))
+ return -ENOENT;
+ walker_node = &(ruleset->root.rb_node);
+ while (*walker_node) {
+ struct landlock_rule *const this = rb_entry(*walker_node,
+ struct landlock_rule, node);
+
+ if (this->object != object) {
+ parent_node = *walker_node;
+ if (this->object < object)
+ walker_node = &((*walker_node)->rb_right);
+ else
+ walker_node = &((*walker_node)->rb_left);
+ continue;
+ }
+
+ /* Only a single-level layer should match an existing rule. */
+ if (WARN_ON_ONCE(num_layers != 1))
+ return -EINVAL;
+
+ /* If there is a matching rule, updates it. */
+ if ((*layers)[0].level == 0) {
+ /*
+ * Extends access rights when the request comes from
+ * landlock_add_rule(2), i.e. @ruleset is not a domain.
+ */
+ if (WARN_ON_ONCE(this->num_layers != 1))
+ return -EINVAL;
+ if (WARN_ON_ONCE(this->layers[0].level != 0))
+ return -EINVAL;
+ this->layers[0].access |= (*layers)[0].access;
+ return 0;
+ }
+
+ if (WARN_ON_ONCE(this->layers[0].level == 0))
+ return -EINVAL;
+
+ /*
+ * Intersects access rights when it is a merge between a
+ * ruleset and a domain.
+ */
+ new_rule = create_rule(object, &this->layers, this->num_layers,
+ &(*layers)[0]);
+ if (IS_ERR(new_rule))
+ return PTR_ERR(new_rule);
+ rb_replace_node(&this->node, &new_rule->node, &ruleset->root);
+ free_rule(this);
+ return 0;
+ }
+
+ /* There is no match for @object. */
+ build_check_ruleset();
+ if (ruleset->num_rules >= LANDLOCK_MAX_NUM_RULES)
+ return -E2BIG;
+ new_rule = create_rule(object, layers, num_layers, NULL);
+ if (IS_ERR(new_rule))
+ return PTR_ERR(new_rule);
+ rb_link_node(&new_rule->node, parent_node, walker_node);
+ rb_insert_color(&new_rule->node, &ruleset->root);
+ ruleset->num_rules++;
+ return 0;
+}
+
+static void build_check_layer(void)
+{
+ const struct landlock_layer layer = {
+ .level = ~0,
+ .access = ~0,
+ };
+
+ BUILD_BUG_ON(layer.level < LANDLOCK_MAX_NUM_LAYERS);
+ BUILD_BUG_ON(layer.access < LANDLOCK_MASK_ACCESS_FS);
+}
+
+/* @ruleset must be locked by the caller. */
+int landlock_insert_rule(struct landlock_ruleset *const ruleset,
+ struct landlock_object *const object, const u32 access)
+{
+ struct landlock_layer layers[] = {{
+ .access = access,
+ /* When @level is zero, insert_rule() extends @ruleset. */
+ .level = 0,
+ }};
+
+ build_check_layer();
+ return insert_rule(ruleset, object, &layers, ARRAY_SIZE(layers));
+}
+
+static inline void get_hierarchy(struct landlock_hierarchy *const hierarchy)
+{
+ if (hierarchy)
+ refcount_inc(&hierarchy->usage);
+}
+
+static void put_hierarchy(struct landlock_hierarchy *hierarchy)
+{
+ while (hierarchy && refcount_dec_and_test(&hierarchy->usage)) {
+ const struct landlock_hierarchy *const freeme = hierarchy;
+
+ hierarchy = hierarchy->parent;
+ kfree(freeme);
+ }
+}
+
+static int merge_ruleset(struct landlock_ruleset *const dst,
+ struct landlock_ruleset *const src)
+{
+ struct landlock_rule *walker_rule, *next_rule;
+ int err = 0;
+
+ might_sleep();
+ /* Should already be checked by landlock_merge_ruleset(). */
+ if (WARN_ON_ONCE(!src))
+ return 0;
+ /* Only merge into a domain. */
+ if (WARN_ON_ONCE(!dst || !dst->hierarchy))
+ return -EINVAL;
+
+ /* Locks @dst first because we are its only owner. */
+ mutex_lock(&dst->lock);
+ mutex_lock_nested(&src->lock, SINGLE_DEPTH_NESTING);
+
+ /* Stacks the new layer. */
+ if (WARN_ON_ONCE(src->num_layers != 1 || dst->num_layers < 1)) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
+ dst->fs_access_masks[dst->num_layers - 1] = src->fs_access_masks[0];
+
+ /* Merges the @src tree. */
+ rbtree_postorder_for_each_entry_safe(walker_rule, next_rule,
+ &src->root, node) {
+ struct landlock_layer layers[] = {{
+ .level = dst->num_layers,
+ }};
+
+ if (WARN_ON_ONCE(walker_rule->num_layers != 1)) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
+ if (WARN_ON_ONCE(walker_rule->layers[0].level != 0)) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
+ layers[0].access = walker_rule->layers[0].access;
+ err = insert_rule(dst, walker_rule->object, &layers,
+ ARRAY_SIZE(layers));
+ if (err)
+ goto out_unlock;
+ }
+
+out_unlock:
+ mutex_unlock(&src->lock);
+ mutex_unlock(&dst->lock);
+ return err;
+}
+
+static int inherit_ruleset(struct landlock_ruleset *const parent,
+ struct landlock_ruleset *const child)
+{
+ struct landlock_rule *walker_rule, *next_rule;
+ int err = 0;
+
+ might_sleep();
+ if (!parent)
+ return 0;
+
+ /* Locks @child first because we are its only owner. */
+ mutex_lock(&child->lock);
+ mutex_lock_nested(&parent->lock, SINGLE_DEPTH_NESTING);
+
+ /* Copies the @parent tree. */
+ rbtree_postorder_for_each_entry_safe(walker_rule, next_rule,
+ &parent->root, node) {
+ err = insert_rule(child, walker_rule->object,
+ &walker_rule->layers, walker_rule->num_layers);
+ if (err)
+ goto out_unlock;
+ }
+
+ if (WARN_ON_ONCE(child->num_layers <= parent->num_layers)) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
+ /* Copies the parent layer stack and leaves a space for the new layer. */
+ memcpy(child->fs_access_masks, parent->fs_access_masks,
+ flex_array_size(parent, fs_access_masks, parent->num_layers));
+
+ if (WARN_ON_ONCE(!parent->hierarchy)) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
+ get_hierarchy(parent->hierarchy);
+ child->hierarchy->parent = parent->hierarchy;
+
+out_unlock:
+ mutex_unlock(&parent->lock);
+ mutex_unlock(&child->lock);
+ return err;
+}
+
+static void free_ruleset(struct landlock_ruleset *const ruleset)
+{
+ struct landlock_rule *freeme, *next;
+
+ might_sleep();
+ rbtree_postorder_for_each_entry_safe(freeme, next, &ruleset->root,
+ node)
+ free_rule(freeme);
+ put_hierarchy(ruleset->hierarchy);
+ kfree(ruleset);
+}
+
+void landlock_put_ruleset(struct landlock_ruleset *const ruleset)
+{
+ might_sleep();
+ if (ruleset && refcount_dec_and_test(&ruleset->usage))
+ free_ruleset(ruleset);
+}
+
+static void free_ruleset_work(struct work_struct *const work)
+{
+ struct landlock_ruleset *ruleset;
+
+ ruleset = container_of(work, struct landlock_ruleset, work_free);
+ free_ruleset(ruleset);
+}
+
+void landlock_put_ruleset_deferred(struct landlock_ruleset *const ruleset)
+{
+ if (ruleset && refcount_dec_and_test(&ruleset->usage)) {
+ INIT_WORK(&ruleset->work_free, free_ruleset_work);
+ schedule_work(&ruleset->work_free);
+ }
+}
+
+/**
+ * landlock_merge_ruleset - Merge a ruleset with a domain
+ *
+ * @parent: Parent domain.
+ * @ruleset: New ruleset to be merged.
+ *
+ * Returns a new domain composed of the layers of @parent (if any) plus one
+ * new layer built from @ruleset; an access is then allowed only if every
+ * layer allows it (i.e. the intersection of both sets of access rights).
+ */
+struct landlock_ruleset *landlock_merge_ruleset(
+ struct landlock_ruleset *const parent,
+ struct landlock_ruleset *const ruleset)
+{
+ struct landlock_ruleset *new_dom;
+ u32 num_layers;
+ int err;
+
+ might_sleep();
+ if (WARN_ON_ONCE(!ruleset || parent == ruleset))
+ return ERR_PTR(-EINVAL);
+
+ if (parent) {
+ if (parent->num_layers >= LANDLOCK_MAX_NUM_LAYERS)
+ return ERR_PTR(-E2BIG);
+ num_layers = parent->num_layers + 1;
+ } else {
+ num_layers = 1;
+ }
+
+ /* Creates a new domain... */
+ new_dom = create_ruleset(num_layers);
+ if (IS_ERR(new_dom))
+ return new_dom;
+ new_dom->hierarchy = kzalloc(sizeof(*new_dom->hierarchy),
+ GFP_KERNEL_ACCOUNT);
+ if (!new_dom->hierarchy) {
+ err = -ENOMEM;
+ goto out_put_dom;
+ }
+ refcount_set(&new_dom->hierarchy->usage, 1);
+
+ /* ...as a child of @parent... */
+ err = inherit_ruleset(parent, new_dom);
+ if (err)
+ goto out_put_dom;
+
+ /* ...and including @ruleset. */
+ err = merge_ruleset(new_dom, ruleset);
+ if (err)
+ goto out_put_dom;
+
+ return new_dom;
+
+out_put_dom:
+ landlock_put_ruleset(new_dom);
+ return ERR_PTR(err);
+}
+
+/*
+ * The returned rule has the same lifetime as @ruleset.
+ */
+const struct landlock_rule *landlock_find_rule(
+ const struct landlock_ruleset *const ruleset,
+ const struct landlock_object *const object)
+{
+ const struct rb_node *node;
+
+ if (!object)
+ return NULL;
+ node = ruleset->root.rb_node;
+ while (node) {
+ struct landlock_rule *this = rb_entry(node,
+ struct landlock_rule, node);
+
+ if (this->object == object)
+ return this;
+ if (this->object < object)
+ node = node->rb_right;
+ else
+ node = node->rb_left;
+ }
+ return NULL;
+}
diff --git a/security/landlock/ruleset.h b/security/landlock/ruleset.h
new file mode 100644
index 000000000000..2d3ed7ec5a0a
--- /dev/null
+++ b/security/landlock/ruleset.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Landlock LSM - Ruleset management
+ *
+ * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2018-2020 ANSSI
+ */
+
+#ifndef _SECURITY_LANDLOCK_RULESET_H
+#define _SECURITY_LANDLOCK_RULESET_H
+
+#include <linux/mutex.h>
+#include <linux/rbtree.h>
+#include <linux/refcount.h>
+#include <linux/workqueue.h>
+
+#include "object.h"
+
+/**
+ * struct landlock_layer - Access rights for a given layer
+ */
+struct landlock_layer {
+ /**
+ * @level: Position of this layer in the layer stack.
+ */
+ u16 level;
+ /**
+ * @access: Bitfield of allowed actions on the kernel object. They are
+ * relative to the object type (e.g. %LANDLOCK_ACCESS_FS_READ_FILE).
+ */
+ u16 access;
+};
+
+/**
+ * struct landlock_rule - Access rights tied to an object
+ */
+struct landlock_rule {
+ /**
+ * @node: Node in the ruleset's red-black tree.
+ */
+ struct rb_node node;
+ /**
+ * @object: Pointer to identify a kernel object (e.g. an inode). This
+ * is used as a key for this ruleset element. This pointer is set once
+ * and never modified. It always points to an allocated object because
+ * each rule increments the refcount of its object.
+ */
+ struct landlock_object *object;
+ /**
+ * @num_layers: Number of entries in @layers.
+ */
+ u32 num_layers;
+ /**
+ * @layers: Stack of layers, from the oldest to the newest, implemented
+ * as a flexible array member (FAM).
+ */
+ struct landlock_layer layers[];
+};
+
+/**
+ * struct landlock_hierarchy - Node in a ruleset hierarchy
+ */
+struct landlock_hierarchy {
+ /**
+ * @parent: Pointer to the parent node, or NULL if it is a root
+ * Landlock domain.
+ */
+ struct landlock_hierarchy *parent;
+ /**
+ * @usage: Number of potential child domains plus their parent
+ * domain.
+ */
+ refcount_t usage;
+};
+
+/**
+ * struct landlock_ruleset - Landlock ruleset
+ *
+ * This data structure must contain unique entries, be updatable, and quick to
+ * match an object.
+ */
+struct landlock_ruleset {
+ /**
+ * @root: Root of a red-black tree containing &struct landlock_rule
+ * nodes. Once a ruleset is tied to a process (i.e. as a domain), this
+ * tree is immutable until @usage reaches zero.
+ */
+ struct rb_root root;
+ /**
+ * @hierarchy: Enables hierarchy identification even when a parent
+ * domain vanishes. This is needed for the ptrace protection.
+ */
+ struct landlock_hierarchy *hierarchy;
+ union {
+ /**
+ * @work_free: Enables freeing a ruleset from within a
+ * lockless section. This is only used by
+ * landlock_put_ruleset_deferred() when @usage reaches zero.
+ * The fields @lock, @usage, @num_rules, @num_layers and
+ * @fs_access_masks are then unused.
+ */
+ struct work_struct work_free;
+ struct {
+ /**
+ * @lock: Protects against concurrent modifications of
+ * @root, if @usage is greater than zero.
+ */
+ struct mutex lock;
+ /**
+ * @usage: Number of processes (i.e. domains) or file
+ * descriptors referencing this ruleset.
+ */
+ refcount_t usage;
+ /**
+ * @num_rules: Number of non-overlapping (i.e. not for
+ * the same object) rules in this ruleset.
+ */
+ u32 num_rules;
+ /**
+ * @num_layers: Number of layers that are used in this
+ * ruleset. This makes it possible to check that all the layers
+ * allow an access request. A value of 0 identifies a
+ * non-merged ruleset (i.e. not a domain).
+ */
+ u32 num_layers;
+ /**
+ * @fs_access_masks: Contains the subset of filesystem
+ * actions that are restricted by a ruleset. A domain
+ * saves all layers of merged rulesets in a stack
+ * (FAM), starting from the first layer to the last
+ * one. These layers are used when merging rulesets,
+ * for user space backward compatibility (i.e.
+ * future-proof), and to properly handle merged
+ * rulesets without overlapping access rights. These
+ * layers are set once and never changed for the
+ * lifetime of the ruleset.
+ */
+ u16 fs_access_masks[];
+ };
+ };
+};
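
Both struct landlock_rule and struct landlock_ruleset end in a flexible array member, so their allocations presumably follow the usual struct_size() pattern. A sketch under that assumption (the real create_rule()/create_ruleset() appear earlier in ruleset.c; the helper name below is made up):

	#include <linux/overflow.h>
	#include <linux/slab.h>

	static struct landlock_ruleset *alloc_ruleset_sketch(const u32 num_layers)
	{
		struct landlock_ruleset *ruleset;

		/* struct_size() guards the num_layers * sizeof(u16) overflow. */
		ruleset = kzalloc(struct_size(ruleset, fs_access_masks, num_layers),
				  GFP_KERNEL_ACCOUNT);
		return ruleset;
	}
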
+
+struct landlock_ruleset *landlock_create_ruleset(const u32 fs_access_mask);
+
+void landlock_put_ruleset(struct landlock_ruleset *const ruleset);
+void landlock_put_ruleset_deferred(struct landlock_ruleset *const ruleset);
+
+int landlock_insert_rule(struct landlock_ruleset *const ruleset,
+ struct landlock_object *const object, const u32 access);
+
+struct landlock_ruleset *landlock_merge_ruleset(
+ struct landlock_ruleset *const parent,
+ struct landlock_ruleset *const ruleset);
+
+const struct landlock_rule *landlock_find_rule(
+ const struct landlock_ruleset *const ruleset,
+ const struct landlock_object *const object);
+
+static inline void landlock_get_ruleset(struct landlock_ruleset *const ruleset)
+{
+ if (ruleset)
+ refcount_inc(&ruleset->usage);
+}
+
+#endif /* _SECURITY_LANDLOCK_RULESET_H */
diff --git a/security/landlock/setup.c b/security/landlock/setup.c
new file mode 100644
index 000000000000..f8e8e980454c
--- /dev/null
+++ b/security/landlock/setup.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Landlock LSM - Security framework setup
+ *
+ * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2018-2020 ANSSI
+ */
+
+#include <linux/init.h>
+#include <linux/lsm_hooks.h>
+
+#include "common.h"
+#include "cred.h"
+#include "fs.h"
+#include "ptrace.h"
+#include "setup.h"
+
+bool landlock_initialized __lsm_ro_after_init = false;
+
+struct lsm_blob_sizes landlock_blob_sizes __lsm_ro_after_init = {
+ .lbs_cred = sizeof(struct landlock_cred_security),
+ .lbs_inode = sizeof(struct landlock_inode_security),
+ .lbs_superblock = sizeof(struct landlock_superblock_security),
+};
+
+static int __init landlock_init(void)
+{
+ landlock_add_cred_hooks();
+ landlock_add_ptrace_hooks();
+ landlock_add_fs_hooks();
+ landlock_initialized = true;
+ pr_info("Up and running.\n");
+ return 0;
+}
+
+DEFINE_LSM(LANDLOCK_NAME) = {
+ .name = LANDLOCK_NAME,
+ .init = landlock_init,
+ .blobs = &landlock_blob_sizes,
+};
diff --git a/security/landlock/setup.h b/security/landlock/setup.h
new file mode 100644
index 000000000000..1daffab1ab4b
--- /dev/null
+++ b/security/landlock/setup.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Landlock LSM - Security framework setup
+ *
+ * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2018-2020 ANSSI
+ */
+
+#ifndef _SECURITY_LANDLOCK_SETUP_H
+#define _SECURITY_LANDLOCK_SETUP_H
+
+#include <linux/lsm_hooks.h>
+
+extern bool landlock_initialized;
+
+extern struct lsm_blob_sizes landlock_blob_sizes;
+
+#endif /* _SECURITY_LANDLOCK_SETUP_H */
diff --git a/security/landlock/syscalls.c b/security/landlock/syscalls.c
new file mode 100644
index 000000000000..32396962f04d
--- /dev/null
+++ b/security/landlock/syscalls.c
@@ -0,0 +1,451 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Landlock LSM - System call implementations and user space interfaces
+ *
+ * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2018-2020 ANSSI
+ */
+
+#include <asm/current.h>
+#include <linux/anon_inodes.h>
+#include <linux/build_bug.h>
+#include <linux/capability.h>
+#include <linux/compiler_types.h>
+#include <linux/dcache.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/limits.h>
+#include <linux/mount.h>
+#include <linux/path.h>
+#include <linux/sched.h>
+#include <linux/security.h>
+#include <linux/stddef.h>
+#include <linux/syscalls.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <uapi/linux/landlock.h>
+
+#include "cred.h"
+#include "fs.h"
+#include "limits.h"
+#include "ruleset.h"
+#include "setup.h"
+
+/**
+ * copy_min_struct_from_user - Safe future-proof argument copying
+ *
+ * Extends copy_struct_from_user() to check for a consistent user buffer.
+ *
+ * @dst: Kernel space pointer or NULL.
+ * @ksize: Actual size of the data pointed to by @dst.
+ * @ksize_min: Minimal required size to be copied.
+ * @src: User space pointer or NULL.
+ * @usize: (Alleged) size of the data pointed to by @src.
+ */
+static __always_inline int copy_min_struct_from_user(void *const dst,
+ const size_t ksize, const size_t ksize_min,
+ const void __user *const src, const size_t usize)
+{
+ /* Checks buffer inconsistencies. */
+ BUILD_BUG_ON(!dst);
+ if (!src)
+ return -EFAULT;
+
+ /* Checks size ranges. */
+ BUILD_BUG_ON(ksize <= 0);
+ BUILD_BUG_ON(ksize < ksize_min);
+ if (usize < ksize_min)
+ return -EINVAL;
+ if (usize > PAGE_SIZE)
+ return -E2BIG;
+
+ /* Copies the user buffer and zero-fills the rest of the kernel struct. */
+ return copy_struct_from_user(dst, ksize, src, usize);
+}
+
+/*
+ * This function only contains arithmetic operations with constants, which
+ * feed the BUILD_BUG_ON() checks. The related code is evaluated at build
+ * time and then discarded thanks to compiler optimizations.
+ */
+static void build_check_abi(void)
+{
+ struct landlock_ruleset_attr ruleset_attr;
+ struct landlock_path_beneath_attr path_beneath_attr;
+ size_t ruleset_size, path_beneath_size;
+
+ /*
+ * For each user space ABI structure, first checks that there is no
+ * hole in it, then checks that all architectures have the same
+ * struct size.
+ */
+ ruleset_size = sizeof(ruleset_attr.handled_access_fs);
+ BUILD_BUG_ON(sizeof(ruleset_attr) != ruleset_size);
+ BUILD_BUG_ON(sizeof(ruleset_attr) != 8);
+
+ path_beneath_size = sizeof(path_beneath_attr.allowed_access);
+ path_beneath_size += sizeof(path_beneath_attr.parent_fd);
+ BUILD_BUG_ON(sizeof(path_beneath_attr) != path_beneath_size);
+ BUILD_BUG_ON(sizeof(path_beneath_attr) != 12);
+}
+
+/* Ruleset handling */
+
+static int fop_ruleset_release(struct inode *const inode,
+ struct file *const filp)
+{
+ struct landlock_ruleset *ruleset = filp->private_data;
+
+ landlock_put_ruleset(ruleset);
+ return 0;
+}
+
+static ssize_t fop_dummy_read(struct file *const filp, char __user *const buf,
+ const size_t size, loff_t *const ppos)
+{
+ /* Dummy handler to enable FMODE_CAN_READ. */
+ return -EINVAL;
+}
+
+static ssize_t fop_dummy_write(struct file *const filp,
+ const char __user *const buf, const size_t size,
+ loff_t *const ppos)
+{
+ /* Dummy handler to enable FMODE_CAN_WRITE. */
+ return -EINVAL;
+}
+
+/*
+ * A ruleset file descriptor enables building a ruleset by adding (i.e.
+ * writing) rule after rule, without relying on the task's context. This
+ * reentrant design is also used through reads to enforce the ruleset on the
+ * current task.
+ */
+static const struct file_operations ruleset_fops = {
+ .release = fop_ruleset_release,
+ .read = fop_dummy_read,
+ .write = fop_dummy_write,
+};
+
+#define LANDLOCK_ABI_VERSION 1
+
+/**
+ * sys_landlock_create_ruleset - Create a new ruleset
+ *
+ * @attr: Pointer to a &struct landlock_ruleset_attr identifying the scope of
+ * the new ruleset.
+ * @size: Size of the pointed &struct landlock_ruleset_attr (needed for
+ * backward and forward compatibility).
+ * @flags: Supported value: %LANDLOCK_CREATE_RULESET_VERSION.
+ *
+ * This system call creates a new Landlock ruleset and, on success, returns
+ * the related file descriptor.
+ *
+ * If @flags is %LANDLOCK_CREATE_RULESET_VERSION and @attr is NULL and @size is
+ * 0, then the returned value is the highest supported Landlock ABI version
+ * (starting at 1).
+ *
+ * Possible returned errors are:
+ *
+ * - EOPNOTSUPP: Landlock is supported by the kernel but disabled at boot time;
+ * - EINVAL: unknown @flags, or unknown access, or too small @size;
+ * - E2BIG or EFAULT: @attr or @size inconsistencies;
+ * - ENOMSG: empty &landlock_ruleset_attr.handled_access_fs.
+ */
+SYSCALL_DEFINE3(landlock_create_ruleset,
+ const struct landlock_ruleset_attr __user *const, attr,
+ const size_t, size, const __u32, flags)
+{
+ struct landlock_ruleset_attr ruleset_attr;
+ struct landlock_ruleset *ruleset;
+ int err, ruleset_fd;
+
+ /* Build-time checks. */
+ build_check_abi();
+
+ if (!landlock_initialized)
+ return -EOPNOTSUPP;
+
+ if (flags) {
+ if ((flags == LANDLOCK_CREATE_RULESET_VERSION)
+ && !attr && !size)
+ return LANDLOCK_ABI_VERSION;
+ return -EINVAL;
+ }
+
+ /* Copies raw user space buffer. */
+ err = copy_min_struct_from_user(&ruleset_attr, sizeof(ruleset_attr),
+ offsetofend(typeof(ruleset_attr), handled_access_fs),
+ attr, size);
+ if (err)
+ return err;
+
+ /* Checks content (and 32-bits cast). */
+ if ((ruleset_attr.handled_access_fs | LANDLOCK_MASK_ACCESS_FS) !=
+ LANDLOCK_MASK_ACCESS_FS)
+ return -EINVAL;
+
+ /* Checks arguments and transforms to kernel struct. */
+ ruleset = landlock_create_ruleset(ruleset_attr.handled_access_fs);
+ if (IS_ERR(ruleset))
+ return PTR_ERR(ruleset);
+
+ /* Creates anonymous FD referring to the ruleset. */
+ ruleset_fd = anon_inode_getfd("landlock-ruleset", &ruleset_fops,
+ ruleset, O_RDWR | O_CLOEXEC);
+ if (ruleset_fd < 0)
+ landlock_put_ruleset(ruleset);
+ return ruleset_fd;
+}
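
A minimal user space sketch of both uses of this system call, assuming the UAPI header is installed and invoking syscall(2) directly since no libc wrapper existed at the time (the helper name is hypothetical):

	#define _GNU_SOURCE
	#include <linux/landlock.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int create_ruleset_sketch(void)
	{
		const struct landlock_ruleset_attr attr = {
			.handled_access_fs = LANDLOCK_ACCESS_FS_EXECUTE |
					     LANDLOCK_ACCESS_FS_WRITE_FILE,
		};
		long abi;

		/* Probe the highest supported ABI version: NULL @attr, zero @size. */
		abi = syscall(__NR_landlock_create_ruleset, NULL, 0,
			      LANDLOCK_CREATE_RULESET_VERSION);
		if (abi < 0)
			return -1;	/* Landlock unsupported or disabled at boot. */

		/* On success, returns a new ruleset file descriptor. */
		return syscall(__NR_landlock_create_ruleset, &attr, sizeof(attr), 0);
	}
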
+
+/*
+ * Returns an owned ruleset from a FD. The caller must then call
+ * landlock_put_ruleset() on the return value.
+ */
+static struct landlock_ruleset *get_ruleset_from_fd(const int fd,
+ const fmode_t mode)
+{
+ struct fd ruleset_f;
+ struct landlock_ruleset *ruleset;
+
+ ruleset_f = fdget(fd);
+ if (!ruleset_f.file)
+ return ERR_PTR(-EBADF);
+
+ /* Checks FD type and access right. */
+ if (ruleset_f.file->f_op != &ruleset_fops) {
+ ruleset = ERR_PTR(-EBADFD);
+ goto out_fdput;
+ }
+ if (!(ruleset_f.file->f_mode & mode)) {
+ ruleset = ERR_PTR(-EPERM);
+ goto out_fdput;
+ }
+ ruleset = ruleset_f.file->private_data;
+ if (WARN_ON_ONCE(ruleset->num_layers != 1)) {
+ ruleset = ERR_PTR(-EINVAL);
+ goto out_fdput;
+ }
+ landlock_get_ruleset(ruleset);
+
+out_fdput:
+ fdput(ruleset_f);
+ return ruleset;
+}
+
+/* Path handling */
+
+/*
+ * @path: Must call path_put(@path) after the call if it succeeded.
+ */
+static int get_path_from_fd(const s32 fd, struct path *const path)
+{
+ struct fd f;
+ int err = 0;
+
+ BUILD_BUG_ON(!__same_type(fd,
+ ((struct landlock_path_beneath_attr *)NULL)->parent_fd));
+
+ /* Handles O_PATH. */
+ f = fdget_raw(fd);
+ if (!f.file)
+ return -EBADF;
+ /*
+ * Forbids ruleset FDs and internal filesystems (e.g. nsfs), including
+ * pseudo filesystems that will never be mountable (e.g. sockfs,
+ * pipefs).
+ */
+ if ((f.file->f_op == &ruleset_fops) ||
+ (f.file->f_path.mnt->mnt_flags & MNT_INTERNAL) ||
+ (f.file->f_path.dentry->d_sb->s_flags & SB_NOUSER) ||
+ d_is_negative(f.file->f_path.dentry) ||
+ IS_PRIVATE(d_backing_inode(f.file->f_path.dentry))) {
+ err = -EBADFD;
+ goto out_fdput;
+ }
+ *path = f.file->f_path;
+ path_get(path);
+
+out_fdput:
+ fdput(f);
+ return err;
+}
+
+/**
+ * sys_landlock_add_rule - Add a new rule to a ruleset
+ *
+ * @ruleset_fd: File descriptor tied to the ruleset that should be extended
+ * with the new rule.
+ * @rule_type: Identify the structure type pointed to by @rule_attr (only
+ * LANDLOCK_RULE_PATH_BENEATH for now).
+ * @rule_attr: Pointer to a rule (only of type &struct
+ * landlock_path_beneath_attr for now).
+ * @flags: Must be 0.
+ *
+ * This system call defines a new rule and adds it to an existing ruleset.
+ *
+ * Possible returned errors are:
+ *
+ * - EOPNOTSUPP: Landlock is supported by the kernel but disabled at boot time;
+ * - EINVAL: @flags is not 0, or inconsistent access in the rule (i.e.
+ * &landlock_path_beneath_attr.allowed_access is not a subset of the
+ * ruleset's handled accesses);
+ * - ENOMSG: Empty accesses (e.g. &landlock_path_beneath_attr.allowed_access);
+ * - EBADF: @ruleset_fd is not a file descriptor for the current thread, or a
+ * member of @rule_attr is not a file descriptor as expected;
+ * - EBADFD: @ruleset_fd is not a ruleset file descriptor, or a member of
+ * @rule_attr is not the expected file descriptor type (e.g. file open
+ * without O_PATH);
+ * - EPERM: @ruleset_fd has no write access to the underlying ruleset;
+ * - EFAULT: @rule_attr inconsistency.
+ */
+SYSCALL_DEFINE4(landlock_add_rule,
+ const int, ruleset_fd, const enum landlock_rule_type, rule_type,
+ const void __user *const, rule_attr, const __u32, flags)
+{
+ struct landlock_path_beneath_attr path_beneath_attr;
+ struct path path;
+ struct landlock_ruleset *ruleset;
+ int res, err;
+
+ if (!landlock_initialized)
+ return -EOPNOTSUPP;
+
+ /* No flag for now. */
+ if (flags)
+ return -EINVAL;
+
+ if (rule_type != LANDLOCK_RULE_PATH_BENEATH)
+ return -EINVAL;
+
+ /* Copies raw user space buffer, only one type for now. */
+ res = copy_from_user(&path_beneath_attr, rule_attr,
+ sizeof(path_beneath_attr));
+ if (res)
+ return -EFAULT;
+
+ /* Gets and checks the ruleset. */
+ ruleset = get_ruleset_from_fd(ruleset_fd, FMODE_CAN_WRITE);
+ if (IS_ERR(ruleset))
+ return PTR_ERR(ruleset);
+
+ /*
+ * Informs about a useless rule: empty allowed_access (i.e. deny rules)
+ * are ignored in path walks.
+ */
+ if (!path_beneath_attr.allowed_access) {
+ err = -ENOMSG;
+ goto out_put_ruleset;
+ }
+ /*
+ * Checks that allowed_access matches the @ruleset constraints
+ * (ruleset->fs_access_masks[0] is automatically upgraded to 64-bits).
+ */
+ if ((path_beneath_attr.allowed_access | ruleset->fs_access_masks[0]) !=
+ ruleset->fs_access_masks[0]) {
+ err = -EINVAL;
+ goto out_put_ruleset;
+ }
+
+ /* Gets and checks the new rule. */
+ err = get_path_from_fd(path_beneath_attr.parent_fd, &path);
+ if (err)
+ goto out_put_ruleset;
+
+ /* Imports the new rule. */
+ err = landlock_append_fs_rule(ruleset, &path,
+ path_beneath_attr.allowed_access);
+ path_put(&path);
+
+out_put_ruleset:
+ landlock_put_ruleset(ruleset);
+ return err;
+}
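
Continuing the user space sketch, a hypothetical helper that grants read access beneath a directory (error handling trimmed):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <linux/landlock.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int allow_reads_beneath_sketch(const int ruleset_fd, const char *dir)
	{
		struct landlock_path_beneath_attr path_beneath = {
			.allowed_access = LANDLOCK_ACCESS_FS_READ_FILE |
					  LANDLOCK_ACCESS_FS_READ_DIR,
		};
		int err;

		/* An O_PATH descriptor is enough: get_path_from_fd() uses fdget_raw(). */
		path_beneath.parent_fd = open(dir, O_PATH | O_CLOEXEC);
		if (path_beneath.parent_fd < 0)
			return -1;
		err = syscall(__NR_landlock_add_rule, ruleset_fd,
			      LANDLOCK_RULE_PATH_BENEATH, &path_beneath, 0);
		close(path_beneath.parent_fd);
		return err;
	}
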
+
+/* Enforcement */
+
+/**
+ * sys_landlock_restrict_self - Enforce a ruleset on the calling thread
+ *
+ * @ruleset_fd: File descriptor tied to the ruleset to merge with the target.
+ * @flags: Must be 0.
+ *
+ * This system call enforces a Landlock ruleset on the current thread.
+ * namespace or is running with no_new_privs. This avoids scenarios where
+ * unprivileged tasks can affect the behavior of privileged children.
+ *
+ * Possible returned errors are:
+ *
+ * - EOPNOTSUPP: Landlock is supported by the kernel but disabled at boot time;
+ * - EINVAL: @flags is not 0;
+ * - EBADF: @ruleset_fd is not a file descriptor for the current thread;
+ * - EBADFD: @ruleset_fd is not a ruleset file descriptor;
+ * - EPERM: @ruleset_fd has no read access to the underlying ruleset, or the
+ * current thread is not running with no_new_privs, or it doesn't have
+ *   CAP_SYS_ADMIN in its namespace;
+ * - E2BIG: The maximum number of stacked rulesets is reached for the current
+ * thread.
+ */
+SYSCALL_DEFINE2(landlock_restrict_self,
+ const int, ruleset_fd, const __u32, flags)
+{
+ struct landlock_ruleset *new_dom, *ruleset;
+ struct cred *new_cred;
+ struct landlock_cred_security *new_llcred;
+ int err;
+
+ if (!landlock_initialized)
+ return -EOPNOTSUPP;
+
+ /* No flag for now. */
+ if (flags)
+ return -EINVAL;
+
+ /*
+ * Similar checks as for seccomp(2), except that an -EPERM may be
+ * returned.
+ */
+ if (!task_no_new_privs(current) &&
+ !ns_capable_noaudit(current_user_ns(), CAP_SYS_ADMIN))
+ return -EPERM;
+
+ /* Gets and checks the ruleset. */
+ ruleset = get_ruleset_from_fd(ruleset_fd, FMODE_CAN_READ);
+ if (IS_ERR(ruleset))
+ return PTR_ERR(ruleset);
+
+ /* Prepares new credentials. */
+ new_cred = prepare_creds();
+ if (!new_cred) {
+ err = -ENOMEM;
+ goto out_put_ruleset;
+ }
+ new_llcred = landlock_cred(new_cred);
+
+ /*
+ * There is no possible race condition while copying and manipulating
+ * the current credentials because they are dedicated per thread.
+ */
+ new_dom = landlock_merge_ruleset(new_llcred->domain, ruleset);
+ if (IS_ERR(new_dom)) {
+ err = PTR_ERR(new_dom);
+ goto out_put_creds;
+ }
+
+ /* Replaces the old (prepared) domain. */
+ landlock_put_ruleset(new_llcred->domain);
+ new_llcred->domain = new_dom;
+
+ landlock_put_ruleset(ruleset);
+ return commit_creds(new_cred);
+
+out_put_creds:
+ abort_creds(new_cred);
+
+out_put_ruleset:
+ landlock_put_ruleset(ruleset);
+ return err;
+}
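
And the final step of the sketch: per the checks above, an unprivileged task must set no_new_privs before enforcing the ruleset (helper name hypothetical):

	#define _GNU_SOURCE
	#include <linux/landlock.h>
	#include <sys/prctl.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int enforce_ruleset_sketch(const int ruleset_fd)
	{
		/* Without CAP_SYS_ADMIN in the namespace, no_new_privs is required. */
		if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
			return -1;
		return syscall(__NR_landlock_restrict_self, ruleset_fd, 0);
	}
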
diff --git a/security/security.c b/security/security.c
index 5ac96b16f8fa..b38155b2de83 100644
--- a/security/security.c
+++ b/security/security.c
@@ -203,6 +203,7 @@ static void __init lsm_set_blob_sizes(struct lsm_blob_sizes *needed)
lsm_set_blob_size(&needed->lbs_inode, &blob_sizes.lbs_inode);
lsm_set_blob_size(&needed->lbs_ipc, &blob_sizes.lbs_ipc);
lsm_set_blob_size(&needed->lbs_msg_msg, &blob_sizes.lbs_msg_msg);
+ lsm_set_blob_size(&needed->lbs_superblock, &blob_sizes.lbs_superblock);
lsm_set_blob_size(&needed->lbs_task, &blob_sizes.lbs_task);
}
@@ -333,12 +334,13 @@ static void __init ordered_lsm_init(void)
for (lsm = ordered_lsms; *lsm; lsm++)
prepare_lsm(*lsm);
- init_debug("cred blob size = %d\n", blob_sizes.lbs_cred);
- init_debug("file blob size = %d\n", blob_sizes.lbs_file);
- init_debug("inode blob size = %d\n", blob_sizes.lbs_inode);
- init_debug("ipc blob size = %d\n", blob_sizes.lbs_ipc);
- init_debug("msg_msg blob size = %d\n", blob_sizes.lbs_msg_msg);
- init_debug("task blob size = %d\n", blob_sizes.lbs_task);
+ init_debug("cred blob size = %d\n", blob_sizes.lbs_cred);
+ init_debug("file blob size = %d\n", blob_sizes.lbs_file);
+ init_debug("inode blob size = %d\n", blob_sizes.lbs_inode);
+ init_debug("ipc blob size = %d\n", blob_sizes.lbs_ipc);
+ init_debug("msg_msg blob size = %d\n", blob_sizes.lbs_msg_msg);
+ init_debug("superblock blob size = %d\n", blob_sizes.lbs_superblock);
+ init_debug("task blob size = %d\n", blob_sizes.lbs_task);
/*
* Create any kmem_caches needed for blobs
@@ -670,6 +672,27 @@ static void __init lsm_early_task(struct task_struct *task)
panic("%s: Early task alloc failed.\n", __func__);
}
+/**
+ * lsm_superblock_alloc - allocate a composite superblock blob
+ * @sb: the superblock that needs a blob
+ *
+ * Allocate the superblock blob for all the modules
+ *
+ * Returns 0, or -ENOMEM if memory can't be allocated.
+ */
+static int lsm_superblock_alloc(struct super_block *sb)
+{
+ if (blob_sizes.lbs_superblock == 0) {
+ sb->s_security = NULL;
+ return 0;
+ }
+
+ sb->s_security = kzalloc(blob_sizes.lbs_superblock, GFP_KERNEL);
+ if (sb->s_security == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
/*
* The default value of the LSM hook is defined in linux/lsm_hook_defs.h and
* can be accessed with:
@@ -867,12 +890,26 @@ int security_fs_context_parse_param(struct fs_context *fc, struct fs_parameter *
int security_sb_alloc(struct super_block *sb)
{
- return call_int_hook(sb_alloc_security, 0, sb);
+ int rc = lsm_superblock_alloc(sb);
+
+ if (unlikely(rc))
+ return rc;
+ rc = call_int_hook(sb_alloc_security, 0, sb);
+ if (unlikely(rc))
+ security_sb_free(sb);
+ return rc;
+}
+
+void security_sb_delete(struct super_block *sb)
+{
+ call_void_hook(sb_delete, sb);
}
void security_sb_free(struct super_block *sb)
{
call_void_hook(sb_free_security, sb);
+ kfree(sb->s_security);
+ sb->s_security = NULL;
}
void security_free_mnt_opts(void **mnt_opts)
@@ -890,6 +927,13 @@ int security_sb_eat_lsm_opts(char *options, void **mnt_opts)
}
EXPORT_SYMBOL(security_sb_eat_lsm_opts);
+int security_sb_mnt_opts_compat(struct super_block *sb,
+ void *mnt_opts)
+{
+ return call_int_hook(sb_mnt_opts_compat, 0, sb, mnt_opts);
+}
+EXPORT_SYMBOL(security_sb_mnt_opts_compat);
+
int security_sb_remount(struct super_block *sb,
void *mnt_opts)
{
@@ -1762,12 +1806,19 @@ int security_task_getsid(struct task_struct *p)
return call_int_hook(task_getsid, 0, p);
}
-void security_task_getsecid(struct task_struct *p, u32 *secid)
+void security_task_getsecid_subj(struct task_struct *p, u32 *secid)
+{
+ *secid = 0;
+ call_void_hook(task_getsecid_subj, p, secid);
+}
+EXPORT_SYMBOL(security_task_getsecid_subj);
+
+void security_task_getsecid_obj(struct task_struct *p, u32 *secid)
{
*secid = 0;
- call_void_hook(task_getsecid, p, secid);
+ call_void_hook(task_getsecid_obj, p, secid);
}
-EXPORT_SYMBOL(security_task_getsecid);
+EXPORT_SYMBOL(security_task_getsecid_obj);
int security_task_setnice(struct task_struct *p, int nice)
{
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index ddd097790d47..eaea837d89d1 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -230,9 +230,22 @@ static inline u32 cred_sid(const struct cred *cred)
}
/*
+ * get the subjective security ID of a task
+ */
+static inline u32 task_sid_subj(const struct task_struct *task)
+{
+ u32 sid;
+
+ rcu_read_lock();
+ sid = cred_sid(rcu_dereference(task->cred));
+ rcu_read_unlock();
+ return sid;
+}
+
+/*
* get the objective security ID of a task
*/
-static inline u32 task_sid(const struct task_struct *task)
+static inline u32 task_sid_obj(const struct task_struct *task)
{
u32 sid;
@@ -242,6 +255,29 @@ static inline u32 task_sid(const struct task_struct *task)
return sid;
}
+/*
+ * get the security ID of a task for use with binder
+ */
+static inline u32 task_sid_binder(const struct task_struct *task)
+{
+ /*
+ * In many cases where this function is used we should be using the
+ * task's subjective SID, but we can't reliably access the subjective
+ * creds of a task other than our own so we must use the objective
+ * creds/SID, which are safe to access. The downside is that if a task
+ * is temporarily overriding its creds it will not be reflected here;
+ * however, it isn't clear that binder would handle that case well
+ * anyway.
+ *
+ * If this ever changes and we can safely reference the subjective
+ * creds/SID of another task, this function will make it easier to
+ * identify the various places where we make use of the task SIDs in
+ * the binder code. It is also likely that we will need to adjust
+ * the main drivers/android binder code as well.
+ */
+ return task_sid_obj(task);
+}
+
static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dentry);
/*
@@ -322,7 +358,7 @@ static void inode_free_security(struct inode *inode)
if (!isec)
return;
- sbsec = inode->i_sb->s_security;
+ sbsec = selinux_superblock(inode->i_sb);
/*
* As not all inode security structures are in a list, we check for
* empty list outside of the lock to make sure that we won't waste
@@ -340,13 +376,6 @@ static void inode_free_security(struct inode *inode)
}
}
-static void superblock_free_security(struct super_block *sb)
-{
- struct superblock_security_struct *sbsec = sb->s_security;
- sb->s_security = NULL;
- kfree(sbsec);
-}
-
struct selinux_mnt_opts {
const char *fscontext, *context, *rootcontext, *defcontext;
};
@@ -458,7 +487,7 @@ static int selinux_is_genfs_special_handling(struct super_block *sb)
static int selinux_is_sblabel_mnt(struct super_block *sb)
{
- struct superblock_security_struct *sbsec = sb->s_security;
+ struct superblock_security_struct *sbsec = selinux_superblock(sb);
/*
* IMPORTANT: Double-check logic in this function when adding a new
@@ -535,7 +564,7 @@ fallback:
static int sb_finish_set_opts(struct super_block *sb)
{
- struct superblock_security_struct *sbsec = sb->s_security;
+ struct superblock_security_struct *sbsec = selinux_superblock(sb);
struct dentry *root = sb->s_root;
struct inode *root_inode = d_backing_inode(root);
int rc = 0;
@@ -626,7 +655,7 @@ static int selinux_set_mnt_opts(struct super_block *sb,
unsigned long *set_kern_flags)
{
const struct cred *cred = current_cred();
- struct superblock_security_struct *sbsec = sb->s_security;
+ struct superblock_security_struct *sbsec = selinux_superblock(sb);
struct dentry *root = sb->s_root;
struct selinux_mnt_opts *opts = mnt_opts;
struct inode_security_struct *root_isec;
@@ -760,7 +789,8 @@ static int selinux_set_mnt_opts(struct super_block *sb,
if (sb->s_user_ns != &init_user_ns &&
strcmp(sb->s_type->name, "tmpfs") &&
strcmp(sb->s_type->name, "ramfs") &&
- strcmp(sb->s_type->name, "devpts")) {
+ strcmp(sb->s_type->name, "devpts") &&
+ strcmp(sb->s_type->name, "overlay")) {
if (context_sid || fscontext_sid || rootcontext_sid ||
defcontext_sid) {
rc = -EACCES;
@@ -863,8 +893,8 @@ out_double_mount:
static int selinux_cmp_sb_context(const struct super_block *oldsb,
const struct super_block *newsb)
{
- struct superblock_security_struct *old = oldsb->s_security;
- struct superblock_security_struct *new = newsb->s_security;
+ struct superblock_security_struct *old = selinux_superblock(oldsb);
+ struct superblock_security_struct *new = selinux_superblock(newsb);
char oldflags = old->flags & SE_MNTMASK;
char newflags = new->flags & SE_MNTMASK;
@@ -896,8 +926,9 @@ static int selinux_sb_clone_mnt_opts(const struct super_block *oldsb,
unsigned long *set_kern_flags)
{
int rc = 0;
- const struct superblock_security_struct *oldsbsec = oldsb->s_security;
- struct superblock_security_struct *newsbsec = newsb->s_security;
+ const struct superblock_security_struct *oldsbsec =
+ selinux_superblock(oldsb);
+ struct superblock_security_struct *newsbsec = selinux_superblock(newsb);
int set_fscontext = (oldsbsec->flags & FSCONTEXT_MNT);
int set_context = (oldsbsec->flags & CONTEXT_MNT);
@@ -1076,7 +1107,7 @@ static int show_sid(struct seq_file *m, u32 sid)
static int selinux_sb_show_options(struct seq_file *m, struct super_block *sb)
{
- struct superblock_security_struct *sbsec = sb->s_security;
+ struct superblock_security_struct *sbsec = selinux_superblock(sb);
int rc;
if (!(sbsec->flags & SE_SBINITIALIZED))
@@ -1427,7 +1458,7 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
if (isec->sclass == SECCLASS_FILE)
isec->sclass = inode_mode_to_security_class(inode->i_mode);
- sbsec = inode->i_sb->s_security;
+ sbsec = selinux_superblock(inode->i_sb);
if (!(sbsec->flags & SE_SBINITIALIZED)) {
/* Defer initialization until selinux_complete_init,
after the initial policy is loaded and the security
@@ -1778,7 +1809,8 @@ selinux_determine_inode_label(const struct task_security_struct *tsec,
const struct qstr *name, u16 tclass,
u32 *_new_isid)
{
- const struct superblock_security_struct *sbsec = dir->i_sb->s_security;
+ const struct superblock_security_struct *sbsec =
+ selinux_superblock(dir->i_sb);
if ((sbsec->flags & SE_SBINITIALIZED) &&
(sbsec->behavior == SECURITY_FS_USE_MNTPOINT)) {
@@ -1809,7 +1841,7 @@ static int may_create(struct inode *dir,
int rc;
dsec = inode_security(dir);
- sbsec = dir->i_sb->s_security;
+ sbsec = selinux_superblock(dir->i_sb);
sid = tsec->sid;
@@ -1958,7 +1990,7 @@ static int superblock_has_perm(const struct cred *cred,
struct superblock_security_struct *sbsec;
u32 sid = cred_sid(cred);
- sbsec = sb->s_security;
+ sbsec = selinux_superblock(sb);
return avc_has_perm(&selinux_state,
sid, sbsec->sid, SECCLASS_FILESYSTEM, perms, ad);
}
@@ -2034,11 +2066,8 @@ static inline u32 open_file_to_av(struct file *file)
static int selinux_binder_set_context_mgr(struct task_struct *mgr)
{
- u32 mysid = current_sid();
- u32 mgrsid = task_sid(mgr);
-
return avc_has_perm(&selinux_state,
- mysid, mgrsid, SECCLASS_BINDER,
+ current_sid(), task_sid_binder(mgr), SECCLASS_BINDER,
BINDER__SET_CONTEXT_MGR, NULL);
}
@@ -2046,8 +2075,7 @@ static int selinux_binder_transaction(struct task_struct *from,
struct task_struct *to)
{
u32 mysid = current_sid();
- u32 fromsid = task_sid(from);
- u32 tosid = task_sid(to);
+ u32 fromsid = task_sid_binder(from);
int rc;
if (mysid != fromsid) {
@@ -2058,19 +2086,16 @@ static int selinux_binder_transaction(struct task_struct *from,
return rc;
}
- return avc_has_perm(&selinux_state,
- fromsid, tosid, SECCLASS_BINDER, BINDER__CALL,
- NULL);
+ return avc_has_perm(&selinux_state, fromsid, task_sid_binder(to),
+ SECCLASS_BINDER, BINDER__CALL, NULL);
}
static int selinux_binder_transfer_binder(struct task_struct *from,
struct task_struct *to)
{
- u32 fromsid = task_sid(from);
- u32 tosid = task_sid(to);
-
return avc_has_perm(&selinux_state,
- fromsid, tosid, SECCLASS_BINDER, BINDER__TRANSFER,
+ task_sid_binder(from), task_sid_binder(to),
+ SECCLASS_BINDER, BINDER__TRANSFER,
NULL);
}
@@ -2078,7 +2103,7 @@ static int selinux_binder_transfer_file(struct task_struct *from,
struct task_struct *to,
struct file *file)
{
- u32 sid = task_sid(to);
+ u32 sid = task_sid_binder(to);
struct file_security_struct *fsec = selinux_file(file);
struct dentry *dentry = file->f_path.dentry;
struct inode_security_struct *isec;
@@ -2114,10 +2139,10 @@ static int selinux_binder_transfer_file(struct task_struct *from,
}
static int selinux_ptrace_access_check(struct task_struct *child,
- unsigned int mode)
+ unsigned int mode)
{
u32 sid = current_sid();
- u32 csid = task_sid(child);
+ u32 csid = task_sid_obj(child);
if (mode & PTRACE_MODE_READ)
return avc_has_perm(&selinux_state,
@@ -2130,15 +2155,15 @@ static int selinux_ptrace_access_check(struct task_struct *child,
static int selinux_ptrace_traceme(struct task_struct *parent)
{
return avc_has_perm(&selinux_state,
- task_sid(parent), current_sid(), SECCLASS_PROCESS,
- PROCESS__PTRACE, NULL);
+ task_sid_subj(parent), task_sid_obj(current),
+ SECCLASS_PROCESS, PROCESS__PTRACE, NULL);
}
static int selinux_capget(struct task_struct *target, kernel_cap_t *effective,
kernel_cap_t *inheritable, kernel_cap_t *permitted)
{
return avc_has_perm(&selinux_state,
- current_sid(), task_sid(target), SECCLASS_PROCESS,
+ current_sid(), task_sid_obj(target), SECCLASS_PROCESS,
PROCESS__GETCAP, NULL);
}
@@ -2263,7 +2288,7 @@ static u32 ptrace_parent_sid(void)
rcu_read_lock();
tracer = ptrace_parent(current);
if (tracer)
- sid = task_sid(tracer);
+ sid = task_sid_obj(tracer);
rcu_read_unlock();
return sid;
@@ -2587,11 +2612,7 @@ static void selinux_bprm_committed_creds(struct linux_binprm *bprm)
static int selinux_sb_alloc_security(struct super_block *sb)
{
- struct superblock_security_struct *sbsec;
-
- sbsec = kzalloc(sizeof(struct superblock_security_struct), GFP_KERNEL);
- if (!sbsec)
- return -ENOMEM;
+ struct superblock_security_struct *sbsec = selinux_superblock(sb);
mutex_init(&sbsec->lock);
INIT_LIST_HEAD(&sbsec->isec_head);
@@ -2599,16 +2620,10 @@ static int selinux_sb_alloc_security(struct super_block *sb)
sbsec->sid = SECINITSID_UNLABELED;
sbsec->def_sid = SECINITSID_FILE;
sbsec->mntpoint_sid = SECINITSID_UNLABELED;
- sb->s_security = sbsec;
return 0;
}
-static void selinux_sb_free_security(struct super_block *sb)
-{
- superblock_free_security(sb);
-}
-
static inline int opt_len(const char *s)
{
bool open_quote = false;
@@ -2684,13 +2699,68 @@ free_opt:
return rc;
}
-static int selinux_sb_remount(struct super_block *sb, void *mnt_opts)
+static int selinux_sb_mnt_opts_compat(struct super_block *sb, void *mnt_opts)
{
struct selinux_mnt_opts *opts = mnt_opts;
struct superblock_security_struct *sbsec = sb->s_security;
u32 sid;
int rc;
+ /*
+ * Superblock not initialized (i.e. no options) - reject if any
+ * options specified, otherwise accept.
+ */
+ if (!(sbsec->flags & SE_SBINITIALIZED))
+ return opts ? 1 : 0;
+
+ /*
+ * Superblock initialized and no options specified - reject if
+ * superblock has any options set, otherwise accept.
+ */
+ if (!opts)
+ return (sbsec->flags & SE_MNTMASK) ? 1 : 0;
+
+ if (opts->fscontext) {
+ rc = parse_sid(sb, opts->fscontext, &sid);
+ if (rc)
+ return 1;
+ if (bad_option(sbsec, FSCONTEXT_MNT, sbsec->sid, sid))
+ return 1;
+ }
+ if (opts->context) {
+ rc = parse_sid(sb, opts->context, &sid);
+ if (rc)
+ return 1;
+ if (bad_option(sbsec, CONTEXT_MNT, sbsec->mntpoint_sid, sid))
+ return 1;
+ }
+ if (opts->rootcontext) {
+ struct inode_security_struct *root_isec;
+
+ root_isec = backing_inode_security(sb->s_root);
+ rc = parse_sid(sb, opts->rootcontext, &sid);
+ if (rc)
+ return 1;
+ if (bad_option(sbsec, ROOTCONTEXT_MNT, root_isec->sid, sid))
+ return 1;
+ }
+ if (opts->defcontext) {
+ rc = parse_sid(sb, opts->defcontext, &sid);
+ if (rc)
+ return 1;
+ if (bad_option(sbsec, DEFCONTEXT_MNT, sbsec->def_sid, sid))
+ return 1;
+ }
+ return 0;
+}
+
+static int selinux_sb_remount(struct super_block *sb, void *mnt_opts)
+{
+ struct selinux_mnt_opts *opts = mnt_opts;
+ struct superblock_security_struct *sbsec = selinux_superblock(sb);
+ u32 sid;
+ int rc;
+
if (!(sbsec->flags & SE_SBINITIALIZED))
return 0;
@@ -2925,7 +2995,7 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
int rc;
char *context;
- sbsec = dir->i_sb->s_security;
+ sbsec = selinux_superblock(dir->i_sb);
newsid = tsec->create_sid;
@@ -3227,7 +3297,7 @@ static int selinux_inode_setxattr(struct user_namespace *mnt_userns,
if (!selinux_initialized(&selinux_state))
return (inode_owner_or_capable(mnt_userns, inode) ? 0 : -EPERM);
- sbsec = inode->i_sb->s_security;
+ sbsec = selinux_superblock(inode->i_sb);
if (!(sbsec->flags & SBLABEL_MNT))
return -EOPNOTSUPP;
@@ -3472,13 +3542,14 @@ static int selinux_inode_setsecurity(struct inode *inode, const char *name,
const void *value, size_t size, int flags)
{
struct inode_security_struct *isec = inode_security_novalidate(inode);
- struct superblock_security_struct *sbsec = inode->i_sb->s_security;
+ struct superblock_security_struct *sbsec;
u32 newsid;
int rc;
if (strcmp(name, XATTR_SELINUX_SUFFIX))
return -EOPNOTSUPP;
+ sbsec = selinux_superblock(inode->i_sb);
if (!(sbsec->flags & SBLABEL_MNT))
return -EOPNOTSUPP;
@@ -3920,7 +3991,7 @@ static int selinux_file_send_sigiotask(struct task_struct *tsk,
struct fown_struct *fown, int signum)
{
struct file *file;
- u32 sid = task_sid(tsk);
+ u32 sid = task_sid_obj(tsk);
u32 perm;
struct file_security_struct *fsec;
@@ -4139,47 +4210,52 @@ static int selinux_kernel_load_data(enum kernel_load_data_id id, bool contents)
static int selinux_task_setpgid(struct task_struct *p, pid_t pgid)
{
return avc_has_perm(&selinux_state,
- current_sid(), task_sid(p), SECCLASS_PROCESS,
+ current_sid(), task_sid_obj(p), SECCLASS_PROCESS,
PROCESS__SETPGID, NULL);
}
static int selinux_task_getpgid(struct task_struct *p)
{
return avc_has_perm(&selinux_state,
- current_sid(), task_sid(p), SECCLASS_PROCESS,
+ current_sid(), task_sid_obj(p), SECCLASS_PROCESS,
PROCESS__GETPGID, NULL);
}
static int selinux_task_getsid(struct task_struct *p)
{
return avc_has_perm(&selinux_state,
- current_sid(), task_sid(p), SECCLASS_PROCESS,
+ current_sid(), task_sid_obj(p), SECCLASS_PROCESS,
PROCESS__GETSESSION, NULL);
}
-static void selinux_task_getsecid(struct task_struct *p, u32 *secid)
+static void selinux_task_getsecid_subj(struct task_struct *p, u32 *secid)
+{
+ *secid = task_sid_subj(p);
+}
+
+static void selinux_task_getsecid_obj(struct task_struct *p, u32 *secid)
{
- *secid = task_sid(p);
+ *secid = task_sid_obj(p);
}
static int selinux_task_setnice(struct task_struct *p, int nice)
{
return avc_has_perm(&selinux_state,
- current_sid(), task_sid(p), SECCLASS_PROCESS,
+ current_sid(), task_sid_obj(p), SECCLASS_PROCESS,
PROCESS__SETSCHED, NULL);
}
static int selinux_task_setioprio(struct task_struct *p, int ioprio)
{
return avc_has_perm(&selinux_state,
- current_sid(), task_sid(p), SECCLASS_PROCESS,
+ current_sid(), task_sid_obj(p), SECCLASS_PROCESS,
PROCESS__SETSCHED, NULL);
}
static int selinux_task_getioprio(struct task_struct *p)
{
return avc_has_perm(&selinux_state,
- current_sid(), task_sid(p), SECCLASS_PROCESS,
+ current_sid(), task_sid_obj(p), SECCLASS_PROCESS,
PROCESS__GETSCHED, NULL);
}
@@ -4210,7 +4286,7 @@ static int selinux_task_setrlimit(struct task_struct *p, unsigned int resource,
upon context transitions. See selinux_bprm_committing_creds. */
if (old_rlim->rlim_max != new_rlim->rlim_max)
return avc_has_perm(&selinux_state,
- current_sid(), task_sid(p),
+ current_sid(), task_sid_obj(p),
SECCLASS_PROCESS, PROCESS__SETRLIMIT, NULL);
return 0;
@@ -4219,21 +4295,21 @@ static int selinux_task_setrlimit(struct task_struct *p, unsigned int resource,
static int selinux_task_setscheduler(struct task_struct *p)
{
return avc_has_perm(&selinux_state,
- current_sid(), task_sid(p), SECCLASS_PROCESS,
+ current_sid(), task_sid_obj(p), SECCLASS_PROCESS,
PROCESS__SETSCHED, NULL);
}
static int selinux_task_getscheduler(struct task_struct *p)
{
return avc_has_perm(&selinux_state,
- current_sid(), task_sid(p), SECCLASS_PROCESS,
+ current_sid(), task_sid_obj(p), SECCLASS_PROCESS,
PROCESS__GETSCHED, NULL);
}
static int selinux_task_movememory(struct task_struct *p)
{
return avc_has_perm(&selinux_state,
- current_sid(), task_sid(p), SECCLASS_PROCESS,
+ current_sid(), task_sid_obj(p), SECCLASS_PROCESS,
PROCESS__SETSCHED, NULL);
}
@@ -4252,14 +4328,14 @@ static int selinux_task_kill(struct task_struct *p, struct kernel_siginfo *info,
else
secid = cred_sid(cred);
return avc_has_perm(&selinux_state,
- secid, task_sid(p), SECCLASS_PROCESS, perm, NULL);
+ secid, task_sid_obj(p), SECCLASS_PROCESS, perm, NULL);
}
static void selinux_task_to_inode(struct task_struct *p,
struct inode *inode)
{
struct inode_security_struct *isec = selinux_inode(inode);
- u32 sid = task_sid(p);
+ u32 sid = task_sid_obj(p);
spin_lock(&isec->lock);
isec->sclass = inode_mode_to_security_class(inode->i_mode);
@@ -6152,7 +6228,7 @@ static int selinux_msg_queue_msgrcv(struct kern_ipc_perm *msq, struct msg_msg *m
struct ipc_security_struct *isec;
struct msg_security_struct *msec;
struct common_audit_data ad;
- u32 sid = task_sid(target);
+ u32 sid = task_sid_subj(target);
int rc;
isec = selinux_ipc(msq);
@@ -6975,6 +7051,7 @@ struct lsm_blob_sizes selinux_blob_sizes __lsm_ro_after_init = {
.lbs_inode = sizeof(struct inode_security_struct),
.lbs_ipc = sizeof(struct ipc_security_struct),
.lbs_msg_msg = sizeof(struct msg_security_struct),
+ .lbs_superblock = sizeof(struct superblock_security_struct),
};
#ifdef CONFIG_PERF_EVENTS
@@ -7075,8 +7152,8 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(bprm_committing_creds, selinux_bprm_committing_creds),
LSM_HOOK_INIT(bprm_committed_creds, selinux_bprm_committed_creds),
- LSM_HOOK_INIT(sb_free_security, selinux_sb_free_security),
LSM_HOOK_INIT(sb_free_mnt_opts, selinux_free_mnt_opts),
+ LSM_HOOK_INIT(sb_mnt_opts_compat, selinux_sb_mnt_opts_compat),
LSM_HOOK_INIT(sb_remount, selinux_sb_remount),
LSM_HOOK_INIT(sb_kern_mount, selinux_sb_kern_mount),
LSM_HOOK_INIT(sb_show_options, selinux_sb_show_options),
@@ -7148,7 +7225,8 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(task_setpgid, selinux_task_setpgid),
LSM_HOOK_INIT(task_getpgid, selinux_task_getpgid),
LSM_HOOK_INIT(task_getsid, selinux_task_getsid),
- LSM_HOOK_INIT(task_getsecid, selinux_task_getsecid),
+ LSM_HOOK_INIT(task_getsecid_subj, selinux_task_getsecid_subj),
+ LSM_HOOK_INIT(task_getsecid_obj, selinux_task_getsecid_obj),
LSM_HOOK_INIT(task_setnice, selinux_task_setnice),
LSM_HOOK_INIT(task_setioprio, selinux_task_setioprio),
LSM_HOOK_INIT(task_getioprio, selinux_task_getioprio),
diff --git a/security/selinux/ima.c b/security/selinux/ima.c
index 03715893ff97..34d421861bfc 100644
--- a/security/selinux/ima.c
+++ b/security/selinux/ima.c
@@ -13,18 +13,83 @@
#include "ima.h"
/*
- * selinux_ima_measure_state - Measure hash of the SELinux policy
+ * selinux_ima_collect_state - Read selinux configuration settings
*
- * @state: selinux state struct
+ * @state: selinux_state
*
- * NOTE: This function must be called with policy_mutex held.
+ * On success returns the configuration settings string.
+ * On error, returns NULL.
*/
-void selinux_ima_measure_state(struct selinux_state *state)
+static char *selinux_ima_collect_state(struct selinux_state *state)
{
+ const char *on = "=1;", *off = "=0;";
+ char *buf;
+ int buf_len, len, i, rc;
+
+ buf_len = strlen("initialized=0;enforcing=0;checkreqprot=0;") + 1;
+
+ len = strlen(on);
+ for (i = 0; i < __POLICYDB_CAPABILITY_MAX; i++)
+ buf_len += strlen(selinux_policycap_names[i]) + len;
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return NULL;
+
+ rc = strscpy(buf, "initialized", buf_len);
+ WARN_ON(rc < 0);
+
+ rc = strlcat(buf, selinux_initialized(state) ? on : off, buf_len);
+ WARN_ON(rc >= buf_len);
+
+ rc = strlcat(buf, "enforcing", buf_len);
+ WARN_ON(rc >= buf_len);
+
+ rc = strlcat(buf, enforcing_enabled(state) ? on : off, buf_len);
+ WARN_ON(rc >= buf_len);
+
+ rc = strlcat(buf, "checkreqprot", buf_len);
+ WARN_ON(rc >= buf_len);
+
+ rc = strlcat(buf, checkreqprot_get(state) ? on : off, buf_len);
+ WARN_ON(rc >= buf_len);
+
+ for (i = 0; i < __POLICYDB_CAPABILITY_MAX; i++) {
+ rc = strlcat(buf, selinux_policycap_names[i], buf_len);
+ WARN_ON(rc >= buf_len);
+
+ rc = strlcat(buf, state->policycap[i] ? on : off, buf_len);
+ WARN_ON(rc >= buf_len);
+ }
+
+ return buf;
+}
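
For illustration, the returned buffer is a single run of key=value pairs; on a typical kernel it would look roughly like the line below (the exact policy capability names and values depend on the running kernel and its policy):

	initialized=1;enforcing=1;checkreqprot=0;network_peer_controls=1;open_perms=1;extended_socket_class=1;always_check_network=0;cgroup_seclabel=1;nnp_nosuid_transition=1;genfs_seclabel_symlinks=0;
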
+
+/*
+ * selinux_ima_measure_state_locked - Measure SELinux state and hash of policy
+ *
+ * @state: selinux state struct
+ */
+void selinux_ima_measure_state_locked(struct selinux_state *state)
+{
+ char *state_str = NULL;
void *policy = NULL;
size_t policy_len;
int rc = 0;
+ WARN_ON(!mutex_is_locked(&state->policy_mutex));
+
+ state_str = selinux_ima_collect_state(state);
+ if (!state_str) {
+ pr_err("SELinux: %s: failed to read state.\n", __func__);
+ return;
+ }
+
+ ima_measure_critical_data("selinux", "selinux-state",
+ state_str, strlen(state_str), false);
+
+ kfree(state_str);
+
/*
* Measure SELinux policy only after initialization is completed.
*/
@@ -42,3 +107,17 @@ void selinux_ima_measure_state(struct selinux_state *state)
vfree(policy);
}
+
+/*
+ * selinux_ima_measure_state - Measure SELinux state and hash of policy
+ *
+ * @state: selinux state struct
+ */
+void selinux_ima_measure_state(struct selinux_state *state)
+{
+ WARN_ON(mutex_is_locked(&state->policy_mutex));
+
+ mutex_lock(&state->policy_mutex);
+ selinux_ima_measure_state_locked(state);
+ mutex_unlock(&state->policy_mutex);
+}
diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
index ba2e01a6955c..62d19bccf3de 100644
--- a/security/selinux/include/classmap.h
+++ b/security/selinux/include/classmap.h
@@ -242,11 +242,12 @@ struct security_class_mapping secclass_map[] = {
{ "infiniband_endport",
{ "manage_subnet", NULL } },
{ "bpf",
- {"map_create", "map_read", "map_write", "prog_load", "prog_run"} },
+ { "map_create", "map_read", "map_write", "prog_load", "prog_run",
+ NULL } },
{ "xdp_socket",
{ COMMON_SOCK_PERMS, NULL } },
{ "perf_event",
- {"open", "cpu", "kernel", "tracepoint", "read", "write"} },
+ { "open", "cpu", "kernel", "tracepoint", "read", "write", NULL } },
{ "lockdown",
{ "integrity", "confidentiality", NULL } },
{ "anon_inode",
diff --git a/security/selinux/include/ima.h b/security/selinux/include/ima.h
index d69c36611423..75ca92b4a462 100644
--- a/security/selinux/include/ima.h
+++ b/security/selinux/include/ima.h
@@ -15,10 +15,16 @@
#ifdef CONFIG_IMA
extern void selinux_ima_measure_state(struct selinux_state *selinux_state);
+extern void selinux_ima_measure_state_locked(
+ struct selinux_state *selinux_state);
#else
static inline void selinux_ima_measure_state(struct selinux_state *selinux_state)
{
}
+static inline void selinux_ima_measure_state_locked(
+ struct selinux_state *selinux_state)
+{
+}
#endif
#endif /* _SELINUX_IMA_H_ */
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index ca4d7ab6a835..2953132408bf 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -188,4 +188,10 @@ static inline u32 current_sid(void)
return tsec->sid;
}
+static inline struct superblock_security_struct *selinux_superblock(
+ const struct super_block *superblock)
+{
+ return superblock->s_security + selinux_blob_sizes.lbs_superblock;
+}
+
#endif /* _SELINUX_OBJSEC_H_ */
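
selinux_superblock() works because, at init time, each module's lbs_superblock is rewritten from a requested size into that module's byte offset within the shared blob allocated by lsm_superblock_alloc(). A simplified sketch of that bookkeeping, modeled on lsm_set_blob_size() in security/security.c:

	/* Before init: *need holds the LSM's requested size.
	 * After init:  *need holds the LSM's offset into the shared blob,
	 *              and *total holds the blob's final size. */
	static void set_blob_size_sketch(int *need, int *total)
	{
		if (*need > 0) {
			const int offset = *total;

			*total += *need;
			*need = offset;
		}
	}
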
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index 6fe25300b89d..ac0ece01305a 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -219,14 +219,21 @@ static inline bool selinux_policycap_genfs_seclabel_symlinks(void)
return READ_ONCE(state->policycap[POLICYDB_CAPABILITY_GENFS_SECLABEL_SYMLINKS]);
}
+struct selinux_policy_convert_data;
+
+struct selinux_load_state {
+ struct selinux_policy *policy;
+ struct selinux_policy_convert_data *convert_data;
+};
+
int security_mls_enabled(struct selinux_state *state);
int security_load_policy(struct selinux_state *state,
- void *data, size_t len,
- struct selinux_policy **newpolicyp);
+ void *data, size_t len,
+ struct selinux_load_state *load_state);
void selinux_policy_commit(struct selinux_state *state,
- struct selinux_policy *newpolicy);
+ struct selinux_load_state *load_state);
void selinux_policy_cancel(struct selinux_state *state,
- struct selinux_policy *policy);
+ struct selinux_load_state *load_state);
int security_read_policy(struct selinux_state *state,
void **data, size_t *len);
int security_read_state_kernel(struct selinux_state *state,
@@ -419,7 +426,7 @@ extern struct page *selinux_kernel_status_page(struct selinux_state *state);
#define SELINUX_KERNEL_STATUS_VERSION 1
struct selinux_kernel_status {
- u32 version; /* version number of thie structure */
+ u32 version; /* version number of the structure */
u32 sequence; /* sequence number of seqlock logic */
u32 enforcing; /* current setting of enforcing mode */
u32 policyload; /* times of policy reloaded */
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
index b69231918686..d59276f48d4f 100644
--- a/security/selinux/nlmsgtab.c
+++ b/security/selinux/nlmsgtab.c
@@ -88,6 +88,9 @@ static const struct nlmsg_perm nlmsg_route_perms[] =
{ RTM_NEWVLAN, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
{ RTM_DELVLAN, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
{ RTM_GETVLAN, NETLINK_ROUTE_SOCKET__NLMSG_READ },
+ { RTM_NEWNEXTHOPBUCKET, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_DELNEXTHOPBUCKET, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_GETNEXTHOPBUCKET, NETLINK_ROUTE_SOCKET__NLMSG_READ },
};
static const struct nlmsg_perm nlmsg_tcpdiag_perms[] =
@@ -171,7 +174,7 @@ int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm)
* structures at the top of this file with the new mappings
* before updating the BUILD_BUG_ON() macro!
*/
- BUILD_BUG_ON(RTM_MAX != (RTM_NEWVLAN + 3));
+ BUILD_BUG_ON(RTM_MAX != (RTM_NEWNEXTHOPBUCKET + 3));
err = nlmsg_perm(nlmsg_type, perm, nlmsg_route_perms,
sizeof(nlmsg_route_perms));
break;
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 01a7d50ed39b..e4cd7cb856f3 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -41,6 +41,7 @@
#include "security.h"
#include "objsec.h"
#include "conditional.h"
+#include "ima.h"
enum sel_inos {
SEL_ROOT_INO = 2,
@@ -182,6 +183,8 @@ static ssize_t sel_write_enforce(struct file *file, const char __user *buf,
selinux_status_update_setenforce(state, new_value);
if (!new_value)
call_blocking_lsm_notifier(LSM_POLICY_CHANGE, NULL);
+
+ selinux_ima_measure_state(state);
}
length = count;
out:
@@ -563,17 +566,13 @@ static int sel_make_policy_nodes(struct selinux_fs_info *fsi,
ret = sel_make_bools(newpolicy, tmp_bool_dir, &tmp_bool_num,
&tmp_bool_names, &tmp_bool_values);
- if (ret) {
- pr_err("SELinux: failed to load policy booleans\n");
+ if (ret)
goto out;
- }
ret = sel_make_classes(newpolicy, tmp_class_dir,
&fsi->last_class_ino);
- if (ret) {
- pr_err("SELinux: failed to load policy classes\n");
+ if (ret)
goto out;
- }
/* booleans */
old_dentry = fsi->bool_dir;
@@ -616,7 +615,7 @@ static ssize_t sel_write_load(struct file *file, const char __user *buf,
{
struct selinux_fs_info *fsi = file_inode(file)->i_sb->s_fs_info;
- struct selinux_policy *newpolicy;
+ struct selinux_load_state load_state;
ssize_t length;
void *data = NULL;
@@ -642,23 +641,23 @@ static ssize_t sel_write_load(struct file *file, const char __user *buf,
if (copy_from_user(data, buf, count) != 0)
goto out;
- length = security_load_policy(fsi->state, data, count, &newpolicy);
+ length = security_load_policy(fsi->state, data, count, &load_state);
if (length) {
pr_warn_ratelimited("SELinux: failed to load policy\n");
goto out;
}
- length = sel_make_policy_nodes(fsi, newpolicy);
+ length = sel_make_policy_nodes(fsi, load_state.policy);
if (length) {
- selinux_policy_cancel(fsi->state, newpolicy);
- goto out1;
+ pr_warn_ratelimited("SELinux: failed to initialize selinuxfs\n");
+ selinux_policy_cancel(fsi->state, &load_state);
+ goto out;
}
- selinux_policy_commit(fsi->state, newpolicy);
+ selinux_policy_commit(fsi->state, &load_state);
length = count;
-out1:
audit_log(audit_context(), GFP_KERNEL, AUDIT_MAC_POLICY_LOAD,
"auid=%u ses=%u lsm=selinux res=1",
from_kuid(&init_user_ns, audit_get_loginuid(current)),
@@ -762,6 +761,9 @@ static ssize_t sel_write_checkreqprot(struct file *file, const char __user *buf,
checkreqprot_set(fsi->state, (new_value ? 1 : 0));
length = count;
+
+ selinux_ima_measure_state(fsi->state);
+
out:
kfree(page);
return length;
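For reference, the whole sel_write_load() path above is driven by a single write(2) of the complete binary policy to selinuxfs; a minimal userspace loader looks like this (the conventional /sys/fs/selinux mount point is assumed):

	#include <fcntl.h>
	#include <unistd.h>

	static int load_policy_blob(const void *data, size_t len)
	{
		int fd = open("/sys/fs/selinux/load", O_WRONLY);
		ssize_t n;

		if (fd < 0)
			return -1;
		n = write(fd, data, len);	/* triggers sel_write_load() */
		close(fd);
		return n == (ssize_t)len ? 0 : -1;
	}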
diff --git a/security/selinux/ss/avtab.c b/security/selinux/ss/avtab.c
index 6dcb6aa4db7f..75df32906055 100644
--- a/security/selinux/ss/avtab.c
+++ b/security/selinux/ss/avtab.c
@@ -109,7 +109,7 @@ static int avtab_insert(struct avtab *h, struct avtab_key *key, struct avtab_dat
struct avtab_node *prev, *cur, *newnode;
u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
- if (!h)
+ if (!h || !h->nslot)
return -EINVAL;
hvalue = avtab_hash(key, h->mask);
@@ -154,7 +154,7 @@ avtab_insert_nonunique(struct avtab *h, struct avtab_key *key, struct avtab_datu
struct avtab_node *prev, *cur;
u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
- if (!h)
+ if (!h || !h->nslot)
return NULL;
hvalue = avtab_hash(key, h->mask);
for (prev = NULL, cur = h->htable[hvalue];
@@ -184,7 +184,7 @@ struct avtab_datum *avtab_search(struct avtab *h, struct avtab_key *key)
struct avtab_node *cur;
u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
- if (!h)
+ if (!h || !h->nslot)
return NULL;
hvalue = avtab_hash(key, h->mask);
@@ -220,7 +220,7 @@ avtab_search_node(struct avtab *h, struct avtab_key *key)
struct avtab_node *cur;
u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
- if (!h)
+ if (!h || !h->nslot)
return NULL;
hvalue = avtab_hash(key, h->mask);
@@ -295,6 +295,7 @@ void avtab_destroy(struct avtab *h)
}
kvfree(h->htable);
h->htable = NULL;
+ h->nel = 0;
h->nslot = 0;
h->mask = 0;
}
@@ -303,88 +304,52 @@ void avtab_init(struct avtab *h)
{
h->htable = NULL;
h->nel = 0;
+ h->nslot = 0;
+ h->mask = 0;
}
-int avtab_alloc(struct avtab *h, u32 nrules)
+static int avtab_alloc_common(struct avtab *h, u32 nslot)
{
- u32 mask = 0;
- u32 shift = 0;
- u32 work = nrules;
- u32 nslot = 0;
-
- if (nrules == 0)
- goto avtab_alloc_out;
-
- while (work) {
- work = work >> 1;
- shift++;
- }
- if (shift > 2)
- shift = shift - 2;
- nslot = 1 << shift;
- if (nslot > MAX_AVTAB_HASH_BUCKETS)
- nslot = MAX_AVTAB_HASH_BUCKETS;
- mask = nslot - 1;
+ if (!nslot)
+ return 0;
h->htable = kvcalloc(nslot, sizeof(void *), GFP_KERNEL);
if (!h->htable)
return -ENOMEM;
- avtab_alloc_out:
- h->nel = 0;
h->nslot = nslot;
- h->mask = mask;
- pr_debug("SELinux: %d avtab hash slots, %d rules.\n",
- h->nslot, nrules);
+ h->mask = nslot - 1;
return 0;
}
-int avtab_duplicate(struct avtab *new, struct avtab *orig)
+int avtab_alloc(struct avtab *h, u32 nrules)
{
- int i;
- struct avtab_node *node, *tmp, *tail;
-
- memset(new, 0, sizeof(*new));
+ int rc;
+ u32 nslot = 0;
- new->htable = kvcalloc(orig->nslot, sizeof(void *), GFP_KERNEL);
- if (!new->htable)
- return -ENOMEM;
- new->nslot = orig->nslot;
- new->mask = orig->mask;
-
- for (i = 0; i < orig->nslot; i++) {
- tail = NULL;
- for (node = orig->htable[i]; node; node = node->next) {
- tmp = kmem_cache_zalloc(avtab_node_cachep, GFP_KERNEL);
- if (!tmp)
- goto error;
- tmp->key = node->key;
- if (tmp->key.specified & AVTAB_XPERMS) {
- tmp->datum.u.xperms =
- kmem_cache_zalloc(avtab_xperms_cachep,
- GFP_KERNEL);
- if (!tmp->datum.u.xperms) {
- kmem_cache_free(avtab_node_cachep, tmp);
- goto error;
- }
- tmp->datum.u.xperms = node->datum.u.xperms;
- } else
- tmp->datum.u.data = node->datum.u.data;
-
- if (tail)
- tail->next = tmp;
- else
- new->htable[i] = tmp;
-
- tail = tmp;
- new->nel++;
+ if (nrules != 0) {
+ u32 shift = 1;
+ u32 work = nrules >> 3;
+ while (work) {
+ work >>= 1;
+ shift++;
}
+ nslot = 1 << shift;
+ if (nslot > MAX_AVTAB_HASH_BUCKETS)
+ nslot = MAX_AVTAB_HASH_BUCKETS;
+
+ rc = avtab_alloc_common(h, nslot);
+ if (rc)
+ return rc;
}
+ pr_debug("SELinux: %d avtab hash slots, %d rules.\n", nslot, nrules);
return 0;
-error:
- avtab_destroy(new);
- return -ENOMEM;
+}
+
+int avtab_alloc_dup(struct avtab *new, const struct avtab *orig)
+{
+ return avtab_alloc_common(new, orig->nslot);
}
void avtab_hash_eval(struct avtab *h, char *tag)
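The rewritten avtab_alloc() above sizes the table once, up front: nslot becomes the power of two just above nrules/8 (keeping average chain length in the low single digits), capped at MAX_AVTAB_HASH_BUCKETS. A self-contained sketch of that computation (the 32768 cap is assumed to match avtab.h):

	#include <stdio.h>

	#define MAX_AVTAB_HASH_BUCKETS 32768	/* assumed to match avtab.h */

	/* Mirrors the slot-count logic in the new avtab_alloc(). */
	static unsigned int avtab_nslot(unsigned int nrules)
	{
		unsigned int shift = 1, nslot;
		unsigned int work = nrules >> 3;

		if (nrules == 0)
			return 0;
		while (work) {
			work >>= 1;
			shift++;
		}
		nslot = 1u << shift;
		return nslot > MAX_AVTAB_HASH_BUCKETS ?
			MAX_AVTAB_HASH_BUCKETS : nslot;
	}

	int main(void)
	{
		unsigned int samples[] = { 0, 100, 10000, 1000000 };
		unsigned int i;

		for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
			printf("%7u rules -> %5u slots\n",
			       samples[i], avtab_nslot(samples[i]));
		return 0;
	}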
diff --git a/security/selinux/ss/avtab.h b/security/selinux/ss/avtab.h
index 4c4445ca9118..f2eeb36265d1 100644
--- a/security/selinux/ss/avtab.h
+++ b/security/selinux/ss/avtab.h
@@ -89,7 +89,7 @@ struct avtab {
void avtab_init(struct avtab *h);
int avtab_alloc(struct avtab *, u32);
-int avtab_duplicate(struct avtab *new, struct avtab *orig);
+int avtab_alloc_dup(struct avtab *new, const struct avtab *orig);
struct avtab_datum *avtab_search(struct avtab *h, struct avtab_key *k);
void avtab_destroy(struct avtab *h);
void avtab_hash_eval(struct avtab *h, char *tag);
diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c
index 0b32f3ab025e..1ef74c085f2b 100644
--- a/security/selinux/ss/conditional.c
+++ b/security/selinux/ss/conditional.c
@@ -605,7 +605,6 @@ static int cond_dup_av_list(struct cond_av_list *new,
struct cond_av_list *orig,
struct avtab *avtab)
{
- struct avtab_node *avnode;
u32 i;
memset(new, 0, sizeof(*new));
@@ -615,10 +614,11 @@ static int cond_dup_av_list(struct cond_av_list *new,
return -ENOMEM;
for (i = 0; i < orig->len; i++) {
- avnode = avtab_search_node(avtab, &orig->nodes[i]->key);
- if (WARN_ON(!avnode))
- return -EINVAL;
- new->nodes[i] = avnode;
+ new->nodes[i] = avtab_insert_nonunique(avtab,
+ &orig->nodes[i]->key,
+ &orig->nodes[i]->datum);
+ if (!new->nodes[i])
+ return -ENOMEM;
new->len++;
}
@@ -630,7 +630,7 @@ static int duplicate_policydb_cond_list(struct policydb *newp,
{
int rc, i, j;
- rc = avtab_duplicate(&newp->te_cond_avtab, &origp->te_cond_avtab);
+ rc = avtab_alloc_dup(&newp->te_cond_avtab, &origp->te_cond_avtab);
if (rc)
return rc;
diff --git a/security/selinux/ss/hashtab.c b/security/selinux/ss/hashtab.c
index 3881787ce492..b8f6b3e0a921 100644
--- a/security/selinux/ss/hashtab.c
+++ b/security/selinux/ss/hashtab.c
@@ -13,7 +13,7 @@ static struct kmem_cache *hashtab_node_cachep __ro_after_init;
/*
* Here we simply round the number of elements up to the nearest power of two.
- * I tried also other options like rouding down or rounding to the closest
+ * I also tried other options, like rounding down or rounding to the closest
* power of two (up or down based on which is closer), but I was unable to
* find any significant difference in lookup/insert performance that would
* justify switching to a different (less intuitive) formula. It could be that
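For the curious, "round up to the nearest power of two" for a 32-bit count is commonly done with the standard bit-smearing trick below (illustrative only; the in-tree helper may be written differently):

	/* Smallest power of two >= n, valid for 1 <= n <= 2^31. */
	static unsigned int pow2_roundup(unsigned int n)
	{
		n--;
		n |= n >> 1;
		n |= n >> 2;
		n |= n >> 4;
		n |= n >> 8;
		n |= n >> 16;
		return n + 1;
	}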
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 3438d0130378..0a5ce001609b 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -47,6 +47,7 @@
#include <linux/sched.h>
#include <linux/audit.h>
#include <linux/vmalloc.h>
+#include <linux/lsm_hooks.h>
#include <net/netlabel.h>
#include "flask.h"
@@ -67,6 +68,17 @@
#include "policycap_names.h"
#include "ima.h"
+struct convert_context_args {
+ struct selinux_state *state;
+ struct policydb *oldp;
+ struct policydb *newp;
+};
+
+struct selinux_policy_convert_data {
+ struct convert_context_args args;
+ struct sidtab_convert_params sidtab_params;
+};
+
/* Forward declaration. */
static int context_struct_to_string(struct policydb *policydb,
struct context *context,
@@ -1541,6 +1553,7 @@ static int security_context_to_sid_core(struct selinux_state *state,
if (!str)
goto out;
}
+retry:
rcu_read_lock();
policy = rcu_dereference(state->policy);
policydb = &policy->policydb;
@@ -1554,6 +1567,15 @@ static int security_context_to_sid_core(struct selinux_state *state,
} else if (rc)
goto out_unlock;
rc = sidtab_context_to_sid(sidtab, &context, sid);
+ if (rc == -ESTALE) {
+ rcu_read_unlock();
+ if (context.str) {
+ str = context.str;
+ context.str = NULL;
+ }
+ context_destroy(&context);
+ goto retry;
+ }
context_destroy(&context);
out_unlock:
rcu_read_unlock();
@@ -1703,7 +1725,7 @@ static int security_compute_sid(struct selinux_state *state,
struct selinux_policy *policy;
struct policydb *policydb;
struct sidtab *sidtab;
- struct class_datum *cladatum = NULL;
+ struct class_datum *cladatum;
struct context *scontext, *tcontext, newcontext;
struct sidtab_entry *sentry, *tentry;
struct avtab_key avkey;
@@ -1725,6 +1747,8 @@ static int security_compute_sid(struct selinux_state *state,
goto out;
}
+retry:
+ cladatum = NULL;
context_init(&newcontext);
rcu_read_lock();
@@ -1869,6 +1893,11 @@ static int security_compute_sid(struct selinux_state *state,
}
/* Obtain the sid for the context. */
rc = sidtab_context_to_sid(sidtab, &newcontext, out_sid);
+ if (rc == -ESTALE) {
+ rcu_read_unlock();
+ context_destroy(&newcontext);
+ goto retry;
+ }
out_unlock:
rcu_read_unlock();
context_destroy(&newcontext);
@@ -1974,12 +2003,6 @@ static inline int convert_context_handle_invalid_context(
return 0;
}
-struct convert_context_args {
- struct selinux_state *state;
- struct policydb *oldp;
- struct policydb *newp;
-};
-
/*
* Convert the values in the security context
* structure `oldc' from the values specified
@@ -2159,7 +2182,7 @@ static void selinux_policy_cond_free(struct selinux_policy *policy)
}
void selinux_policy_cancel(struct selinux_state *state,
- struct selinux_policy *policy)
+ struct selinux_load_state *load_state)
{
struct selinux_policy *oldpolicy;
@@ -2167,7 +2190,8 @@ void selinux_policy_cancel(struct selinux_state *state,
lockdep_is_held(&state->policy_mutex));
sidtab_cancel_convert(oldpolicy->sidtab);
- selinux_policy_free(policy);
+ selinux_policy_free(load_state->policy);
+ kfree(load_state->convert_data);
}
static void selinux_notify_policy_change(struct selinux_state *state,
@@ -2179,13 +2203,14 @@ static void selinux_notify_policy_change(struct selinux_state *state,
selinux_status_update_policyload(state, seqno);
selinux_netlbl_cache_invalidate();
selinux_xfrm_notify_policyload();
- selinux_ima_measure_state(state);
+ selinux_ima_measure_state_locked(state);
}
void selinux_policy_commit(struct selinux_state *state,
- struct selinux_policy *newpolicy)
+ struct selinux_load_state *load_state)
{
- struct selinux_policy *oldpolicy;
+ struct selinux_policy *oldpolicy, *newpolicy = load_state->policy;
+ unsigned long flags;
u32 seqno;
oldpolicy = rcu_dereference_protected(state->policy,
@@ -2207,7 +2232,13 @@ void selinux_policy_commit(struct selinux_state *state,
seqno = newpolicy->latest_granting;
/* Install the new policy. */
- rcu_assign_pointer(state->policy, newpolicy);
+ if (oldpolicy) {
+ sidtab_freeze_begin(oldpolicy->sidtab, &flags);
+ rcu_assign_pointer(state->policy, newpolicy);
+ sidtab_freeze_end(oldpolicy->sidtab, &flags);
+ } else {
+ rcu_assign_pointer(state->policy, newpolicy);
+ }
/* Load the policycaps from the new policy */
security_load_policycaps(state, newpolicy);
@@ -2225,6 +2256,7 @@ void selinux_policy_commit(struct selinux_state *state,
/* Free the old policy */
synchronize_rcu();
selinux_policy_free(oldpolicy);
+ kfree(load_state->convert_data);
/* Notify others of the policy change */
selinux_notify_policy_change(state, seqno);
@@ -2241,11 +2273,10 @@ void selinux_policy_commit(struct selinux_state *state,
* loading the new policy.
*/
int security_load_policy(struct selinux_state *state, void *data, size_t len,
- struct selinux_policy **newpolicyp)
+ struct selinux_load_state *load_state)
{
struct selinux_policy *newpolicy, *oldpolicy;
- struct sidtab_convert_params convert_params;
- struct convert_context_args args;
+ struct selinux_policy_convert_data *convert_data;
int rc = 0;
struct policy_file file = { data, len }, *fp = &file;
@@ -2275,10 +2306,10 @@ int security_load_policy(struct selinux_state *state, void *data, size_t len,
goto err_mapping;
}
-
if (!selinux_initialized(state)) {
/* First policy load, so no need to preserve state from old policy */
- *newpolicyp = newpolicy;
+ load_state->policy = newpolicy;
+ load_state->convert_data = NULL;
return 0;
}
@@ -2292,29 +2323,38 @@ int security_load_policy(struct selinux_state *state, void *data, size_t len,
goto err_free_isids;
}
+ convert_data = kmalloc(sizeof(*convert_data), GFP_KERNEL);
+ if (!convert_data) {
+ rc = -ENOMEM;
+ goto err_free_isids;
+ }
+
/*
* Convert the internal representations of contexts
* in the new SID table.
*/
- args.state = state;
- args.oldp = &oldpolicy->policydb;
- args.newp = &newpolicy->policydb;
+ convert_data->args.state = state;
+ convert_data->args.oldp = &oldpolicy->policydb;
+ convert_data->args.newp = &newpolicy->policydb;
- convert_params.func = convert_context;
- convert_params.args = &args;
- convert_params.target = newpolicy->sidtab;
+ convert_data->sidtab_params.func = convert_context;
+ convert_data->sidtab_params.args = &convert_data->args;
+ convert_data->sidtab_params.target = newpolicy->sidtab;
- rc = sidtab_convert(oldpolicy->sidtab, &convert_params);
+ rc = sidtab_convert(oldpolicy->sidtab, &convert_data->sidtab_params);
if (rc) {
pr_err("SELinux: unable to convert the internal"
" representation of contexts in the new SID"
" table\n");
- goto err_free_isids;
+ goto err_free_convert_data;
}
- *newpolicyp = newpolicy;
+ load_state->policy = newpolicy;
+ load_state->convert_data = convert_data;
return 0;
+err_free_convert_data:
+ kfree(convert_data);
err_free_isids:
sidtab_destroy(newpolicy->sidtab);
err_mapping:
@@ -2342,13 +2382,15 @@ int security_port_sid(struct selinux_state *state,
struct policydb *policydb;
struct sidtab *sidtab;
struct ocontext *c;
- int rc = 0;
+ int rc;
if (!selinux_initialized(state)) {
*out_sid = SECINITSID_PORT;
return 0;
}
+retry:
+ rc = 0;
rcu_read_lock();
policy = rcu_dereference(state->policy);
policydb = &policy->policydb;
@@ -2367,6 +2409,10 @@ int security_port_sid(struct selinux_state *state,
if (!c->sid[0]) {
rc = sidtab_context_to_sid(sidtab, &c->context[0],
&c->sid[0]);
+ if (rc == -ESTALE) {
+ rcu_read_unlock();
+ goto retry;
+ }
if (rc)
goto out;
}
@@ -2393,13 +2439,15 @@ int security_ib_pkey_sid(struct selinux_state *state,
struct policydb *policydb;
struct sidtab *sidtab;
struct ocontext *c;
- int rc = 0;
+ int rc;
if (!selinux_initialized(state)) {
*out_sid = SECINITSID_UNLABELED;
return 0;
}
+retry:
+ rc = 0;
rcu_read_lock();
policy = rcu_dereference(state->policy);
policydb = &policy->policydb;
@@ -2420,6 +2468,10 @@ int security_ib_pkey_sid(struct selinux_state *state,
rc = sidtab_context_to_sid(sidtab,
&c->context[0],
&c->sid[0]);
+ if (rc == -ESTALE) {
+ rcu_read_unlock();
+ goto retry;
+ }
if (rc)
goto out;
}
@@ -2445,13 +2497,15 @@ int security_ib_endport_sid(struct selinux_state *state,
struct policydb *policydb;
struct sidtab *sidtab;
struct ocontext *c;
- int rc = 0;
+ int rc;
if (!selinux_initialized(state)) {
*out_sid = SECINITSID_UNLABELED;
return 0;
}
+retry:
+ rc = 0;
rcu_read_lock();
policy = rcu_dereference(state->policy);
policydb = &policy->policydb;
@@ -2472,6 +2526,10 @@ int security_ib_endport_sid(struct selinux_state *state,
if (!c->sid[0]) {
rc = sidtab_context_to_sid(sidtab, &c->context[0],
&c->sid[0]);
+ if (rc == -ESTALE) {
+ rcu_read_unlock();
+ goto retry;
+ }
if (rc)
goto out;
}
@@ -2495,7 +2553,7 @@ int security_netif_sid(struct selinux_state *state,
struct selinux_policy *policy;
struct policydb *policydb;
struct sidtab *sidtab;
- int rc = 0;
+ int rc;
struct ocontext *c;
if (!selinux_initialized(state)) {
@@ -2503,6 +2561,8 @@ int security_netif_sid(struct selinux_state *state,
return 0;
}
+retry:
+ rc = 0;
rcu_read_lock();
policy = rcu_dereference(state->policy);
policydb = &policy->policydb;
@@ -2519,10 +2579,18 @@ int security_netif_sid(struct selinux_state *state,
if (!c->sid[0] || !c->sid[1]) {
rc = sidtab_context_to_sid(sidtab, &c->context[0],
&c->sid[0]);
+ if (rc == -ESTALE) {
+ rcu_read_unlock();
+ goto retry;
+ }
if (rc)
goto out;
rc = sidtab_context_to_sid(sidtab, &c->context[1],
&c->sid[1]);
+ if (rc == -ESTALE) {
+ rcu_read_unlock();
+ goto retry;
+ }
if (rc)
goto out;
}
@@ -2572,6 +2640,7 @@ int security_node_sid(struct selinux_state *state,
return 0;
}
+retry:
rcu_read_lock();
policy = rcu_dereference(state->policy);
policydb = &policy->policydb;
@@ -2620,6 +2689,10 @@ int security_node_sid(struct selinux_state *state,
rc = sidtab_context_to_sid(sidtab,
&c->context[0],
&c->sid[0]);
+ if (rc == -ESTALE) {
+ rcu_read_unlock();
+ goto retry;
+ }
if (rc)
goto out;
}
@@ -2661,18 +2734,24 @@ int security_get_user_sids(struct selinux_state *state,
struct sidtab *sidtab;
struct context *fromcon, usercon;
u32 *mysids = NULL, *mysids2, sid;
- u32 mynel = 0, maxnel = SIDS_NEL;
+ u32 i, j, mynel, maxnel = SIDS_NEL;
struct user_datum *user;
struct role_datum *role;
struct ebitmap_node *rnode, *tnode;
- int rc = 0, i, j;
+ int rc;
*sids = NULL;
*nel = 0;
if (!selinux_initialized(state))
- goto out;
+ return 0;
+
+ mysids = kcalloc(maxnel, sizeof(*mysids), GFP_KERNEL);
+ if (!mysids)
+ return -ENOMEM;
+retry:
+ mynel = 0;
rcu_read_lock();
policy = rcu_dereference(state->policy);
policydb = &policy->policydb;
@@ -2692,11 +2771,6 @@ int security_get_user_sids(struct selinux_state *state,
usercon.user = user->value;
- rc = -ENOMEM;
- mysids = kcalloc(maxnel, sizeof(*mysids), GFP_ATOMIC);
- if (!mysids)
- goto out_unlock;
-
ebitmap_for_each_positive_bit(&user->roles, rnode, i) {
role = policydb->role_val_to_struct[i];
usercon.role = i + 1;
@@ -2708,6 +2782,10 @@ int security_get_user_sids(struct selinux_state *state,
continue;
rc = sidtab_context_to_sid(sidtab, &usercon, &sid);
+ if (rc == -ESTALE) {
+ rcu_read_unlock();
+ goto retry;
+ }
if (rc)
goto out_unlock;
if (mynel < maxnel) {
@@ -2730,14 +2808,14 @@ out_unlock:
rcu_read_unlock();
if (rc || !mynel) {
kfree(mysids);
- goto out;
+ return rc;
}
rc = -ENOMEM;
mysids2 = kcalloc(mynel, sizeof(*mysids2), GFP_KERNEL);
if (!mysids2) {
kfree(mysids);
- goto out;
+ return rc;
}
for (i = 0, j = 0; i < mynel; i++) {
struct av_decision dummy_avd;
@@ -2750,12 +2828,10 @@ out_unlock:
mysids2[j++] = mysids[i];
cond_resched();
}
- rc = 0;
kfree(mysids);
*sids = mysids2;
*nel = j;
-out:
- return rc;
+ return 0;
}
/**
@@ -2768,6 +2844,9 @@ out:
* Obtain a SID to use for a file in a filesystem that
* cannot support xattr or use a fixed labeling behavior like
* transition SIDs or task SIDs.
+ *
+ * WARNING: This function may return -ESTALE, indicating that the caller
+ * must retry the operation after re-acquiring the policy pointer!
*/
static inline int __security_genfs_sid(struct selinux_policy *policy,
const char *fstype,
@@ -2846,11 +2925,13 @@ int security_genfs_sid(struct selinux_state *state,
return 0;
}
- rcu_read_lock();
- policy = rcu_dereference(state->policy);
- retval = __security_genfs_sid(policy,
- fstype, path, orig_sclass, sid);
- rcu_read_unlock();
+ do {
+ rcu_read_lock();
+ policy = rcu_dereference(state->policy);
+ retval = __security_genfs_sid(policy, fstype, path,
+ orig_sclass, sid);
+ rcu_read_unlock();
+ } while (retval == -ESTALE);
return retval;
}
@@ -2873,9 +2954,9 @@ int security_fs_use(struct selinux_state *state, struct super_block *sb)
struct selinux_policy *policy;
struct policydb *policydb;
struct sidtab *sidtab;
- int rc = 0;
+ int rc;
struct ocontext *c;
- struct superblock_security_struct *sbsec = sb->s_security;
+ struct superblock_security_struct *sbsec = selinux_superblock(sb);
const char *fstype = sb->s_type->name;
if (!selinux_initialized(state)) {
@@ -2884,6 +2965,8 @@ int security_fs_use(struct selinux_state *state, struct super_block *sb)
return 0;
}
+retry:
+ rc = 0;
rcu_read_lock();
policy = rcu_dereference(state->policy);
policydb = &policy->policydb;
@@ -2901,6 +2984,10 @@ int security_fs_use(struct selinux_state *state, struct super_block *sb)
if (!c->sid[0]) {
rc = sidtab_context_to_sid(sidtab, &c->context[0],
&c->sid[0]);
+ if (rc == -ESTALE) {
+ rcu_read_unlock();
+ goto retry;
+ }
if (rc)
goto out;
}
@@ -2908,6 +2995,10 @@ int security_fs_use(struct selinux_state *state, struct super_block *sb)
} else {
rc = __security_genfs_sid(policy, fstype, "/",
SECCLASS_DIR, &sbsec->sid);
+ if (rc == -ESTALE) {
+ rcu_read_unlock();
+ goto retry;
+ }
if (rc) {
sbsec->behavior = SECURITY_FS_USE_NONE;
rc = 0;
@@ -3117,12 +3208,13 @@ int security_sid_mls_copy(struct selinux_state *state,
u32 len;
int rc;
- rc = 0;
if (!selinux_initialized(state)) {
*new_sid = sid;
- goto out;
+ return 0;
}
+retry:
+ rc = 0;
context_init(&newcon);
rcu_read_lock();
@@ -3181,10 +3273,14 @@ int security_sid_mls_copy(struct selinux_state *state,
}
}
rc = sidtab_context_to_sid(sidtab, &newcon, new_sid);
+ if (rc == -ESTALE) {
+ rcu_read_unlock();
+ context_destroy(&newcon);
+ goto retry;
+ }
out_unlock:
rcu_read_unlock();
context_destroy(&newcon);
-out:
return rc;
}
@@ -3777,6 +3873,8 @@ int security_netlbl_secattr_to_sid(struct selinux_state *state,
return 0;
}
+retry:
+ rc = 0;
rcu_read_lock();
policy = rcu_dereference(state->policy);
policydb = &policy->policydb;
@@ -3803,23 +3901,24 @@ int security_netlbl_secattr_to_sid(struct selinux_state *state,
goto out;
}
rc = -EIDRM;
- if (!mls_context_isvalid(policydb, &ctx_new))
- goto out_free;
+ if (!mls_context_isvalid(policydb, &ctx_new)) {
+ ebitmap_destroy(&ctx_new.range.level[0].cat);
+ goto out;
+ }
rc = sidtab_context_to_sid(sidtab, &ctx_new, sid);
+ ebitmap_destroy(&ctx_new.range.level[0].cat);
+ if (rc == -ESTALE) {
+ rcu_read_unlock();
+ goto retry;
+ }
if (rc)
- goto out_free;
+ goto out;
security_netlbl_cache_add(secattr, *sid);
-
- ebitmap_destroy(&ctx_new.range.level[0].cat);
} else
*sid = SECSID_NULL;
- rcu_read_unlock();
- return 0;
-out_free:
- ebitmap_destroy(&ctx_new.range.level[0].cat);
out:
rcu_read_unlock();
return rc;
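Every sidtab_context_to_sid() caller in services.c now follows the same shape: on -ESTALE, drop the RCU read lock, release any per-iteration state, and jump back to re-read state->policy, because the sidtab it was using has been frozen by a concurrent policy load. The pattern in outline (a schematic of the hunks above, using the kernel APIs as shown, not a standalone compilation unit; the function name is hypothetical):

	static int example_lookup(struct selinux_state *state,
				  struct context *ctx, u32 *sid)
	{
		struct selinux_policy *policy;
		int rc;

	retry:
		rcu_read_lock();
		policy = rcu_dereference(state->policy);  /* may be a new policy */
		rc = sidtab_context_to_sid(policy->sidtab, ctx, sid);
		if (rc == -ESTALE) {
			rcu_read_unlock();
			/* drop anything built against the stale policy first */
			goto retry;
		}
		rcu_read_unlock();
		return rc;
	}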
diff --git a/security/selinux/ss/sidtab.c b/security/selinux/ss/sidtab.c
index 5ee190bd30f5..656d50b09f76 100644
--- a/security/selinux/ss/sidtab.c
+++ b/security/selinux/ss/sidtab.c
@@ -39,6 +39,7 @@ int sidtab_init(struct sidtab *s)
for (i = 0; i < SECINITSID_NUM; i++)
s->isids[i].set = 0;
+ s->frozen = false;
s->count = 0;
s->convert = NULL;
hash_init(s->context_to_sid);
@@ -281,6 +282,15 @@ int sidtab_context_to_sid(struct sidtab *s, struct context *context,
if (*sid)
goto out_unlock;
+ if (unlikely(s->frozen)) {
+ /*
+ * This sidtab is now frozen - tell the caller to abort and
+ * get the new one.
+ */
+ rc = -ESTALE;
+ goto out_unlock;
+ }
+
count = s->count;
convert = s->convert;
@@ -474,6 +484,17 @@ void sidtab_cancel_convert(struct sidtab *s)
spin_unlock_irqrestore(&s->lock, flags);
}
+void sidtab_freeze_begin(struct sidtab *s, unsigned long *flags) __acquires(&s->lock)
+{
+ spin_lock_irqsave(&s->lock, *flags);
+ s->frozen = true;
+ s->convert = NULL;
+}
+void sidtab_freeze_end(struct sidtab *s, unsigned long *flags) __releases(&s->lock)
+{
+ spin_unlock_irqrestore(&s->lock, *flags);
+}
+
static void sidtab_destroy_entry(struct sidtab_entry *entry)
{
context_destroy(&entry->context);
diff --git a/security/selinux/ss/sidtab.h b/security/selinux/ss/sidtab.h
index 80c744d07ad6..4eff0e49dcb2 100644
--- a/security/selinux/ss/sidtab.h
+++ b/security/selinux/ss/sidtab.h
@@ -86,6 +86,7 @@ struct sidtab {
u32 count;
/* access only under spinlock */
struct sidtab_convert_params *convert;
+ bool frozen;
spinlock_t lock;
#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
@@ -125,6 +126,9 @@ int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params);
void sidtab_cancel_convert(struct sidtab *s);
+void sidtab_freeze_begin(struct sidtab *s, unsigned long *flags) __acquires(&s->lock);
+void sidtab_freeze_end(struct sidtab *s, unsigned long *flags) __releases(&s->lock);
+
int sidtab_context_to_sid(struct sidtab *s, struct context *context, u32 *sid);
void sidtab_destroy(struct sidtab *s);
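The freeze handshake is sound because sidtab_context_to_sid() tests s->frozen under the same spinlock it uses to insert: once sidtab_freeze_begin() returns, no SID can be added to the old table, and any writer queued on the lock bails out with -ESTALE. A userspace analogue of the handshake (pthread-based, all names hypothetical):

	#include <errno.h>
	#include <pthread.h>

	struct table {
		pthread_mutex_t lock;
		int frozen;
		/* ... entries ... */
	};

	static int table_insert(struct table *t /* , new entry */)
	{
		pthread_mutex_lock(&t->lock);
		if (t->frozen) {
			pthread_mutex_unlock(&t->lock);
			return -ESTALE;	/* find the new table and retry */
		}
		/* ... perform the insert under the lock ... */
		pthread_mutex_unlock(&t->lock);
		return 0;
	}

	/* After this returns, the table is immutable and can be swapped out. */
	static void table_freeze(struct table *t)
	{
		pthread_mutex_lock(&t->lock);
		t->frozen = 1;
		pthread_mutex_unlock(&t->lock);
	}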
diff --git a/security/smack/smack.h b/security/smack/smack.h
index a9768b12716b..c3cfbdf4944a 100644
--- a/security/smack/smack.h
+++ b/security/smack/smack.h
@@ -357,6 +357,12 @@ static inline struct smack_known **smack_ipc(const struct kern_ipc_perm *ipc)
return ipc->security + smack_blob_sizes.lbs_ipc;
}
+static inline struct superblock_smack *smack_superblock(
+ const struct super_block *superblock)
+{
+ return superblock->s_security + smack_blob_sizes.lbs_superblock;
+}
+
/*
* Is the directory transmuting?
*/
@@ -383,7 +389,23 @@ static inline struct smack_known *smk_of_task(const struct task_smack *tsp)
return tsp->smk_task;
}
-static inline struct smack_known *smk_of_task_struct(
+static inline struct smack_known *smk_of_task_struct_subj(
+ const struct task_struct *t)
+{
+ struct smack_known *skp;
+ const struct cred *cred;
+
+ rcu_read_lock();
+
+ cred = rcu_dereference(t->cred);
+ skp = smk_of_task(smack_cred(cred));
+
+ rcu_read_unlock();
+
+ return skp;
+}
+
+static inline struct smack_known *smk_of_task_struct_obj(
const struct task_struct *t)
{
struct smack_known *skp;
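The split matters because t->cred (subjective) is the label the task is currently acting with, while t->real_cred (objective) is the label other tasks should be checked against when they target it; hooks that inspect *p* as an object switch to the _obj variant. The rest of the _obj body falls outside this hunk; assuming it keeps the old __task_cred()-based implementation, it continues as:

	rcu_read_lock();
	skp = smk_of_task(smack_cred(__task_cred(t)));	/* t->real_cred */
	rcu_read_unlock();

	return skp;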
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 12a45e61c1a5..223a6da0e6dc 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -159,7 +159,7 @@ static int smk_bu_current(char *note, struct smack_known *oskp,
static int smk_bu_task(struct task_struct *otp, int mode, int rc)
{
struct task_smack *tsp = smack_cred(current_cred());
- struct smack_known *smk_task = smk_of_task_struct(otp);
+ struct smack_known *smk_task = smk_of_task_struct_obj(otp);
char acc[SMK_NUM_ACCESS_TYPE + 1];
if (rc <= 0)
@@ -479,7 +479,7 @@ static int smack_ptrace_access_check(struct task_struct *ctp, unsigned int mode)
{
struct smack_known *skp;
- skp = smk_of_task_struct(ctp);
+ skp = smk_of_task_struct_obj(ctp);
return smk_ptrace_rule_check(current, skp, mode, __func__);
}
@@ -535,12 +535,7 @@ static int smack_syslog(int typefrom_file)
*/
static int smack_sb_alloc_security(struct super_block *sb)
{
- struct superblock_smack *sbsp;
-
- sbsp = kzalloc(sizeof(struct superblock_smack), GFP_KERNEL);
-
- if (sbsp == NULL)
- return -ENOMEM;
+ struct superblock_smack *sbsp = smack_superblock(sb);
sbsp->smk_root = &smack_known_floor;
sbsp->smk_default = &smack_known_floor;
@@ -549,22 +544,10 @@ static int smack_sb_alloc_security(struct super_block *sb)
/*
* SMK_SB_INITIALIZED will be zero from kzalloc.
*/
- sb->s_security = sbsp;
return 0;
}
-/**
- * smack_sb_free_security - free a superblock blob
- * @sb: the superblock getting the blob
- *
- */
-static void smack_sb_free_security(struct super_block *sb)
-{
- kfree(sb->s_security);
- sb->s_security = NULL;
-}
-
struct smack_mnt_opts {
const char *fsdefault, *fsfloor, *fshat, *fsroot, *fstransmute;
};
@@ -772,7 +755,7 @@ static int smack_set_mnt_opts(struct super_block *sb,
{
struct dentry *root = sb->s_root;
struct inode *inode = d_backing_inode(root);
- struct superblock_smack *sp = sb->s_security;
+ struct superblock_smack *sp = smack_superblock(sb);
struct inode_smack *isp;
struct smack_known *skp;
struct smack_mnt_opts *opts = mnt_opts;
@@ -871,7 +854,7 @@ static int smack_set_mnt_opts(struct super_block *sb,
*/
static int smack_sb_statfs(struct dentry *dentry)
{
- struct superblock_smack *sbp = dentry->d_sb->s_security;
+ struct superblock_smack *sbp = smack_superblock(dentry->d_sb);
int rc;
struct smk_audit_info ad;
@@ -905,7 +888,7 @@ static int smack_bprm_creds_for_exec(struct linux_binprm *bprm)
if (isp->smk_task == NULL || isp->smk_task == bsp->smk_task)
return 0;
- sbsp = inode->i_sb->s_security;
+ sbsp = smack_superblock(inode->i_sb);
if ((sbsp->smk_flags & SMK_SB_UNTRUSTED) &&
isp->smk_task != sbsp->smk_root)
return 0;
@@ -1157,7 +1140,7 @@ static int smack_inode_rename(struct inode *old_inode,
*/
static int smack_inode_permission(struct inode *inode, int mask)
{
- struct superblock_smack *sbsp = inode->i_sb->s_security;
+ struct superblock_smack *sbsp = smack_superblock(inode->i_sb);
struct smk_audit_info ad;
int no_block = mask & MAY_NOT_BLOCK;
int rc;
@@ -1400,7 +1383,7 @@ static int smack_inode_removexattr(struct user_namespace *mnt_userns,
*/
if (strcmp(name, XATTR_NAME_SMACK) == 0) {
struct super_block *sbp = dentry->d_sb;
- struct superblock_smack *sbsp = sbp->s_security;
+ struct superblock_smack *sbsp = smack_superblock(sbp);
isp->smk_inode = sbsp->smk_default;
} else if (strcmp(name, XATTR_NAME_SMACKEXEC) == 0)
@@ -1670,7 +1653,7 @@ static int smack_mmap_file(struct file *file,
isp = smack_inode(file_inode(file));
if (isp->smk_mmap == NULL)
return 0;
- sbsp = file_inode(file)->i_sb->s_security;
+ sbsp = smack_superblock(file_inode(file)->i_sb);
if (sbsp->smk_flags & SMK_SB_UNTRUSTED &&
isp->smk_mmap != sbsp->smk_root)
return -EACCES;
@@ -2033,7 +2016,7 @@ static int smk_curacc_on_task(struct task_struct *p, int access,
const char *caller)
{
struct smk_audit_info ad;
- struct smack_known *skp = smk_of_task_struct(p);
+ struct smack_known *skp = smk_of_task_struct_subj(p);
int rc;
smk_ad_init(&ad, caller, LSM_AUDIT_DATA_TASK);
@@ -2078,15 +2061,29 @@ static int smack_task_getsid(struct task_struct *p)
}
/**
- * smack_task_getsecid - get the secid of the task
- * @p: the object task
+ * smack_task_getsecid_subj - get the subjective secid of the task
+ * @p: the task
* @secid: where to put the result
*
- * Sets the secid to contain a u32 version of the smack label.
+ * Sets the secid to contain a u32 version of the task's subjective smack label.
+ */
+static void smack_task_getsecid_subj(struct task_struct *p, u32 *secid)
+{
+ struct smack_known *skp = smk_of_task_struct_subj(p);
+
+ *secid = skp->smk_secid;
+}
+
+/**
+ * smack_task_getsecid_obj - get the objective secid of the task
+ * @p: the task
+ * @secid: where to put the result
+ *
+ * Sets the secid to contain a u32 version of the task's objective smack label.
*/
-static void smack_task_getsecid(struct task_struct *p, u32 *secid)
+static void smack_task_getsecid_obj(struct task_struct *p, u32 *secid)
{
- struct smack_known *skp = smk_of_task_struct(p);
+ struct smack_known *skp = smk_of_task_struct_obj(p);
*secid = skp->smk_secid;
}
@@ -2174,7 +2171,7 @@ static int smack_task_kill(struct task_struct *p, struct kernel_siginfo *info,
{
struct smk_audit_info ad;
struct smack_known *skp;
- struct smack_known *tkp = smk_of_task_struct(p);
+ struct smack_known *tkp = smk_of_task_struct_obj(p);
int rc;
if (!sig)
@@ -2212,7 +2209,7 @@ static int smack_task_kill(struct task_struct *p, struct kernel_siginfo *info,
static void smack_task_to_inode(struct task_struct *p, struct inode *inode)
{
struct inode_smack *isp = smack_inode(inode);
- struct smack_known *skp = smk_of_task_struct(p);
+ struct smack_known *skp = smk_of_task_struct_obj(p);
isp->smk_inode = skp;
isp->smk_flags |= SMK_INODE_INSTANT;
@@ -3285,7 +3282,7 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
return;
sbp = inode->i_sb;
- sbsp = sbp->s_security;
+ sbsp = smack_superblock(sbp);
/*
* We're going to use the superblock default label
* if there's no label on the file.
@@ -3483,7 +3480,7 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
*/
static int smack_getprocattr(struct task_struct *p, char *name, char **value)
{
- struct smack_known *skp = smk_of_task_struct(p);
+ struct smack_known *skp = smk_of_task_struct_subj(p);
char *cp;
int slen;
@@ -4700,6 +4697,7 @@ struct lsm_blob_sizes smack_blob_sizes __lsm_ro_after_init = {
.lbs_inode = sizeof(struct inode_smack),
.lbs_ipc = sizeof(struct smack_known *),
.lbs_msg_msg = sizeof(struct smack_known *),
+ .lbs_superblock = sizeof(struct superblock_smack),
};
static struct security_hook_list smack_hooks[] __lsm_ro_after_init = {
@@ -4711,7 +4709,6 @@ static struct security_hook_list smack_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(fs_context_parse_param, smack_fs_context_parse_param),
LSM_HOOK_INIT(sb_alloc_security, smack_sb_alloc_security),
- LSM_HOOK_INIT(sb_free_security, smack_sb_free_security),
LSM_HOOK_INIT(sb_free_mnt_opts, smack_free_mnt_opts),
LSM_HOOK_INIT(sb_eat_lsm_opts, smack_sb_eat_lsm_opts),
LSM_HOOK_INIT(sb_statfs, smack_sb_statfs),
@@ -4759,7 +4756,8 @@ static struct security_hook_list smack_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(task_setpgid, smack_task_setpgid),
LSM_HOOK_INIT(task_getpgid, smack_task_getpgid),
LSM_HOOK_INIT(task_getsid, smack_task_getsid),
- LSM_HOOK_INIT(task_getsecid, smack_task_getsecid),
+ LSM_HOOK_INIT(task_getsecid_subj, smack_task_getsecid_subj),
+ LSM_HOOK_INIT(task_getsecid_obj, smack_task_getsecid_obj),
LSM_HOOK_INIT(task_setnice, smack_task_setnice),
LSM_HOOK_INIT(task_setioprio, smack_task_setioprio),
LSM_HOOK_INIT(task_getioprio, smack_task_getioprio),
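Moving the superblock blob into lsm_blob_sizes means the LSM framework allocates a single s_security buffer covering all stacked modules; at init it rewrites each module's lbs_* size request into that module's offset, which is why smack_superblock() can simply add smack_blob_sizes.lbs_superblock to sb->s_security, and why the kzalloc-zeroing assumption for SMK_SB_INITIALIZED still holds. A userspace model of that reservation scheme (all names hypothetical):

	#include <stdlib.h>

	struct blob_sizes { size_t lbs_superblock; };

	static size_t blob_total;	/* running total across all "LSMs" */

	/* Called once per module at init: converts a size request into an
	 * offset into the shared blob -- one allocation per object. */
	static void reserve_blob(struct blob_sizes *bs)
	{
		size_t need = bs->lbs_superblock;

		bs->lbs_superblock = blob_total;   /* field now stores the offset */
		blob_total += need;
	}

	struct my_sb_blob { int flags; };

	static struct my_sb_blob *my_superblock(void *s_security,
						const struct blob_sizes *bs)
	{
		return (struct my_sb_blob *)((char *)s_security +
					     bs->lbs_superblock);
	}

	int main(void)
	{
		struct blob_sizes mod_a = { sizeof(struct my_sb_blob) };
		struct blob_sizes mod_b = { sizeof(struct my_sb_blob) };
		void *s_security;

		reserve_blob(&mod_a);
		reserve_blob(&mod_b);

		s_security = calloc(1, blob_total);	/* zeroed, like kzalloc */
		my_superblock(s_security, &mod_a)->flags = 1;
		free(s_security);
		return 0;
	}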
diff --git a/security/tomoyo/network.c b/security/tomoyo/network.c
index 478f757ff843..8dc61335f65e 100644
--- a/security/tomoyo/network.c
+++ b/security/tomoyo/network.c
@@ -613,7 +613,7 @@ static int tomoyo_check_unix_address(struct sockaddr *addr,
static bool tomoyo_kernel_service(void)
{
/* Nothing to do if I am a kernel service. */
- return (current->flags & (PF_KTHREAD | PF_IO_WORKER)) == PF_KTHREAD;
+ return current->flags & PF_KTHREAD;
}
/**
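The tomoyo simplification above is valid only because io_uring worker threads stopped setting PF_KTHREAD in this cycle; with that premise, the compound mask test and the plain flag test agree on every task that can still exist. A standalone check (flag values assumed from include/linux/sched.h of the v5.12 era):

	#include <assert.h>

	#define PF_IO_WORKER	0x00000010	/* assumed per v5.12 sched.h */
	#define PF_KTHREAD	0x00200000

	/* old: kernel thread that is not an io worker */
	static int old_check(unsigned int f)
	{
		return (f & (PF_KTHREAD | PF_IO_WORKER)) == PF_KTHREAD;
	}

	/* new: any PF_KTHREAD task, since io workers no longer set it */
	static int new_check(unsigned int f)
	{
		return !!(f & PF_KTHREAD);
	}

	int main(void)
	{
		/* The predicates agree on every flag combination that can
		 * still occur once PF_KTHREAD|PF_IO_WORKER is impossible. */
		assert(old_check(PF_KTHREAD) == new_check(PF_KTHREAD));
		assert(old_check(PF_IO_WORKER) == new_check(PF_IO_WORKER));
		assert(old_check(0) == new_check(0));
		return 0;
	}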