author    | Linus Torvalds <torvalds@linux-foundation.org> | 2025-03-28 12:37:13 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2025-03-28 12:37:13 -0700
commit    | 72885116069abdd05c245707c3989fc605632970 (patch)
tree      | 7afe47997b8768f2a2300c08d69416d1524360af /security
parent    | 78fb88eca684ed6f09f01a232c925b6da75d8131 (diff)
parent    | 8e2dd47b10e77452733eae23cc83078fa29c1e9a (diff)
Merge tag 'landlock-6.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/mic/linux
Pull landlock updates from Mickaël Salaün:
"This brings two main changes to Landlock:
- A signal scoping fix with a new interface for user space to know if
it is compatible with the running kernel.
- Audit support to give visibility on why access requests are denied,
including the origin of the security policy, missing access rights,
and description of object(s). This was designed to limit log spam
as much as possible while still alerting about unexpected blocked
access.
With these changes come new and improved documentation, and a lot of
new tests"
* tag 'landlock-6.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/mic/linux: (36 commits)
landlock: Add audit documentation
selftests/landlock: Add audit tests for network
selftests/landlock: Add audit tests for filesystem
selftests/landlock: Add audit tests for abstract UNIX socket scoping
selftests/landlock: Add audit tests for ptrace
selftests/landlock: Test audit with restrict flags
selftests/landlock: Add tests for audit flags and domain IDs
selftests/landlock: Extend tests for landlock_restrict_self(2)'s flags
selftests/landlock: Add test for invalid ruleset file descriptor
samples/landlock: Enable users to log sandbox denials
landlock: Add LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF
landlock: Add LANDLOCK_RESTRICT_SELF_LOG_*_EXEC_* flags
landlock: Log scoped denials
landlock: Log TCP bind and connect denials
landlock: Log truncate and IOCTL denials
landlock: Factor out IOCTL hooks
landlock: Log file-related denials
landlock: Log mount-related denials
landlock: Add AUDIT_LANDLOCK_DOMAIN and log domain status
landlock: Add AUDIT_LANDLOCK_ACCESS and log ptrace denials
...
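
The LANDLOCK_RESTRICT_SELF_LOG_* flags listed above extend landlock_restrict_self(2), which previously only accepted flags == 0. A minimal sketch of enforcing a prepared ruleset while quieting denial logs for the current execution and for future nested domains; flag names are taken from the commit subjects, and exact semantics depend on the running kernel:

#include <linux/landlock.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

static int restrict_self_quietly(const int ruleset_fd)
{
	/* Required for any unprivileged Landlock enforcement. */
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
		return -1;

	/* Do not log denials from this execution nor from subdomains. */
	return syscall(SYS_landlock_restrict_self, ruleset_fd,
		       LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF |
			       LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF);
}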
Diffstat (limited to 'security')
-rw-r--r-- | security/landlock/.kunitconfig | 2
-rw-r--r-- | security/landlock/Makefile | 5
-rw-r--r-- | security/landlock/access.h | 25
-rw-r--r-- | security/landlock/audit.c | 522
-rw-r--r-- | security/landlock/audit.h | 76
-rw-r--r-- | security/landlock/cred.c | 28
-rw-r--r-- | security/landlock/cred.h | 92
-rw-r--r-- | security/landlock/domain.c | 264
-rw-r--r-- | security/landlock/domain.h | 174
-rw-r--r-- | security/landlock/errata.h | 99
-rw-r--r-- | security/landlock/errata/abi-4.h | 15
-rw-r--r-- | security/landlock/errata/abi-6.h | 19
-rw-r--r-- | security/landlock/fs.c | 321
-rw-r--r-- | security/landlock/fs.h | 40
-rw-r--r-- | security/landlock/id.c | 251
-rw-r--r-- | security/landlock/id.h | 25
-rw-r--r-- | security/landlock/limits.h | 7
-rw-r--r-- | security/landlock/net.c | 78
-rw-r--r-- | security/landlock/ruleset.c | 30
-rw-r--r-- | security/landlock/ruleset.h | 48
-rw-r--r-- | security/landlock/setup.c | 40
-rw-r--r-- | security/landlock/setup.h | 3
-rw-r--r-- | security/landlock/syscalls.c | 99
-rw-r--r-- | security/landlock/task.c | 257
-rw-r--r-- | security/lsm_audit.c | 27 |
25 files changed, 2283 insertions, 264 deletions
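
Several hunks below (access.h, domain.c, fs.c) revolve around the new deny_masks_t type: for each optional filesystem access right (truncate and ioctl_dev), the index of the youngest domain layer that denies it is packed into a 4-bit field, which lets hook_file_truncate() and the IOCTL hooks report the responsible layer without storing full layer masks per opened file. A stand-alone illustration of that packing, with names local to this sketch rather than kernel API:

#include <assert.h>
#include <stdint.h>

/* Illustration only: 4 bits per optional access right, since the kernel's
 * LANDLOCK_MAX_NUM_LAYERS is 16.  Field 0 is "truncate", field 1 is
 * "ioctl_dev", matching the bit order of _LANDLOCK_ACCESS_FS_OPTIONAL.
 */
typedef uint8_t deny_masks_t;

static deny_masks_t pack_deny_masks(unsigned int truncate_layer,
				    unsigned int ioctl_dev_layer)
{
	return (truncate_layer & 0xf) | ((ioctl_dev_layer & 0xf) << 4);
}

int main(void)
{
	/* Matches the KUnit expectation "truncate:0 ioctl_dev:2" == 0x20. */
	assert(pack_deny_masks(0, 2) == 0x20);
	return 0;
}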
diff --git a/security/landlock/.kunitconfig b/security/landlock/.kunitconfig index 03e119466604..f9423f01ac5b 100644 --- a/security/landlock/.kunitconfig +++ b/security/landlock/.kunitconfig @@ -1,4 +1,6 @@ +CONFIG_AUDIT=y CONFIG_KUNIT=y +CONFIG_NET=y CONFIG_SECURITY=y CONFIG_SECURITY_LANDLOCK=y CONFIG_SECURITY_LANDLOCK_KUNIT_TEST=y diff --git a/security/landlock/Makefile b/security/landlock/Makefile index b4538b7cf7d2..3160c2bdac1d 100644 --- a/security/landlock/Makefile +++ b/security/landlock/Makefile @@ -4,3 +4,8 @@ landlock-y := setup.o syscalls.o object.o ruleset.o \ cred.o task.o fs.o landlock-$(CONFIG_INET) += net.o + +landlock-$(CONFIG_AUDIT) += \ + id.o \ + audit.o \ + domain.o diff --git a/security/landlock/access.h b/security/landlock/access.h index 74fd8f399fbd..7961c6630a2d 100644 --- a/security/landlock/access.h +++ b/security/landlock/access.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Landlock LSM - Access types and helpers + * Landlock - Access types and helpers * * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net> * Copyright © 2018-2020 ANSSI @@ -28,6 +28,12 @@ LANDLOCK_ACCESS_FS_REFER) /* clang-format on */ +/* clang-format off */ +#define _LANDLOCK_ACCESS_FS_OPTIONAL ( \ + LANDLOCK_ACCESS_FS_TRUNCATE | \ + LANDLOCK_ACCESS_FS_IOCTL_DEV) +/* clang-format on */ + typedef u16 access_mask_t; /* Makes sure all filesystem access rights can be stored. */ @@ -60,6 +66,23 @@ typedef u16 layer_mask_t; /* Makes sure all layers can be checked. */ static_assert(BITS_PER_TYPE(layer_mask_t) >= LANDLOCK_MAX_NUM_LAYERS); +/* + * Tracks domains responsible of a denied access. This is required to avoid + * storing in each object the full layer_masks[] required by update_request(). + */ +typedef u8 deny_masks_t; + +/* + * Makes sure all optional access rights can be tied to a layer index (cf. + * get_deny_mask). + */ +static_assert(BITS_PER_TYPE(deny_masks_t) >= + (HWEIGHT(LANDLOCK_MAX_NUM_LAYERS - 1) * + HWEIGHT(_LANDLOCK_ACCESS_FS_OPTIONAL))); + +/* LANDLOCK_MAX_NUM_LAYERS must be a power of two (cf. deny_masks_t assert). */ +static_assert(HWEIGHT(LANDLOCK_MAX_NUM_LAYERS) == 1); + /* Upgrades with all initially denied by default access rights. 
*/ static inline struct access_masks landlock_upgrade_handled_access_masks(struct access_masks access_masks) diff --git a/security/landlock/audit.c b/security/landlock/audit.c new file mode 100644 index 000000000000..7e5e0ed0e4e5 --- /dev/null +++ b/security/landlock/audit.c @@ -0,0 +1,522 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Landlock - Audit helpers + * + * Copyright © 2023-2025 Microsoft Corporation + */ + +#include <kunit/test.h> +#include <linux/audit.h> +#include <linux/bitops.h> +#include <linux/lsm_audit.h> +#include <linux/pid.h> +#include <uapi/linux/landlock.h> + +#include "access.h" +#include "audit.h" +#include "common.h" +#include "cred.h" +#include "domain.h" +#include "limits.h" +#include "ruleset.h" + +static const char *const fs_access_strings[] = { + [BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = "fs.execute", + [BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)] = "fs.write_file", + [BIT_INDEX(LANDLOCK_ACCESS_FS_READ_FILE)] = "fs.read_file", + [BIT_INDEX(LANDLOCK_ACCESS_FS_READ_DIR)] = "fs.read_dir", + [BIT_INDEX(LANDLOCK_ACCESS_FS_REMOVE_DIR)] = "fs.remove_dir", + [BIT_INDEX(LANDLOCK_ACCESS_FS_REMOVE_FILE)] = "fs.remove_file", + [BIT_INDEX(LANDLOCK_ACCESS_FS_MAKE_CHAR)] = "fs.make_char", + [BIT_INDEX(LANDLOCK_ACCESS_FS_MAKE_DIR)] = "fs.make_dir", + [BIT_INDEX(LANDLOCK_ACCESS_FS_MAKE_REG)] = "fs.make_reg", + [BIT_INDEX(LANDLOCK_ACCESS_FS_MAKE_SOCK)] = "fs.make_sock", + [BIT_INDEX(LANDLOCK_ACCESS_FS_MAKE_FIFO)] = "fs.make_fifo", + [BIT_INDEX(LANDLOCK_ACCESS_FS_MAKE_BLOCK)] = "fs.make_block", + [BIT_INDEX(LANDLOCK_ACCESS_FS_MAKE_SYM)] = "fs.make_sym", + [BIT_INDEX(LANDLOCK_ACCESS_FS_REFER)] = "fs.refer", + [BIT_INDEX(LANDLOCK_ACCESS_FS_TRUNCATE)] = "fs.truncate", + [BIT_INDEX(LANDLOCK_ACCESS_FS_IOCTL_DEV)] = "fs.ioctl_dev", +}; + +static_assert(ARRAY_SIZE(fs_access_strings) == LANDLOCK_NUM_ACCESS_FS); + +static const char *const net_access_strings[] = { + [BIT_INDEX(LANDLOCK_ACCESS_NET_BIND_TCP)] = "net.bind_tcp", + [BIT_INDEX(LANDLOCK_ACCESS_NET_CONNECT_TCP)] = "net.connect_tcp", +}; + +static_assert(ARRAY_SIZE(net_access_strings) == LANDLOCK_NUM_ACCESS_NET); + +static __attribute_const__ const char * +get_blocker(const enum landlock_request_type type, + const unsigned long access_bit) +{ + switch (type) { + case LANDLOCK_REQUEST_PTRACE: + WARN_ON_ONCE(access_bit != -1); + return "ptrace"; + + case LANDLOCK_REQUEST_FS_CHANGE_TOPOLOGY: + WARN_ON_ONCE(access_bit != -1); + return "fs.change_topology"; + + case LANDLOCK_REQUEST_FS_ACCESS: + if (WARN_ON_ONCE(access_bit >= ARRAY_SIZE(fs_access_strings))) + return "unknown"; + return fs_access_strings[access_bit]; + + case LANDLOCK_REQUEST_NET_ACCESS: + if (WARN_ON_ONCE(access_bit >= ARRAY_SIZE(net_access_strings))) + return "unknown"; + return net_access_strings[access_bit]; + + case LANDLOCK_REQUEST_SCOPE_ABSTRACT_UNIX_SOCKET: + WARN_ON_ONCE(access_bit != -1); + return "scope.abstract_unix_socket"; + + case LANDLOCK_REQUEST_SCOPE_SIGNAL: + WARN_ON_ONCE(access_bit != -1); + return "scope.signal"; + } + + WARN_ON_ONCE(1); + return "unknown"; +} + +static void log_blockers(struct audit_buffer *const ab, + const enum landlock_request_type type, + const access_mask_t access) +{ + const unsigned long access_mask = access; + unsigned long access_bit; + bool is_first = true; + + for_each_set_bit(access_bit, &access_mask, BITS_PER_TYPE(access)) { + audit_log_format(ab, "%s%s", is_first ? 
"" : ",", + get_blocker(type, access_bit)); + is_first = false; + } + if (is_first) + audit_log_format(ab, "%s", get_blocker(type, -1)); +} + +static void log_domain(struct landlock_hierarchy *const hierarchy) +{ + struct audit_buffer *ab; + + /* Ignores already logged domains. */ + if (READ_ONCE(hierarchy->log_status) == LANDLOCK_LOG_RECORDED) + return; + + /* Uses consistent allocation flags wrt common_lsm_audit(). */ + ab = audit_log_start(audit_context(), GFP_ATOMIC | __GFP_NOWARN, + AUDIT_LANDLOCK_DOMAIN); + if (!ab) + return; + + WARN_ON_ONCE(hierarchy->id == 0); + audit_log_format( + ab, + "domain=%llx status=allocated mode=enforcing pid=%d uid=%u exe=", + hierarchy->id, pid_nr(hierarchy->details->pid), + hierarchy->details->uid); + audit_log_untrustedstring(ab, hierarchy->details->exe_path); + audit_log_format(ab, " comm="); + audit_log_untrustedstring(ab, hierarchy->details->comm); + audit_log_end(ab); + + /* + * There may be race condition leading to logging of the same domain + * several times but that is OK. + */ + WRITE_ONCE(hierarchy->log_status, LANDLOCK_LOG_RECORDED); +} + +static struct landlock_hierarchy * +get_hierarchy(const struct landlock_ruleset *const domain, const size_t layer) +{ + struct landlock_hierarchy *hierarchy = domain->hierarchy; + ssize_t i; + + if (WARN_ON_ONCE(layer >= domain->num_layers)) + return hierarchy; + + for (i = domain->num_layers - 1; i > layer; i--) { + if (WARN_ON_ONCE(!hierarchy->parent)) + break; + + hierarchy = hierarchy->parent; + } + + return hierarchy; +} + +#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST + +static void test_get_hierarchy(struct kunit *const test) +{ + struct landlock_hierarchy dom0_hierarchy = { + .id = 10, + }; + struct landlock_hierarchy dom1_hierarchy = { + .parent = &dom0_hierarchy, + .id = 20, + }; + struct landlock_hierarchy dom2_hierarchy = { + .parent = &dom1_hierarchy, + .id = 30, + }; + struct landlock_ruleset dom2 = { + .hierarchy = &dom2_hierarchy, + .num_layers = 3, + }; + + KUNIT_EXPECT_EQ(test, 10, get_hierarchy(&dom2, 0)->id); + KUNIT_EXPECT_EQ(test, 20, get_hierarchy(&dom2, 1)->id); + KUNIT_EXPECT_EQ(test, 30, get_hierarchy(&dom2, 2)->id); + KUNIT_EXPECT_EQ(test, 30, get_hierarchy(&dom2, -1)->id); +} + +#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */ + +static size_t get_denied_layer(const struct landlock_ruleset *const domain, + access_mask_t *const access_request, + const layer_mask_t (*const layer_masks)[], + const size_t layer_masks_size) +{ + const unsigned long access_req = *access_request; + unsigned long access_bit; + access_mask_t missing = 0; + long youngest_layer = -1; + + for_each_set_bit(access_bit, &access_req, layer_masks_size) { + const access_mask_t mask = (*layer_masks)[access_bit]; + long layer; + + if (!mask) + continue; + + /* __fls(1) == 0 */ + layer = __fls(mask); + if (layer > youngest_layer) { + youngest_layer = layer; + missing = BIT(access_bit); + } else if (layer == youngest_layer) { + missing |= BIT(access_bit); + } + } + + *access_request = missing; + if (youngest_layer == -1) + return domain->num_layers - 1; + + return youngest_layer; +} + +#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST + +static void test_get_denied_layer(struct kunit *const test) +{ + const struct landlock_ruleset dom = { + .num_layers = 5, + }; + const layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = { + [BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT(0), + [BIT_INDEX(LANDLOCK_ACCESS_FS_READ_FILE)] = BIT(1), + [BIT_INDEX(LANDLOCK_ACCESS_FS_READ_DIR)] = BIT(1) | BIT(0), + 
[BIT_INDEX(LANDLOCK_ACCESS_FS_REMOVE_DIR)] = BIT(2), + }; + access_mask_t access; + + access = LANDLOCK_ACCESS_FS_EXECUTE; + KUNIT_EXPECT_EQ(test, 0, + get_denied_layer(&dom, &access, &layer_masks, + sizeof(layer_masks))); + KUNIT_EXPECT_EQ(test, access, LANDLOCK_ACCESS_FS_EXECUTE); + + access = LANDLOCK_ACCESS_FS_READ_FILE; + KUNIT_EXPECT_EQ(test, 1, + get_denied_layer(&dom, &access, &layer_masks, + sizeof(layer_masks))); + KUNIT_EXPECT_EQ(test, access, LANDLOCK_ACCESS_FS_READ_FILE); + + access = LANDLOCK_ACCESS_FS_READ_DIR; + KUNIT_EXPECT_EQ(test, 1, + get_denied_layer(&dom, &access, &layer_masks, + sizeof(layer_masks))); + KUNIT_EXPECT_EQ(test, access, LANDLOCK_ACCESS_FS_READ_DIR); + + access = LANDLOCK_ACCESS_FS_READ_FILE | LANDLOCK_ACCESS_FS_READ_DIR; + KUNIT_EXPECT_EQ(test, 1, + get_denied_layer(&dom, &access, &layer_masks, + sizeof(layer_masks))); + KUNIT_EXPECT_EQ(test, access, + LANDLOCK_ACCESS_FS_READ_FILE | + LANDLOCK_ACCESS_FS_READ_DIR); + + access = LANDLOCK_ACCESS_FS_EXECUTE | LANDLOCK_ACCESS_FS_READ_DIR; + KUNIT_EXPECT_EQ(test, 1, + get_denied_layer(&dom, &access, &layer_masks, + sizeof(layer_masks))); + KUNIT_EXPECT_EQ(test, access, LANDLOCK_ACCESS_FS_READ_DIR); + + access = LANDLOCK_ACCESS_FS_WRITE_FILE; + KUNIT_EXPECT_EQ(test, 4, + get_denied_layer(&dom, &access, &layer_masks, + sizeof(layer_masks))); + KUNIT_EXPECT_EQ(test, access, 0); +} + +#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */ + +static size_t +get_layer_from_deny_masks(access_mask_t *const access_request, + const access_mask_t all_existing_optional_access, + const deny_masks_t deny_masks) +{ + const unsigned long access_opt = all_existing_optional_access; + const unsigned long access_req = *access_request; + access_mask_t missing = 0; + size_t youngest_layer = 0; + size_t access_index = 0; + unsigned long access_bit; + + /* This will require change with new object types. 
*/ + WARN_ON_ONCE(access_opt != _LANDLOCK_ACCESS_FS_OPTIONAL); + + for_each_set_bit(access_bit, &access_opt, + BITS_PER_TYPE(access_mask_t)) { + if (access_req & BIT(access_bit)) { + const size_t layer = + (deny_masks >> (access_index * 4)) & + (LANDLOCK_MAX_NUM_LAYERS - 1); + + if (layer > youngest_layer) { + youngest_layer = layer; + missing = BIT(access_bit); + } else if (layer == youngest_layer) { + missing |= BIT(access_bit); + } + } + access_index++; + } + + *access_request = missing; + return youngest_layer; +} + +#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST + +static void test_get_layer_from_deny_masks(struct kunit *const test) +{ + deny_masks_t deny_mask; + access_mask_t access; + + /* truncate:0 ioctl_dev:2 */ + deny_mask = 0x20; + + access = LANDLOCK_ACCESS_FS_TRUNCATE; + KUNIT_EXPECT_EQ(test, 0, + get_layer_from_deny_masks(&access, + _LANDLOCK_ACCESS_FS_OPTIONAL, + deny_mask)); + KUNIT_EXPECT_EQ(test, access, LANDLOCK_ACCESS_FS_TRUNCATE); + + access = LANDLOCK_ACCESS_FS_TRUNCATE | LANDLOCK_ACCESS_FS_IOCTL_DEV; + KUNIT_EXPECT_EQ(test, 2, + get_layer_from_deny_masks(&access, + _LANDLOCK_ACCESS_FS_OPTIONAL, + deny_mask)); + KUNIT_EXPECT_EQ(test, access, LANDLOCK_ACCESS_FS_IOCTL_DEV); + + /* truncate:15 ioctl_dev:15 */ + deny_mask = 0xff; + + access = LANDLOCK_ACCESS_FS_TRUNCATE; + KUNIT_EXPECT_EQ(test, 15, + get_layer_from_deny_masks(&access, + _LANDLOCK_ACCESS_FS_OPTIONAL, + deny_mask)); + KUNIT_EXPECT_EQ(test, access, LANDLOCK_ACCESS_FS_TRUNCATE); + + access = LANDLOCK_ACCESS_FS_TRUNCATE | LANDLOCK_ACCESS_FS_IOCTL_DEV; + KUNIT_EXPECT_EQ(test, 15, + get_layer_from_deny_masks(&access, + _LANDLOCK_ACCESS_FS_OPTIONAL, + deny_mask)); + KUNIT_EXPECT_EQ(test, access, + LANDLOCK_ACCESS_FS_TRUNCATE | + LANDLOCK_ACCESS_FS_IOCTL_DEV); +} + +#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */ + +static bool is_valid_request(const struct landlock_request *const request) +{ + if (WARN_ON_ONCE(request->layer_plus_one > LANDLOCK_MAX_NUM_LAYERS)) + return false; + + if (WARN_ON_ONCE(!(!!request->layer_plus_one ^ !!request->access))) + return false; + + if (request->access) { + if (WARN_ON_ONCE(!(!!request->layer_masks ^ + !!request->all_existing_optional_access))) + return false; + } else { + if (WARN_ON_ONCE(request->layer_masks || + request->all_existing_optional_access)) + return false; + } + + if (WARN_ON_ONCE(!!request->layer_masks ^ !!request->layer_masks_size)) + return false; + + if (request->deny_masks) { + if (WARN_ON_ONCE(!request->all_existing_optional_access)) + return false; + } + + return true; +} + +/** + * landlock_log_denial - Create audit records related to a denial + * + * @subject: The Landlock subject's credential denying an action. + * @request: Detail of the user space request. + */ +void landlock_log_denial(const struct landlock_cred_security *const subject, + const struct landlock_request *const request) +{ + struct audit_buffer *ab; + struct landlock_hierarchy *youngest_denied; + size_t youngest_layer; + access_mask_t missing; + + if (WARN_ON_ONCE(!subject || !subject->domain || + !subject->domain->hierarchy || !request)) + return; + + if (!is_valid_request(request)) + return; + + missing = request->access; + if (missing) { + /* Gets the nearest domain that denies the request. 
*/ + if (request->layer_masks) { + youngest_layer = get_denied_layer( + subject->domain, &missing, request->layer_masks, + request->layer_masks_size); + } else { + youngest_layer = get_layer_from_deny_masks( + &missing, request->all_existing_optional_access, + request->deny_masks); + } + youngest_denied = + get_hierarchy(subject->domain, youngest_layer); + } else { + youngest_layer = request->layer_plus_one - 1; + youngest_denied = + get_hierarchy(subject->domain, youngest_layer); + } + + if (READ_ONCE(youngest_denied->log_status) == LANDLOCK_LOG_DISABLED) + return; + + /* + * Consistently keeps track of the number of denied access requests + * even if audit is currently disabled, or if audit rules currently + * exclude this record type, or if landlock_restrict_self(2)'s flags + * quiet logs. + */ + atomic64_inc(&youngest_denied->num_denials); + + if (!audit_enabled) + return; + + /* Checks if the current exec was restricting itself. */ + if (subject->domain_exec & (1 << youngest_layer)) { + /* Ignores denials for the same execution. */ + if (!youngest_denied->log_same_exec) + return; + } else { + /* Ignores denials after a new execution. */ + if (!youngest_denied->log_new_exec) + return; + } + + /* Uses consistent allocation flags wrt common_lsm_audit(). */ + ab = audit_log_start(audit_context(), GFP_ATOMIC | __GFP_NOWARN, + AUDIT_LANDLOCK_ACCESS); + if (!ab) + return; + + audit_log_format(ab, "domain=%llx blockers=", youngest_denied->id); + log_blockers(ab, request->type, missing); + audit_log_lsm_data(ab, &request->audit); + audit_log_end(ab); + + /* Logs this domain the first time it shows in log. */ + log_domain(youngest_denied); +} + +/** + * landlock_log_drop_domain - Create an audit record on domain deallocation + * + * @hierarchy: The domain's hierarchy being deallocated. + * + * Only domains which previously appeared in the audit logs are logged again. + * This is useful to know when a domain will never show again in the audit log. + * + * Called in a work queue scheduled by landlock_put_ruleset_deferred() called + * by hook_cred_free(). + */ +void landlock_log_drop_domain(const struct landlock_hierarchy *const hierarchy) +{ + struct audit_buffer *ab; + + if (WARN_ON_ONCE(!hierarchy)) + return; + + if (!audit_enabled) + return; + + /* Ignores domains that were not logged. */ + if (READ_ONCE(hierarchy->log_status) != LANDLOCK_LOG_RECORDED) + return; + + /* + * If logging of domain allocation succeeded, warns about failure to log + * domain deallocation to highlight unbalanced domain lifetime logs. 
+ */ + ab = audit_log_start(audit_context(), GFP_KERNEL, + AUDIT_LANDLOCK_DOMAIN); + if (!ab) + return; + + audit_log_format(ab, "domain=%llx status=deallocated denials=%llu", + hierarchy->id, atomic64_read(&hierarchy->num_denials)); + audit_log_end(ab); +} + +#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST + +static struct kunit_case test_cases[] = { + /* clang-format off */ + KUNIT_CASE(test_get_hierarchy), + KUNIT_CASE(test_get_denied_layer), + KUNIT_CASE(test_get_layer_from_deny_masks), + {} + /* clang-format on */ +}; + +static struct kunit_suite test_suite = { + .name = "landlock_audit", + .test_cases = test_cases, +}; + +kunit_test_suite(test_suite); + +#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */ diff --git a/security/landlock/audit.h b/security/landlock/audit.h new file mode 100644 index 000000000000..92428b7fc4d8 --- /dev/null +++ b/security/landlock/audit.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Landlock - Audit helpers + * + * Copyright © 2023-2025 Microsoft Corporation + */ + +#ifndef _SECURITY_LANDLOCK_AUDIT_H +#define _SECURITY_LANDLOCK_AUDIT_H + +#include <linux/audit.h> +#include <linux/lsm_audit.h> + +#include "access.h" +#include "cred.h" + +enum landlock_request_type { + LANDLOCK_REQUEST_PTRACE = 1, + LANDLOCK_REQUEST_FS_CHANGE_TOPOLOGY, + LANDLOCK_REQUEST_FS_ACCESS, + LANDLOCK_REQUEST_NET_ACCESS, + LANDLOCK_REQUEST_SCOPE_ABSTRACT_UNIX_SOCKET, + LANDLOCK_REQUEST_SCOPE_SIGNAL, +}; + +/* + * We should be careful to only use a variable of this type for + * landlock_log_denial(). This way, the compiler can remove it entirely if + * CONFIG_AUDIT is not set. + */ +struct landlock_request { + /* Mandatory fields. */ + enum landlock_request_type type; + struct common_audit_data audit; + + /** + * layer_plus_one: First layer level that denies the request + 1. The + * extra one is useful to detect uninitialized field. + */ + size_t layer_plus_one; + + /* Required field for configurable access control. */ + access_mask_t access; + + /* Required fields for requests with layer masks. */ + const layer_mask_t (*layer_masks)[]; + size_t layer_masks_size; + + /* Required fields for requests with deny masks. 
*/ + const access_mask_t all_existing_optional_access; + deny_masks_t deny_masks; +}; + +#ifdef CONFIG_AUDIT + +void landlock_log_drop_domain(const struct landlock_hierarchy *const hierarchy); + +void landlock_log_denial(const struct landlock_cred_security *const subject, + const struct landlock_request *const request); + +#else /* CONFIG_AUDIT */ + +static inline void +landlock_log_drop_domain(const struct landlock_hierarchy *const hierarchy) +{ +} + +static inline void +landlock_log_denial(const struct landlock_cred_security *const subject, + const struct landlock_request *const request) +{ +} + +#endif /* CONFIG_AUDIT */ + +#endif /* _SECURITY_LANDLOCK_AUDIT_H */ diff --git a/security/landlock/cred.c b/security/landlock/cred.c index db9fe7d906ba..0cb3edde4d18 100644 --- a/security/landlock/cred.c +++ b/security/landlock/cred.c @@ -1,11 +1,13 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Landlock LSM - Credential hooks + * Landlock - Credential hooks * * Copyright © 2017-2020 Mickaël Salaün <mic@digikod.net> * Copyright © 2018-2020 ANSSI + * Copyright © 2024-2025 Microsoft Corporation */ +#include <linux/binfmts.h> #include <linux/cred.h> #include <linux/lsm_hooks.h> @@ -17,11 +19,12 @@ static void hook_cred_transfer(struct cred *const new, const struct cred *const old) { - struct landlock_ruleset *const old_dom = landlock_cred(old)->domain; + const struct landlock_cred_security *const old_llcred = + landlock_cred(old); - if (old_dom) { - landlock_get_ruleset(old_dom); - landlock_cred(new)->domain = old_dom; + if (old_llcred->domain) { + landlock_get_ruleset(old_llcred->domain); + *landlock_cred(new) = *old_llcred; } } @@ -40,10 +43,25 @@ static void hook_cred_free(struct cred *const cred) landlock_put_ruleset_deferred(dom); } +#ifdef CONFIG_AUDIT + +static int hook_bprm_creds_for_exec(struct linux_binprm *const bprm) +{ + /* Resets for each execution. */ + landlock_cred(bprm->cred)->domain_exec = 0; + return 0; +} + +#endif /* CONFIG_AUDIT */ + static struct security_hook_list landlock_hooks[] __ro_after_init = { LSM_HOOK_INIT(cred_prepare, hook_cred_prepare), LSM_HOOK_INIT(cred_transfer, hook_cred_transfer), LSM_HOOK_INIT(cred_free, hook_cred_free), + +#ifdef CONFIG_AUDIT + LSM_HOOK_INIT(bprm_creds_for_exec, hook_bprm_creds_for_exec), +#endif /* CONFIG_AUDIT */ }; __init void landlock_add_cred_hooks(void) diff --git a/security/landlock/cred.h b/security/landlock/cred.h index bf755459838a..c82fe63ec598 100644 --- a/security/landlock/cred.h +++ b/security/landlock/cred.h @@ -1,24 +1,63 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Landlock LSM - Credential hooks + * Landlock - Credential hooks * * Copyright © 2019-2020 Mickaël Salaün <mic@digikod.net> * Copyright © 2019-2020 ANSSI + * Copyright © 2021-2025 Microsoft Corporation */ #ifndef _SECURITY_LANDLOCK_CRED_H #define _SECURITY_LANDLOCK_CRED_H +#include <linux/container_of.h> #include <linux/cred.h> #include <linux/init.h> #include <linux/rcupdate.h> +#include "access.h" +#include "limits.h" #include "ruleset.h" #include "setup.h" +/** + * struct landlock_cred_security - Credential security blob + * + * This structure is packed to minimize the size of struct + * landlock_file_security. However, it is always aligned in the LSM cred blob, + * see lsm_set_blob_size(). + */ struct landlock_cred_security { + /** + * @domain: Immutable ruleset enforced on a task. 
+ */ struct landlock_ruleset *domain; -}; + +#ifdef CONFIG_AUDIT + /** + * @domain_exec: Bitmask identifying the domain layers that were enforced by + * the current task's executed file (i.e. no new execve(2) since + * landlock_restrict_self(2)). + */ + u16 domain_exec; + /** + * @log_subdomains_off: Set if the domain descendants's log_status should be + * set to %LANDLOCK_LOG_DISABLED. This is not a landlock_hierarchy + * configuration because it applies to future descendant domains and it does + * not require a current domain. + */ + u8 log_subdomains_off : 1; +#endif /* CONFIG_AUDIT */ +} __packed; + +#ifdef CONFIG_AUDIT + +/* Makes sure all layer executions can be stored. */ +static_assert(BITS_PER_TYPE(typeof_member(struct landlock_cred_security, + domain_exec)) >= + LANDLOCK_MAX_NUM_LAYERS); + +#endif /* CONFIG_AUDIT */ static inline struct landlock_cred_security * landlock_cred(const struct cred *cred) @@ -53,6 +92,55 @@ static inline bool landlocked(const struct task_struct *const task) return has_dom; } +/** + * landlock_get_applicable_subject - Return the subject's Landlock credential + * if its enforced domain applies to (i.e. + * handles) at least one of the access rights + * specified in @masks + * + * @cred: credential + * @masks: access masks + * @handle_layer: returned youngest layer handling a subset of @masks. Not set + * if the function returns NULL. + * + * Returns: landlock_cred(@cred) if any access rights specified in @masks is + * handled, or NULL otherwise. + */ +static inline const struct landlock_cred_security * +landlock_get_applicable_subject(const struct cred *const cred, + const struct access_masks masks, + size_t *const handle_layer) +{ + const union access_masks_all masks_all = { + .masks = masks, + }; + const struct landlock_ruleset *domain; + ssize_t layer_level; + + if (!cred) + return NULL; + + domain = landlock_cred(cred)->domain; + if (!domain) + return NULL; + + for (layer_level = domain->num_layers - 1; layer_level >= 0; + layer_level--) { + union access_masks_all layer = { + .masks = domain->access_masks[layer_level], + }; + + if (layer.all & masks_all.all) { + if (handle_layer) + *handle_layer = layer_level; + + return landlock_cred(cred); + } + } + + return NULL; +} + __init void landlock_add_cred_hooks(void); #endif /* _SECURITY_LANDLOCK_CRED_H */ diff --git a/security/landlock/domain.c b/security/landlock/domain.c new file mode 100644 index 000000000000..bae2e9909013 --- /dev/null +++ b/security/landlock/domain.c @@ -0,0 +1,264 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Landlock - Domain management + * + * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net> + * Copyright © 2018-2020 ANSSI + * Copyright © 2024-2025 Microsoft Corporation + */ + +#include <kunit/test.h> +#include <linux/bitops.h> +#include <linux/bits.h> +#include <linux/cred.h> +#include <linux/file.h> +#include <linux/mm.h> +#include <linux/path.h> +#include <linux/pid.h> +#include <linux/sched.h> +#include <linux/uidgid.h> + +#include "access.h" +#include "common.h" +#include "domain.h" +#include "id.h" + +#ifdef CONFIG_AUDIT + +/** + * get_current_exe - Get the current's executable path, if any + * + * @exe_str: Returned pointer to a path string with a lifetime tied to the + * returned buffer, if any. + * @exe_size: Returned size of @exe_str (including the trailing null + * character), if any. + * + * Returns: A pointer to an allocated buffer where @exe_str point to, %NULL if + * there is no executable path, or an error otherwise. 
+ */ +static const void *get_current_exe(const char **const exe_str, + size_t *const exe_size) +{ + const size_t buffer_size = LANDLOCK_PATH_MAX_SIZE; + struct mm_struct *mm = current->mm; + struct file *file __free(fput) = NULL; + char *buffer __free(kfree) = NULL; + const char *exe; + ssize_t size; + + if (!mm) + return NULL; + + file = get_mm_exe_file(mm); + if (!file) + return NULL; + + buffer = kmalloc(buffer_size, GFP_KERNEL); + if (!buffer) + return ERR_PTR(-ENOMEM); + + exe = d_path(&file->f_path, buffer, buffer_size); + if (WARN_ON_ONCE(IS_ERR(exe))) + /* Should never happen according to LANDLOCK_PATH_MAX_SIZE. */ + return ERR_CAST(exe); + + size = buffer + buffer_size - exe; + if (WARN_ON_ONCE(size <= 0)) + return ERR_PTR(-ENAMETOOLONG); + + *exe_size = size; + *exe_str = exe; + return no_free_ptr(buffer); +} + +/* + * Returns: A newly allocated object describing a domain, or an error + * otherwise. + */ +static struct landlock_details *get_current_details(void) +{ + /* Cf. audit_log_d_path_exe() */ + static const char null_path[] = "(null)"; + const char *path_str = null_path; + size_t path_size = sizeof(null_path); + const void *buffer __free(kfree) = NULL; + struct landlock_details *details; + + buffer = get_current_exe(&path_str, &path_size); + if (IS_ERR(buffer)) + return ERR_CAST(buffer); + + /* + * Create the new details according to the path's length. Do not + * allocate with GFP_KERNEL_ACCOUNT because it is independent from the + * caller. + */ + details = + kzalloc(struct_size(details, exe_path, path_size), GFP_KERNEL); + if (!details) + return ERR_PTR(-ENOMEM); + + memcpy(details->exe_path, path_str, path_size); + WARN_ON_ONCE(current_cred() != current_real_cred()); + details->pid = get_pid(task_pid(current)); + details->uid = from_kuid(&init_user_ns, current_uid()); + get_task_comm(details->comm, current); + return details; +} + +/** + * landlock_init_hierarchy_log - Partially initialize landlock_hierarchy + * + * @hierarchy: The hierarchy to initialize. + * + * The current task is referenced as the domain that is enforcing the + * restriction. The subjective credentials must not be in an overridden state. + * + * @hierarchy->parent and @hierarchy->usage should already be set. + */ +int landlock_init_hierarchy_log(struct landlock_hierarchy *const hierarchy) +{ + struct landlock_details *details; + + details = get_current_details(); + if (IS_ERR(details)) + return PTR_ERR(details); + + hierarchy->details = details; + hierarchy->id = landlock_get_id_range(1); + hierarchy->log_status = LANDLOCK_LOG_PENDING; + hierarchy->log_same_exec = true; + hierarchy->log_new_exec = false; + atomic64_set(&hierarchy->num_denials, 0); + return 0; +} + +static deny_masks_t +get_layer_deny_mask(const access_mask_t all_existing_optional_access, + const unsigned long access_bit, const size_t layer) +{ + unsigned long access_weight; + + /* This may require change with new object types. 
*/ + WARN_ON_ONCE(all_existing_optional_access != + _LANDLOCK_ACCESS_FS_OPTIONAL); + + if (WARN_ON_ONCE(layer >= LANDLOCK_MAX_NUM_LAYERS)) + return 0; + + access_weight = hweight_long(all_existing_optional_access & + GENMASK(access_bit, 0)); + if (WARN_ON_ONCE(access_weight < 1)) + return 0; + + return layer + << ((access_weight - 1) * HWEIGHT(LANDLOCK_MAX_NUM_LAYERS - 1)); +} + +#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST + +static void test_get_layer_deny_mask(struct kunit *const test) +{ + const unsigned long truncate = BIT_INDEX(LANDLOCK_ACCESS_FS_TRUNCATE); + const unsigned long ioctl_dev = BIT_INDEX(LANDLOCK_ACCESS_FS_IOCTL_DEV); + + KUNIT_EXPECT_EQ(test, 0, + get_layer_deny_mask(_LANDLOCK_ACCESS_FS_OPTIONAL, + truncate, 0)); + KUNIT_EXPECT_EQ(test, 0x3, + get_layer_deny_mask(_LANDLOCK_ACCESS_FS_OPTIONAL, + truncate, 3)); + + KUNIT_EXPECT_EQ(test, 0, + get_layer_deny_mask(_LANDLOCK_ACCESS_FS_OPTIONAL, + ioctl_dev, 0)); + KUNIT_EXPECT_EQ(test, 0xf0, + get_layer_deny_mask(_LANDLOCK_ACCESS_FS_OPTIONAL, + ioctl_dev, 15)); +} + +#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */ + +deny_masks_t +landlock_get_deny_masks(const access_mask_t all_existing_optional_access, + const access_mask_t optional_access, + const layer_mask_t (*const layer_masks)[], + const size_t layer_masks_size) +{ + const unsigned long access_opt = optional_access; + unsigned long access_bit; + deny_masks_t deny_masks = 0; + + /* This may require change with new object types. */ + WARN_ON_ONCE(access_opt != + (optional_access & all_existing_optional_access)); + + if (WARN_ON_ONCE(!layer_masks)) + return 0; + + if (WARN_ON_ONCE(!access_opt)) + return 0; + + for_each_set_bit(access_bit, &access_opt, layer_masks_size) { + const layer_mask_t mask = (*layer_masks)[access_bit]; + + if (!mask) + continue; + + /* __fls(1) == 0 */ + deny_masks |= get_layer_deny_mask(all_existing_optional_access, + access_bit, __fls(mask)); + } + return deny_masks; +} + +#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST + +static void test_landlock_get_deny_masks(struct kunit *const test) +{ + const layer_mask_t layers1[BITS_PER_TYPE(access_mask_t)] = { + [BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0) | + BIT_ULL(9), + [BIT_INDEX(LANDLOCK_ACCESS_FS_TRUNCATE)] = BIT_ULL(1), + [BIT_INDEX(LANDLOCK_ACCESS_FS_IOCTL_DEV)] = BIT_ULL(2) | + BIT_ULL(0), + }; + + KUNIT_EXPECT_EQ(test, 0x1, + landlock_get_deny_masks(_LANDLOCK_ACCESS_FS_OPTIONAL, + LANDLOCK_ACCESS_FS_TRUNCATE, + &layers1, ARRAY_SIZE(layers1))); + KUNIT_EXPECT_EQ(test, 0x20, + landlock_get_deny_masks(_LANDLOCK_ACCESS_FS_OPTIONAL, + LANDLOCK_ACCESS_FS_IOCTL_DEV, + &layers1, ARRAY_SIZE(layers1))); + KUNIT_EXPECT_EQ( + test, 0x21, + landlock_get_deny_masks(_LANDLOCK_ACCESS_FS_OPTIONAL, + LANDLOCK_ACCESS_FS_TRUNCATE | + LANDLOCK_ACCESS_FS_IOCTL_DEV, + &layers1, ARRAY_SIZE(layers1))); +} + +#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */ + +#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST + +static struct kunit_case test_cases[] = { + /* clang-format off */ + KUNIT_CASE(test_get_layer_deny_mask), + KUNIT_CASE(test_landlock_get_deny_masks), + {} + /* clang-format on */ +}; + +static struct kunit_suite test_suite = { + .name = "landlock_domain", + .test_cases = test_cases, +}; + +kunit_test_suite(test_suite); + +#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */ + +#endif /* CONFIG_AUDIT */ diff --git a/security/landlock/domain.h b/security/landlock/domain.h new file mode 100644 index 000000000000..ed0d348e214c --- /dev/null +++ b/security/landlock/domain.h @@ -0,0 +1,174 @@ +/* SPDX-License-Identifier: 
GPL-2.0-only */ +/* + * Landlock - Domain management + * + * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net> + * Copyright © 2018-2020 ANSSI + * Copyright © 2024-2025 Microsoft Corporation + */ + +#ifndef _SECURITY_LANDLOCK_DOMAIN_H +#define _SECURITY_LANDLOCK_DOMAIN_H + +#include <linux/limits.h> +#include <linux/mm.h> +#include <linux/path.h> +#include <linux/pid.h> +#include <linux/refcount.h> +#include <linux/sched.h> +#include <linux/slab.h> + +#include "access.h" +#include "audit.h" + +enum landlock_log_status { + LANDLOCK_LOG_PENDING = 0, + LANDLOCK_LOG_RECORDED, + LANDLOCK_LOG_DISABLED, +}; + +/** + * struct landlock_details - Domain's creation information + * + * Rarely accessed, mainly when logging the first domain's denial. + * + * The contained pointers are initialized at the domain creation time and never + * changed again. Contrary to most other Landlock object types, this one is + * not allocated with GFP_KERNEL_ACCOUNT because its size may not be under the + * caller's control (e.g. unknown exe_path) and the data is not explicitly + * requested nor used by tasks. + */ +struct landlock_details { + /** + * @pid: PID of the task that initially restricted itself. It still + * identifies the same task. Keeping a reference to this PID ensures that + * it will not be recycled. + */ + struct pid *pid; + /** + * @uid: UID of the task that initially restricted itself, at creation time. + */ + uid_t uid; + /** + * @comm: Command line of the task that initially restricted itself, at + * creation time. Always NULL terminated. + */ + char comm[TASK_COMM_LEN]; + /** + * @exe_path: Executable path of the task that initially restricted + * itself, at creation time. Always NULL terminated, and never greater + * than LANDLOCK_PATH_MAX_SIZE. + */ + char exe_path[]; +}; + +/* Adds 11 extra characters for the potential " (deleted)" suffix. */ +#define LANDLOCK_PATH_MAX_SIZE (PATH_MAX + 11) + +/* Makes sure the greatest landlock_details can be allocated. */ +static_assert(struct_size_t(struct landlock_details, exe_path, + LANDLOCK_PATH_MAX_SIZE) <= KMALLOC_MAX_SIZE); + +/** + * struct landlock_hierarchy - Node in a domain hierarchy + */ +struct landlock_hierarchy { + /** + * @parent: Pointer to the parent node, or NULL if it is a root + * Landlock domain. + */ + struct landlock_hierarchy *parent; + /** + * @usage: Number of potential children domains plus their parent + * domain. + */ + refcount_t usage; + +#ifdef CONFIG_AUDIT + /** + * @log_status: Whether this domain should be logged or not. Because + * concurrent log entries may be created at the same time, it is still + * possible to have several domain records of the same domain. + */ + enum landlock_log_status log_status; + /** + * @num_denials: Number of access requests denied by this domain. + * Masked (i.e. never logged) denials are still counted. + */ + atomic64_t num_denials; + /** + * @id: Landlock domain ID, sets once at domain creation time. + */ + u64 id; + /** + * @details: Information about the related domain. + */ + const struct landlock_details *details; + /** + * @log_same_exec: Set if the domain is *not* configured with + * %LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF. Set to true by default. + */ + u32 log_same_exec : 1, + /** + * @log_new_exec: Set if the domain is configured with + * %LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON. Set to false by default. 
+ */ + log_new_exec : 1; +#endif /* CONFIG_AUDIT */ +}; + +#ifdef CONFIG_AUDIT + +deny_masks_t +landlock_get_deny_masks(const access_mask_t all_existing_optional_access, + const access_mask_t optional_access, + const layer_mask_t (*const layer_masks)[], + size_t layer_masks_size); + +int landlock_init_hierarchy_log(struct landlock_hierarchy *const hierarchy); + +static inline void +landlock_free_hierarchy_details(struct landlock_hierarchy *const hierarchy) +{ + if (WARN_ON_ONCE(!hierarchy || !hierarchy->details)) + return; + + put_pid(hierarchy->details->pid); + kfree(hierarchy->details); +} + +#else /* CONFIG_AUDIT */ + +static inline int +landlock_init_hierarchy_log(struct landlock_hierarchy *const hierarchy) +{ + return 0; +} + +static inline void +landlock_free_hierarchy_details(struct landlock_hierarchy *const hierarchy) +{ +} + +#endif /* CONFIG_AUDIT */ + +static inline void +landlock_get_hierarchy(struct landlock_hierarchy *const hierarchy) +{ + if (hierarchy) + refcount_inc(&hierarchy->usage); +} + +static inline void landlock_put_hierarchy(struct landlock_hierarchy *hierarchy) +{ + while (hierarchy && refcount_dec_and_test(&hierarchy->usage)) { + const struct landlock_hierarchy *const freeme = hierarchy; + + landlock_log_drop_domain(hierarchy); + landlock_free_hierarchy_details(hierarchy); + hierarchy = hierarchy->parent; + kfree(freeme); + } +} + +#endif /* _SECURITY_LANDLOCK_DOMAIN_H */ diff --git a/security/landlock/errata.h b/security/landlock/errata.h new file mode 100644 index 000000000000..8e626accac10 --- /dev/null +++ b/security/landlock/errata.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Landlock - Errata information + * + * Copyright © 2025 Microsoft Corporation + */ + +#ifndef _SECURITY_LANDLOCK_ERRATA_H +#define _SECURITY_LANDLOCK_ERRATA_H + +#include <linux/init.h> + +struct landlock_erratum { + const int abi; + const u8 number; +}; + +/* clang-format off */ +#define LANDLOCK_ERRATUM(NUMBER) \ + { \ + .abi = LANDLOCK_ERRATA_ABI, \ + .number = NUMBER, \ + }, +/* clang-format on */ + +/* + * Some fixes may require user space to check if they are applied on the running + * kernel before using a specific feature. For instance, this applies when a + * restriction was previously too restrictive and is now getting relaxed (for + * compatibility or semantic reasons). However, non-visible changes for + * legitimate use (e.g. security fixes) do not require an erratum. + */ +static const struct landlock_erratum landlock_errata_init[] __initconst = { + +/* + * Only Sparse may not implement __has_include. If a compiler does not + * implement __has_include, a warning will be printed at boot time (see + * setup.c). 
+ */ +#ifdef __has_include + +#define LANDLOCK_ERRATA_ABI 1 +#if __has_include("errata/abi-1.h") +#include "errata/abi-1.h" +#endif +#undef LANDLOCK_ERRATA_ABI + +#define LANDLOCK_ERRATA_ABI 2 +#if __has_include("errata/abi-2.h") +#include "errata/abi-2.h" +#endif +#undef LANDLOCK_ERRATA_ABI + +#define LANDLOCK_ERRATA_ABI 3 +#if __has_include("errata/abi-3.h") +#include "errata/abi-3.h" +#endif +#undef LANDLOCK_ERRATA_ABI + +#define LANDLOCK_ERRATA_ABI 4 +#if __has_include("errata/abi-4.h") +#include "errata/abi-4.h" +#endif +#undef LANDLOCK_ERRATA_ABI + +#define LANDLOCK_ERRATA_ABI 5 +#if __has_include("errata/abi-5.h") +#include "errata/abi-5.h" +#endif +#undef LANDLOCK_ERRATA_ABI + +#define LANDLOCK_ERRATA_ABI 6 +#if __has_include("errata/abi-6.h") +#include "errata/abi-6.h" +#endif +#undef LANDLOCK_ERRATA_ABI + +/* + * For each new erratum, we need to include all the ABI files up to the impacted + * ABI to make all potential future intermediate errata easy to backport. + * + * If such change involves more than one ABI addition, then it must be in a + * dedicated commit with the same Fixes tag as used for the actual fix. + * + * Each commit creating a new security/landlock/errata/abi-*.h file must have a + * Depends-on tag to reference the commit that previously added the line to + * include this new file, except if the original Fixes tag is enough. + * + * Each erratum must be documented in its related ABI file, and a dedicated + * commit must update Documentation/userspace-api/landlock.rst to include this + * erratum. This commit will not be backported. + */ + +#endif + + {} +}; + +#endif /* _SECURITY_LANDLOCK_ERRATA_H */ diff --git a/security/landlock/errata/abi-4.h b/security/landlock/errata/abi-4.h new file mode 100644 index 000000000000..c052ee54f89f --- /dev/null +++ b/security/landlock/errata/abi-4.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +/** + * DOC: erratum_1 + * + * Erratum 1: TCP socket identification + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This fix addresses an issue where IPv4 and IPv6 stream sockets (e.g., SMC, + * MPTCP, or SCTP) were incorrectly restricted by TCP access rights during + * :manpage:`bind(2)` and :manpage:`connect(2)` operations. This change ensures + * that only TCP sockets are subject to TCP access rights, allowing other + * protocols to operate without unnecessary restrictions. + */ +LANDLOCK_ERRATUM(1) diff --git a/security/landlock/errata/abi-6.h b/security/landlock/errata/abi-6.h new file mode 100644 index 000000000000..df7bc0e1fdf4 --- /dev/null +++ b/security/landlock/errata/abi-6.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +/** + * DOC: erratum_2 + * + * Erratum 2: Scoped signal handling + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This fix addresses an issue where signal scoping was overly restrictive, + * preventing sandboxed threads from signaling other threads within the same + * process if they belonged to different domains. Because threads are not + * security boundaries, user space might assume that any thread within the same + * process can send signals between themselves (see :manpage:`nptl(7)` and + * :manpage:`libpsx(3)`). Consistent with :manpage:`ptrace(2)` behavior, direct + * interaction between threads of the same process should always be allowed. + * This change ensures that any thread is allowed to send signals to any other + * thread within the same process, regardless of their domain. 
+ */ +LANDLOCK_ERRATUM(2) diff --git a/security/landlock/fs.c b/security/landlock/fs.c index 582769ae830e..6fee7c20f64d 100644 --- a/security/landlock/fs.c +++ b/security/landlock/fs.c @@ -1,10 +1,10 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Landlock LSM - Filesystem management and hooks + * Landlock - Filesystem management and hooks * * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net> * Copyright © 2018-2020 ANSSI - * Copyright © 2021-2022 Microsoft Corporation + * Copyright © 2021-2025 Microsoft Corporation * Copyright © 2022 Günther Noack <gnoack3000@gmail.com> * Copyright © 2023-2024 Google LLC */ @@ -23,11 +23,14 @@ #include <linux/kernel.h> #include <linux/limits.h> #include <linux/list.h> +#include <linux/lsm_audit.h> #include <linux/lsm_hooks.h> #include <linux/mount.h> #include <linux/namei.h> #include <linux/path.h> +#include <linux/pid.h> #include <linux/rcupdate.h> +#include <linux/sched/signal.h> #include <linux/spinlock.h> #include <linux/stat.h> #include <linux/types.h> @@ -37,8 +40,10 @@ #include <uapi/linux/landlock.h> #include "access.h" +#include "audit.h" #include "common.h" #include "cred.h" +#include "domain.h" #include "fs.h" #include "limits.h" #include "object.h" @@ -393,12 +398,6 @@ static const struct access_masks any_fs = { .fs = ~0, }; -static const struct landlock_ruleset *get_current_fs_domain(void) -{ - return landlock_get_applicable_domain(landlock_get_current_domain(), - any_fs); -} - /* * Check that a destination file hierarchy has more restrictions than a source * file hierarchy. This is only used for link and rename actions. @@ -728,6 +727,7 @@ static void test_is_eacces_with_write(struct kunit *const test) * those identified by @access_request_parent1). This matrix can * initially refer to domain layer masks and, when the accesses for the * destination and source are the same, to requested layer masks. + * @log_request_parent1: Audit request to fill if the related access is denied. * @dentry_child1: Dentry to the initial child of the parent1 path. This * pointer must be NULL for non-refer actions (i.e. not link nor rename). * @access_request_parent2: Similar to @access_request_parent1 but for a @@ -736,6 +736,7 @@ static void test_is_eacces_with_write(struct kunit *const test) * the source. Must be set to 0 when using a simple path request. * @layer_masks_parent2: Similar to @layer_masks_parent1 but for a refer * action. This must be NULL otherwise. + * @log_request_parent2: Audit request to fill if the related access is denied. * @dentry_child2: Dentry to the initial child of the parent2 path. This * pointer is only set for RENAME_EXCHANGE actions and must be NULL * otherwise. 
@@ -755,10 +756,12 @@ static bool is_access_to_paths_allowed( const struct path *const path, const access_mask_t access_request_parent1, layer_mask_t (*const layer_masks_parent1)[LANDLOCK_NUM_ACCESS_FS], - const struct dentry *const dentry_child1, + struct landlock_request *const log_request_parent1, + struct dentry *const dentry_child1, const access_mask_t access_request_parent2, layer_mask_t (*const layer_masks_parent2)[LANDLOCK_NUM_ACCESS_FS], - const struct dentry *const dentry_child2) + struct landlock_request *const log_request_parent2, + struct dentry *const dentry_child2) { bool allowed_parent1 = false, allowed_parent2 = false, is_dom_check, child1_is_directory = true, child2_is_directory = true; @@ -771,11 +774,14 @@ static bool is_access_to_paths_allowed( if (!access_request_parent1 && !access_request_parent2) return true; - if (WARN_ON_ONCE(!domain || !path)) + + if (WARN_ON_ONCE(!path)) return true; + if (is_nouser_or_private(path->dentry)) return true; - if (WARN_ON_ONCE(domain->num_layers < 1 || !layer_masks_parent1)) + + if (WARN_ON_ONCE(!layer_masks_parent1)) return false; allowed_parent1 = is_layer_masks_allowed(layer_masks_parent1); @@ -920,24 +926,51 @@ jump_up: } path_put(&walker_path); + if (!allowed_parent1) { + log_request_parent1->type = LANDLOCK_REQUEST_FS_ACCESS; + log_request_parent1->audit.type = LSM_AUDIT_DATA_PATH; + log_request_parent1->audit.u.path = *path; + log_request_parent1->access = access_masked_parent1; + log_request_parent1->layer_masks = layer_masks_parent1; + log_request_parent1->layer_masks_size = + ARRAY_SIZE(*layer_masks_parent1); + } + + if (!allowed_parent2) { + log_request_parent2->type = LANDLOCK_REQUEST_FS_ACCESS; + log_request_parent2->audit.type = LSM_AUDIT_DATA_PATH; + log_request_parent2->audit.u.path = *path; + log_request_parent2->access = access_masked_parent2; + log_request_parent2->layer_masks = layer_masks_parent2; + log_request_parent2->layer_masks_size = + ARRAY_SIZE(*layer_masks_parent2); + } return allowed_parent1 && allowed_parent2; } static int current_check_access_path(const struct path *const path, access_mask_t access_request) { - const struct landlock_ruleset *const dom = get_current_fs_domain(); + const struct access_masks masks = { + .fs = access_request, + }; + const struct landlock_cred_security *const subject = + landlock_get_applicable_subject(current_cred(), masks, NULL); layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {}; + struct landlock_request request = {}; - if (!dom) + if (!subject) return 0; - access_request = landlock_init_layer_masks( - dom, access_request, &layer_masks, LANDLOCK_KEY_INODE); - if (is_access_to_paths_allowed(dom, path, access_request, &layer_masks, - NULL, 0, NULL, NULL)) + access_request = landlock_init_layer_masks(subject->domain, + access_request, &layer_masks, + LANDLOCK_KEY_INODE); + if (is_access_to_paths_allowed(subject->domain, path, access_request, + &layer_masks, &request, NULL, 0, NULL, + NULL, NULL)) return 0; + landlock_log_denial(subject, &request); return -EACCES; } @@ -1098,18 +1131,19 @@ static int current_check_refer_path(struct dentry *const old_dentry, struct dentry *const new_dentry, const bool removable, const bool exchange) { - const struct landlock_ruleset *const dom = get_current_fs_domain(); + const struct landlock_cred_security *const subject = + landlock_get_applicable_subject(current_cred(), any_fs, NULL); bool allow_parent1, allow_parent2; access_mask_t access_request_parent1, access_request_parent2; struct path mnt_dir; struct dentry *old_parent; 
layer_mask_t layer_masks_parent1[LANDLOCK_NUM_ACCESS_FS] = {}, layer_masks_parent2[LANDLOCK_NUM_ACCESS_FS] = {}; + struct landlock_request request1 = {}, request2 = {}; - if (!dom) + if (!subject) return 0; - if (WARN_ON_ONCE(dom->num_layers < 1)) - return -EACCES; + if (unlikely(d_is_negative(old_dentry))) return -ENOENT; if (exchange) { @@ -1134,12 +1168,16 @@ static int current_check_refer_path(struct dentry *const old_dentry, * for same-directory referer (i.e. no reparenting). */ access_request_parent1 = landlock_init_layer_masks( - dom, access_request_parent1 | access_request_parent2, + subject->domain, + access_request_parent1 | access_request_parent2, &layer_masks_parent1, LANDLOCK_KEY_INODE); - if (is_access_to_paths_allowed( - dom, new_dir, access_request_parent1, - &layer_masks_parent1, NULL, 0, NULL, NULL)) + if (is_access_to_paths_allowed(subject->domain, new_dir, + access_request_parent1, + &layer_masks_parent1, &request1, + NULL, 0, NULL, NULL, NULL)) return 0; + + landlock_log_denial(subject, &request1); return -EACCES; } @@ -1160,10 +1198,12 @@ static int current_check_refer_path(struct dentry *const old_dentry, old_dentry->d_parent; /* new_dir->dentry is equal to new_dentry->d_parent */ - allow_parent1 = collect_domain_accesses(dom, mnt_dir.dentry, old_parent, + allow_parent1 = collect_domain_accesses(subject->domain, mnt_dir.dentry, + old_parent, &layer_masks_parent1); - allow_parent2 = collect_domain_accesses( - dom, mnt_dir.dentry, new_dir->dentry, &layer_masks_parent2); + allow_parent2 = collect_domain_accesses(subject->domain, mnt_dir.dentry, + new_dir->dentry, + &layer_masks_parent2); if (allow_parent1 && allow_parent2) return 0; @@ -1175,11 +1215,21 @@ static int current_check_refer_path(struct dentry *const old_dentry, * destination parent access rights. */ if (is_access_to_paths_allowed( - dom, &mnt_dir, access_request_parent1, &layer_masks_parent1, - old_dentry, access_request_parent2, &layer_masks_parent2, + subject->domain, &mnt_dir, access_request_parent1, + &layer_masks_parent1, &request1, old_dentry, + access_request_parent2, &layer_masks_parent2, &request2, exchange ? new_dentry : NULL)) return 0; + if (request1.access) { + request1.audit.u.path.dentry = old_parent; + landlock_log_denial(subject, &request1); + } + if (request2.access) { + request2.audit.u.path.dentry = new_dir->dentry; + landlock_log_denial(subject, &request2); + } + /* * This prioritizes EACCES over EXDEV for all actions, including * renames with RENAME_EXCHANGE. @@ -1322,6 +1372,34 @@ static void hook_sb_delete(struct super_block *const sb) !atomic_long_read(&landlock_superblock(sb)->inode_refs)); } +static void +log_fs_change_topology_path(const struct landlock_cred_security *const subject, + size_t handle_layer, const struct path *const path) +{ + landlock_log_denial(subject, &(struct landlock_request) { + .type = LANDLOCK_REQUEST_FS_CHANGE_TOPOLOGY, + .audit = { + .type = LSM_AUDIT_DATA_PATH, + .u.path = *path, + }, + .layer_plus_one = handle_layer + 1, + }); +} + +static void log_fs_change_topology_dentry( + const struct landlock_cred_security *const subject, size_t handle_layer, + struct dentry *const dentry) +{ + landlock_log_denial(subject, &(struct landlock_request) { + .type = LANDLOCK_REQUEST_FS_CHANGE_TOPOLOGY, + .audit = { + .type = LSM_AUDIT_DATA_DENTRY, + .u.dentry = dentry, + }, + .layer_plus_one = handle_layer + 1, + }); +} + /* * Because a Landlock security policy is defined according to the filesystem * topology (i.e. 
the mount namespace), changing it may grant access to files @@ -1344,16 +1422,30 @@ static int hook_sb_mount(const char *const dev_name, const struct path *const path, const char *const type, const unsigned long flags, void *const data) { - if (!get_current_fs_domain()) + size_t handle_layer; + const struct landlock_cred_security *const subject = + landlock_get_applicable_subject(current_cred(), any_fs, + &handle_layer); + + if (!subject) return 0; + + log_fs_change_topology_path(subject, handle_layer, path); return -EPERM; } static int hook_move_mount(const struct path *const from_path, const struct path *const to_path) { - if (!get_current_fs_domain()) + size_t handle_layer; + const struct landlock_cred_security *const subject = + landlock_get_applicable_subject(current_cred(), any_fs, + &handle_layer); + + if (!subject) return 0; + + log_fs_change_topology_path(subject, handle_layer, to_path); return -EPERM; } @@ -1363,15 +1455,29 @@ static int hook_move_mount(const struct path *const from_path, */ static int hook_sb_umount(struct vfsmount *const mnt, const int flags) { - if (!get_current_fs_domain()) + size_t handle_layer; + const struct landlock_cred_security *const subject = + landlock_get_applicable_subject(current_cred(), any_fs, + &handle_layer); + + if (!subject) return 0; + + log_fs_change_topology_dentry(subject, handle_layer, mnt->mnt_root); return -EPERM; } static int hook_sb_remount(struct super_block *const sb, void *const mnt_opts) { - if (!get_current_fs_domain()) + size_t handle_layer; + const struct landlock_cred_security *const subject = + landlock_get_applicable_subject(current_cred(), any_fs, + &handle_layer); + + if (!subject) return 0; + + log_fs_change_topology_dentry(subject, handle_layer, sb->s_root); return -EPERM; } @@ -1386,8 +1492,15 @@ static int hook_sb_remount(struct super_block *const sb, void *const mnt_opts) static int hook_sb_pivotroot(const struct path *const old_path, const struct path *const new_path) { - if (!get_current_fs_domain()) + size_t handle_layer; + const struct landlock_cred_security *const subject = + landlock_get_applicable_subject(current_cred(), any_fs, + &handle_layer); + + if (!subject) return 0; + + log_fs_change_topology_path(subject, handle_layer, new_path); return -EPERM; } @@ -1504,11 +1617,11 @@ static int hook_file_open(struct file *const file) layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {}; access_mask_t open_access_request, full_access_request, allowed_access, optional_access; - const struct landlock_ruleset *const dom = - landlock_get_applicable_domain( - landlock_cred(file->f_cred)->domain, any_fs); + const struct landlock_cred_security *const subject = + landlock_get_applicable_subject(file->f_cred, any_fs, NULL); + struct landlock_request request = {}; - if (!dom) + if (!subject) return 0; /* @@ -1529,10 +1642,11 @@ static int hook_file_open(struct file *const file) full_access_request = open_access_request | optional_access; if (is_access_to_paths_allowed( - dom, &file->f_path, - landlock_init_layer_masks(dom, full_access_request, - &layer_masks, LANDLOCK_KEY_INODE), - &layer_masks, NULL, 0, NULL, NULL)) { + subject->domain, &file->f_path, + landlock_init_layer_masks(subject->domain, + full_access_request, &layer_masks, + LANDLOCK_KEY_INODE), + &layer_masks, &request, NULL, 0, NULL, NULL, NULL)) { allowed_access = full_access_request; } else { unsigned long access_bit; @@ -1558,10 +1672,18 @@ static int hook_file_open(struct file *const file) * file access rights in the opened struct file. 
*/ landlock_file(file)->allowed_access = allowed_access; +#ifdef CONFIG_AUDIT + landlock_file(file)->deny_masks = landlock_get_deny_masks( + _LANDLOCK_ACCESS_FS_OPTIONAL, optional_access, &layer_masks, + ARRAY_SIZE(layer_masks)); +#endif /* CONFIG_AUDIT */ if ((open_access_request & allowed_access) == open_access_request) return 0; + /* Sets access to reflect the actual request. */ + request.access = open_access_request; + landlock_log_denial(subject, &request); return -EACCES; } @@ -1579,11 +1701,24 @@ static int hook_file_truncate(struct file *const file) */ if (landlock_file(file)->allowed_access & LANDLOCK_ACCESS_FS_TRUNCATE) return 0; + + landlock_log_denial(landlock_cred(file->f_cred), &(struct landlock_request) { + .type = LANDLOCK_REQUEST_FS_ACCESS, + .audit = { + .type = LSM_AUDIT_DATA_FILE, + .u.file = file, + }, + .all_existing_optional_access = _LANDLOCK_ACCESS_FS_OPTIONAL, + .access = LANDLOCK_ACCESS_FS_TRUNCATE, +#ifdef CONFIG_AUDIT + .deny_masks = landlock_file(file)->deny_masks, +#endif /* CONFIG_AUDIT */ + }); return -EACCES; } -static int hook_file_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) +static int hook_file_ioctl_common(const struct file *const file, + const unsigned int cmd, const bool is_compat) { access_mask_t allowed_access = landlock_file(file)->allowed_access; @@ -1599,56 +1734,98 @@ static int hook_file_ioctl(struct file *file, unsigned int cmd, if (!is_device(file)) return 0; - if (is_masked_device_ioctl(cmd)) + if (unlikely(is_compat) ? is_masked_device_ioctl_compat(cmd) : + is_masked_device_ioctl(cmd)) return 0; + landlock_log_denial(landlock_cred(file->f_cred), &(struct landlock_request) { + .type = LANDLOCK_REQUEST_FS_ACCESS, + .audit = { + .type = LSM_AUDIT_DATA_IOCTL_OP, + .u.op = &(struct lsm_ioctlop_audit) { + .path = file->f_path, + .cmd = cmd, + }, + }, + .all_existing_optional_access = _LANDLOCK_ACCESS_FS_OPTIONAL, + .access = LANDLOCK_ACCESS_FS_IOCTL_DEV, +#ifdef CONFIG_AUDIT + .deny_masks = landlock_file(file)->deny_masks, +#endif /* CONFIG_AUDIT */ + }); return -EACCES; } +static int hook_file_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + return hook_file_ioctl_common(file, cmd, false); +} + static int hook_file_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) { - access_mask_t allowed_access = landlock_file(file)->allowed_access; + return hook_file_ioctl_common(file, cmd, true); +} + +/* + * Always allow sending signals between threads of the same process. This + * ensures consistency with hook_task_kill(). + */ +static bool control_current_fowner(struct fown_struct *const fown) +{ + struct task_struct *p; /* - * It is the access rights at the time of opening the file which - * determine whether IOCTL can be used on the opened file later. - * - * The access right is attached to the opened file in hook_file_open(). + * Lock already held by __f_setown(), see commit 26f204380a3c ("fs: Fix + * file_set_fowner LSM hook inconsistencies"). */ - if (allowed_access & LANDLOCK_ACCESS_FS_IOCTL_DEV) - return 0; + lockdep_assert_held(&fown->lock); - if (!is_device(file)) - return 0; - - if (is_masked_device_ioctl_compat(cmd)) - return 0; + /* + * Some callers (e.g. fcntl_dirnotify) may not be in an RCU read-side + * critical section. 
+ */ + guard(rcu)(); + p = pid_task(fown->pid, fown->pid_type); + if (!p) + return true; - return -EACCES; + return !same_thread_group(p, current); } static void hook_file_set_fowner(struct file *file) { - struct landlock_ruleset *new_dom, *prev_dom; + struct landlock_ruleset *prev_dom; + struct landlock_cred_security fown_subject = {}; + size_t fown_layer = 0; + + if (control_current_fowner(file_f_owner(file))) { + static const struct access_masks signal_scope = { + .scope = LANDLOCK_SCOPE_SIGNAL, + }; + const struct landlock_cred_security *new_subject = + landlock_get_applicable_subject( + current_cred(), signal_scope, &fown_layer); + if (new_subject) { + landlock_get_ruleset(new_subject->domain); + fown_subject = *new_subject; + } + } - /* - * Lock already held by __f_setown(), see commit 26f204380a3c ("fs: Fix - * file_set_fowner LSM hook inconsistencies"). - */ - lockdep_assert_held(&file_f_owner(file)->lock); - new_dom = landlock_get_current_domain(); - landlock_get_ruleset(new_dom); - prev_dom = landlock_file(file)->fown_domain; - landlock_file(file)->fown_domain = new_dom; + prev_dom = landlock_file(file)->fown_subject.domain; + landlock_file(file)->fown_subject = fown_subject; +#ifdef CONFIG_AUDIT + landlock_file(file)->fown_layer = fown_layer; +#endif /* CONFIG_AUDIT*/ - /* Called in an RCU read-side critical section. */ + /* May be called in an RCU read-side critical section. */ landlock_put_ruleset_deferred(prev_dom); } static void hook_file_free_security(struct file *file) { - landlock_put_ruleset_deferred(landlock_file(file)->fown_domain); + landlock_put_ruleset_deferred(landlock_file(file)->fown_subject.domain); } static struct security_hook_list landlock_hooks[] __ro_after_init = { diff --git a/security/landlock/fs.h b/security/landlock/fs.h index d445f411c26a..bf9948941f2f 100644 --- a/security/landlock/fs.h +++ b/security/landlock/fs.h @@ -1,19 +1,22 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Landlock LSM - Filesystem management and hooks + * Landlock - Filesystem management and hooks * * Copyright © 2017-2020 Mickaël Salaün <mic@digikod.net> * Copyright © 2018-2020 ANSSI + * Copyright © 2024-2025 Microsoft Corporation */ #ifndef _SECURITY_LANDLOCK_FS_H #define _SECURITY_LANDLOCK_FS_H +#include <linux/build_bug.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/rcupdate.h> #include "access.h" +#include "cred.h" #include "ruleset.h" #include "setup.h" @@ -53,15 +56,40 @@ struct landlock_file_security { * needed to authorize later operations on the open file. */ access_mask_t allowed_access; + +#ifdef CONFIG_AUDIT + /** + * @deny_masks: Domain layer levels that deny an optional access (see + * _LANDLOCK_ACCESS_FS_OPTIONAL). + */ + deny_masks_t deny_masks; /** - * @fown_domain: Domain of the task that set the PID that may receive a - * signal e.g., SIGURG when writing MSG_OOB to the related socket. - * This pointer is protected by the related file->f_owner->lock, as for - * fown_struct's members: pid, uid, and euid. + * @fown_layer: Layer level of @fown_subject->domain with + * LANDLOCK_SCOPE_SIGNAL. */ - struct landlock_ruleset *fown_domain; + u8 fown_layer; +#endif /* CONFIG_AUDIT */ + + /** + * @fown_subject: Landlock credential of the task that set the PID that + * may receive a signal e.g., SIGURG when writing MSG_OOB to the + * related socket. This pointer is protected by the related + * file->f_owner->lock, as for fown_struct's members: pid, uid, and + * euid. 
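[Illustration, not part of the patch] The fown_subject snapshot documented above is taken when a task calls F_SETOWN and is consumed later when the kernel raises a signal on the file's behalf. A rough user-space sketch of the path that exercises these hooks (no Landlock setup shown, so nothing is denied here): the task calling fcntl(F_SETOWN) is the subject recorded by hook_file_set_fowner(), and the later delivery goes through hook_file_send_sigiotask() against that recorded domain rather than the writer's.

#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t got_sigio;

static void on_sigio(int sig)
{
        (void)sig;
        got_sigio = 1;
}

int main(void)
{
        int fds[2];

        signal(SIGIO, on_sigio);
        pipe(fds);

        /* hook_file_set_fowner() snapshots this task's Landlock credential. */
        fcntl(fds[0], F_SETOWN, getpid());
        fcntl(fds[0], F_SETFL, O_ASYNC | fcntl(fds[0], F_GETFL));

        /* Making the pipe readable triggers send_sigio() -> hook_file_send_sigiotask(). */
        write(fds[1], "x", 1);
        sleep(1);
        puts(got_sigio ? "SIGIO delivered" : "SIGIO not delivered");
        return 0;
}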
+ */ + struct landlock_cred_security fown_subject; }; +#ifdef CONFIG_AUDIT + +/* Makes sure all layers can be identified. */ +/* clang-format off */ +static_assert((typeof_member(struct landlock_file_security, fown_layer))~0 >= + LANDLOCK_MAX_NUM_LAYERS); +/* clang-format off */ + +#endif /* CONFIG_AUDIT */ + /** * struct landlock_superblock_security - Superblock security blob * diff --git a/security/landlock/id.c b/security/landlock/id.c new file mode 100644 index 000000000000..11fab9259c15 --- /dev/null +++ b/security/landlock/id.c @@ -0,0 +1,251 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Landlock - Unique identification number generator + * + * Copyright © 2024-2025 Microsoft Corporation + */ + +#include <kunit/test.h> +#include <linux/atomic.h> +#include <linux/random.h> +#include <linux/spinlock.h> + +#include "common.h" +#include "id.h" + +#define COUNTER_PRE_INIT 0 + +static atomic64_t next_id = ATOMIC64_INIT(COUNTER_PRE_INIT); + +static void __init init_id(atomic64_t *const counter, const u32 random_32bits) +{ + u64 init; + + /* + * Ensures sure 64-bit values are always used by user space (or may + * fail with -EOVERFLOW), and makes this testable. + */ + init = 1ULL << 32; + + /* + * Makes a large (2^32) boot-time value to limit ID collision in logs + * from different boots, and to limit info leak about the number of + * initially (relative to the reader) created elements (e.g. domains). + */ + init += random_32bits; + + /* Sets first or ignores. This will be the first ID. */ + atomic64_cmpxchg(counter, COUNTER_PRE_INIT, init); +} + +#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST + +static void __init test_init_min(struct kunit *const test) +{ + atomic64_t counter = ATOMIC64_INIT(COUNTER_PRE_INIT); + + init_id(&counter, 0); + KUNIT_EXPECT_EQ(test, atomic64_read(&counter), 1ULL + U32_MAX); +} + +static void __init test_init_max(struct kunit *const test) +{ + atomic64_t counter = ATOMIC64_INIT(COUNTER_PRE_INIT); + + init_id(&counter, ~0); + KUNIT_EXPECT_EQ(test, atomic64_read(&counter), 1 + (2ULL * U32_MAX)); +} + +static void __init test_init_once(struct kunit *const test) +{ + const u64 first_init = 1ULL + U32_MAX; + atomic64_t counter = ATOMIC64_INIT(COUNTER_PRE_INIT); + + init_id(&counter, 0); + KUNIT_EXPECT_EQ(test, atomic64_read(&counter), first_init); + + init_id(&counter, ~0); + KUNIT_EXPECT_EQ_MSG( + test, atomic64_read(&counter), first_init, + "Should still have the same value after the subsequent init_id()"); +} + +#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */ + +void __init landlock_init_id(void) +{ + return init_id(&next_id, get_random_u32()); +} + +/* + * It's not worth it to try to hide the monotonic counter because it can still + * be inferred (with N counter ranges), and if we are allowed to read the inode + * number we should also be allowed to read the time creation anyway, and it + * can be handy to store and sort domain IDs for user space. + * + * Returns the value of next_id and increment it to let some space for the next + * one. + */ +static u64 get_id_range(size_t number_of_ids, atomic64_t *const counter, + u8 random_4bits) +{ + u64 id, step; + + /* + * We should return at least 1 ID, and we may need a set of consecutive + * ones (e.g. to generate a set of inodes). + */ + if (WARN_ON_ONCE(number_of_ids <= 0)) + number_of_ids = 1; + + /* + * Blurs the next ID guess with 1/16 ratio. 
We get 2^(64 - 4) - + * (2 * 2^32), so a bit less than 2^60 available IDs, which should be + * much more than enough considering the number of CPU cycles required + * to get a new ID (e.g. a full landlock_restrict_self() call), and the + * cost of draining all available IDs during the system's uptime. + */ + random_4bits = random_4bits % (1 << 4); + step = number_of_ids + random_4bits; + + /* It is safe to cast a signed atomic to an unsigned value. */ + id = atomic64_fetch_add(step, counter); + + /* Warns if landlock_init_id() was not called. */ + WARN_ON_ONCE(id == COUNTER_PRE_INIT); + return id; +} + +#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST + +static void test_range1_rand0(struct kunit *const test) +{ + atomic64_t counter; + u64 init; + + init = get_random_u32(); + atomic64_set(&counter, init); + KUNIT_EXPECT_EQ(test, get_id_range(1, &counter, 0), init); + KUNIT_EXPECT_EQ( + test, get_id_range(get_random_u8(), &counter, get_random_u8()), + init + 1); +} + +static void test_range1_rand1(struct kunit *const test) +{ + atomic64_t counter; + u64 init; + + init = get_random_u32(); + atomic64_set(&counter, init); + KUNIT_EXPECT_EQ(test, get_id_range(1, &counter, 1), init); + KUNIT_EXPECT_EQ( + test, get_id_range(get_random_u8(), &counter, get_random_u8()), + init + 2); +} + +static void test_range1_rand16(struct kunit *const test) +{ + atomic64_t counter; + u64 init; + + init = get_random_u32(); + atomic64_set(&counter, init); + KUNIT_EXPECT_EQ(test, get_id_range(1, &counter, 16), init); + KUNIT_EXPECT_EQ( + test, get_id_range(get_random_u8(), &counter, get_random_u8()), + init + 1); +} + +static void test_range2_rand0(struct kunit *const test) +{ + atomic64_t counter; + u64 init; + + init = get_random_u32(); + atomic64_set(&counter, init); + KUNIT_EXPECT_EQ(test, get_id_range(2, &counter, 0), init); + KUNIT_EXPECT_EQ( + test, get_id_range(get_random_u8(), &counter, get_random_u8()), + init + 2); +} + +static void test_range2_rand1(struct kunit *const test) +{ + atomic64_t counter; + u64 init; + + init = get_random_u32(); + atomic64_set(&counter, init); + KUNIT_EXPECT_EQ(test, get_id_range(2, &counter, 1), init); + KUNIT_EXPECT_EQ( + test, get_id_range(get_random_u8(), &counter, get_random_u8()), + init + 3); +} + +static void test_range2_rand2(struct kunit *const test) +{ + atomic64_t counter; + u64 init; + + init = get_random_u32(); + atomic64_set(&counter, init); + KUNIT_EXPECT_EQ(test, get_id_range(2, &counter, 2), init); + KUNIT_EXPECT_EQ( + test, get_id_range(get_random_u8(), &counter, get_random_u8()), + init + 4); +} + +static void test_range2_rand16(struct kunit *const test) +{ + atomic64_t counter; + u64 init; + + init = get_random_u32(); + atomic64_set(&counter, init); + KUNIT_EXPECT_EQ(test, get_id_range(2, &counter, 16), init); + KUNIT_EXPECT_EQ( + test, get_id_range(get_random_u8(), &counter, get_random_u8()), + init + 2); +} + +#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */ + +/** + * landlock_get_id_range - Get a range of unique IDs + * + * @number_of_ids: Number of IDs to hold. Must be greater than one. + * + * Returns: The first ID in the range. 
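[Illustration, not part of the patch] A worked example of the counter arithmetic above, with assumed random values: init_id() with random_32bits = 0x89abcdef makes the first ID 2^32 + 0x89abcdef, and a later get_id_range(2, ..., 3) returns that value while advancing the counter by number_of_ids + random_4bits = 5, matching the pattern the KUnit range tests verify.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* init_id() with an assumed random_32bits of 0x89abcdef. */
        uint64_t counter = (1ULL << 32) + 0x89abcdefULL;
        /* get_id_range(2, &counter, 3): returns the old value, then steps by 2 + 3. */
        uint64_t first = counter;

        counter += 2 + 3;
        printf("first ID: %#llx, next allocation starts at %#llx\n",
               (unsigned long long)first, (unsigned long long)counter);
        return 0;
}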
+ */ +u64 landlock_get_id_range(size_t number_of_ids) +{ + return get_id_range(number_of_ids, &next_id, get_random_u8()); +} + +#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST + +static struct kunit_case __refdata test_cases[] = { + /* clang-format off */ + KUNIT_CASE(test_init_min), + KUNIT_CASE(test_init_max), + KUNIT_CASE(test_init_once), + KUNIT_CASE(test_range1_rand0), + KUNIT_CASE(test_range1_rand1), + KUNIT_CASE(test_range1_rand16), + KUNIT_CASE(test_range2_rand0), + KUNIT_CASE(test_range2_rand1), + KUNIT_CASE(test_range2_rand2), + KUNIT_CASE(test_range2_rand16), + {} + /* clang-format on */ +}; + +static struct kunit_suite test_suite = { + .name = "landlock_id", + .test_cases = test_cases, +}; + +kunit_test_init_section_suite(test_suite); + +#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */ diff --git a/security/landlock/id.h b/security/landlock/id.h new file mode 100644 index 000000000000..45dcfb9e9a8b --- /dev/null +++ b/security/landlock/id.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Landlock - Unique identification number generator + * + * Copyright © 2024-2025 Microsoft Corporation + */ + +#ifndef _SECURITY_LANDLOCK_ID_H +#define _SECURITY_LANDLOCK_ID_H + +#ifdef CONFIG_AUDIT + +void __init landlock_init_id(void); + +u64 landlock_get_id_range(size_t number_of_ids); + +#else /* CONFIG_AUDIT */ + +static inline void __init landlock_init_id(void) +{ +} + +#endif /* CONFIG_AUDIT */ + +#endif /* _SECURITY_LANDLOCK_ID_H */ diff --git a/security/landlock/limits.h b/security/landlock/limits.h index 15f7606066c8..65b5ff051674 100644 --- a/security/landlock/limits.h +++ b/security/landlock/limits.h @@ -1,9 +1,10 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Landlock LSM - Limits for different components + * Landlock - Limits for different components * * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net> * Copyright © 2018-2020 ANSSI + * Copyright © 2021-2025 Microsoft Corporation */ #ifndef _SECURITY_LANDLOCK_LIMITS_H @@ -29,6 +30,10 @@ #define LANDLOCK_LAST_SCOPE LANDLOCK_SCOPE_SIGNAL #define LANDLOCK_MASK_SCOPE ((LANDLOCK_LAST_SCOPE << 1) - 1) #define LANDLOCK_NUM_SCOPE __const_hweight64(LANDLOCK_MASK_SCOPE) + +#define LANDLOCK_LAST_RESTRICT_SELF LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF +#define LANDLOCK_MASK_RESTRICT_SELF ((LANDLOCK_LAST_RESTRICT_SELF << 1) - 1) + /* clang-format on */ #endif /* _SECURITY_LANDLOCK_LIMITS_H */ diff --git a/security/landlock/net.c b/security/landlock/net.c index 104b6c01fe50..1f3915a90a80 100644 --- a/security/landlock/net.c +++ b/security/landlock/net.c @@ -1,16 +1,18 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Landlock LSM - Network management and hooks + * Landlock - Network management and hooks * * Copyright © 2022-2023 Huawei Tech. Co., Ltd. 
- * Copyright © 2022-2023 Microsoft Corporation + * Copyright © 2022-2025 Microsoft Corporation */ #include <linux/in.h> +#include <linux/lsm_audit.h> #include <linux/net.h> #include <linux/socket.h> #include <net/ipv6.h> +#include "audit.h" #include "common.h" #include "cred.h" #include "limits.h" @@ -39,10 +41,6 @@ int landlock_append_net_rule(struct landlock_ruleset *const ruleset, return err; } -static const struct access_masks any_net = { - .net = ~0, -}; - static int current_check_access_socket(struct socket *const sock, struct sockaddr *const address, const int addrlen, @@ -54,14 +52,15 @@ static int current_check_access_socket(struct socket *const sock, struct landlock_id id = { .type = LANDLOCK_KEY_NET_PORT, }; - const struct landlock_ruleset *const dom = - landlock_get_applicable_domain(landlock_get_current_domain(), - any_net); + const struct access_masks masks = { + .net = access_request, + }; + const struct landlock_cred_security *const subject = + landlock_get_applicable_subject(current_cred(), masks, NULL); + struct lsm_network_audit audit_net = {}; - if (!dom) + if (!subject) return 0; - if (WARN_ON_ONCE(dom->num_layers < 1)) - return -EACCES; if (!sk_is_tcp(sock->sk)) return 0; @@ -72,18 +71,48 @@ static int current_check_access_socket(struct socket *const sock, switch (address->sa_family) { case AF_UNSPEC: - case AF_INET: + case AF_INET: { + const struct sockaddr_in *addr4; + if (addrlen < sizeof(struct sockaddr_in)) return -EINVAL; - port = ((struct sockaddr_in *)address)->sin_port; + + addr4 = (struct sockaddr_in *)address; + port = addr4->sin_port; + + if (access_request == LANDLOCK_ACCESS_NET_CONNECT_TCP) { + audit_net.dport = port; + audit_net.v4info.daddr = addr4->sin_addr.s_addr; + } else if (access_request == LANDLOCK_ACCESS_NET_BIND_TCP) { + audit_net.sport = port; + audit_net.v4info.saddr = addr4->sin_addr.s_addr; + } else { + WARN_ON_ONCE(1); + } break; + } #if IS_ENABLED(CONFIG_IPV6) - case AF_INET6: + case AF_INET6: { + const struct sockaddr_in6 *addr6; + if (addrlen < SIN6_LEN_RFC2133) return -EINVAL; - port = ((struct sockaddr_in6 *)address)->sin6_port; + + addr6 = (struct sockaddr_in6 *)address; + port = addr6->sin6_port; + + if (access_request == LANDLOCK_ACCESS_NET_CONNECT_TCP) { + audit_net.dport = port; + audit_net.v6info.daddr = addr6->sin6_addr; + } else if (access_request == LANDLOCK_ACCESS_NET_BIND_TCP) { + audit_net.sport = port; + audit_net.v6info.saddr = addr6->sin6_addr; + } else { + WARN_ON_ONCE(1); + } break; + } #endif /* IS_ENABLED(CONFIG_IPV6) */ default: @@ -145,13 +174,24 @@ static int current_check_access_socket(struct socket *const sock, id.key.data = (__force uintptr_t)port; BUILD_BUG_ON(sizeof(port) > sizeof(id.key.data)); - rule = landlock_find_rule(dom, id); - access_request = landlock_init_layer_masks( - dom, access_request, &layer_masks, LANDLOCK_KEY_NET_PORT); + rule = landlock_find_rule(subject->domain, id); + access_request = landlock_init_layer_masks(subject->domain, + access_request, &layer_masks, + LANDLOCK_KEY_NET_PORT); if (landlock_unmask_layers(rule, access_request, &layer_masks, ARRAY_SIZE(layer_masks))) return 0; + audit_net.family = address->sa_family; + landlock_log_denial(subject, + &(struct landlock_request){ + .type = LANDLOCK_REQUEST_NET_ACCESS, + .audit.type = LSM_AUDIT_DATA_NET, + .audit.u.net = &audit_net, + .access = access_request, + .layer_masks = &layer_masks, + .layer_masks_size = ARRAY_SIZE(layer_masks), + }); return -EACCES; } diff --git a/security/landlock/ruleset.c b/security/landlock/ruleset.c index 
bff4e40a3093..ce7940efea51 100644 --- a/security/landlock/ruleset.c +++ b/security/landlock/ruleset.c @@ -23,6 +23,8 @@ #include <linux/workqueue.h> #include "access.h" +#include "audit.h" +#include "domain.h" #include "limits.h" #include "object.h" #include "ruleset.h" @@ -307,22 +309,6 @@ int landlock_insert_rule(struct landlock_ruleset *const ruleset, return insert_rule(ruleset, id, &layers, ARRAY_SIZE(layers)); } -static void get_hierarchy(struct landlock_hierarchy *const hierarchy) -{ - if (hierarchy) - refcount_inc(&hierarchy->usage); -} - -static void put_hierarchy(struct landlock_hierarchy *hierarchy) -{ - while (hierarchy && refcount_dec_and_test(&hierarchy->usage)) { - const struct landlock_hierarchy *const freeme = hierarchy; - - hierarchy = hierarchy->parent; - kfree(freeme); - } -} - static int merge_tree(struct landlock_ruleset *const dst, struct landlock_ruleset *const src, const enum landlock_key_type key_type) @@ -477,7 +463,7 @@ static int inherit_ruleset(struct landlock_ruleset *const parent, err = -EINVAL; goto out_unlock; } - get_hierarchy(parent->hierarchy); + landlock_get_hierarchy(parent->hierarchy); child->hierarchy->parent = parent->hierarchy; out_unlock: @@ -501,7 +487,7 @@ static void free_ruleset(struct landlock_ruleset *const ruleset) free_rule(freeme, LANDLOCK_KEY_NET_PORT); #endif /* IS_ENABLED(CONFIG_INET) */ - put_hierarchy(ruleset->hierarchy); + landlock_put_hierarchy(ruleset->hierarchy); kfree(ruleset); } @@ -520,6 +506,7 @@ static void free_ruleset_work(struct work_struct *const work) free_ruleset(ruleset); } +/* Only called by hook_cred_free(). */ void landlock_put_ruleset_deferred(struct landlock_ruleset *const ruleset) { if (ruleset && refcount_dec_and_test(&ruleset->usage)) { @@ -534,6 +521,9 @@ void landlock_put_ruleset_deferred(struct landlock_ruleset *const ruleset) * @parent: Parent domain. * @ruleset: New ruleset to be merged. * + * The current task is requesting to be restricted. The subjective credentials + * must not be in an overridden state. cf. landlock_init_hierarchy_log(). + * * Returns the intersection of @parent and @ruleset, or returns @parent if * @ruleset is empty, or returns a duplicate of @ruleset if @parent is empty. */ @@ -579,6 +569,10 @@ landlock_merge_ruleset(struct landlock_ruleset *const parent, if (err) return ERR_PTR(err); + err = landlock_init_hierarchy_log(new_dom->hierarchy); + if (err) + return ERR_PTR(err); + return no_free_ptr(new_dom); } diff --git a/security/landlock/ruleset.h b/security/landlock/ruleset.h index 52f4f0af6ab0..5da9a64f5af7 100644 --- a/security/landlock/ruleset.h +++ b/security/landlock/ruleset.h @@ -20,6 +20,8 @@ #include "limits.h" #include "object.h" +struct landlock_hierarchy; + /** * struct landlock_layer - Access rights for a given layer */ @@ -109,22 +111,6 @@ struct landlock_rule { }; /** - * struct landlock_hierarchy - Node in a ruleset hierarchy - */ -struct landlock_hierarchy { - /** - * @parent: Pointer to the parent node, or NULL if it is a root - * Landlock domain. - */ - struct landlock_hierarchy *parent; - /** - * @usage: Number of potential children domains plus their parent - * domain. 
- */ - refcount_t usage; -}; - -/** * struct landlock_ruleset - Landlock ruleset * * This data structure must contain unique entries, be updatable, and quick to @@ -257,36 +243,6 @@ landlock_union_access_masks(const struct landlock_ruleset *const domain) return matches.masks; } -/** - * landlock_get_applicable_domain - Return @domain if it applies to (handles) - * at least one of the access rights specified - * in @masks - * - * @domain: Landlock ruleset (used as a domain) - * @masks: access masks - * - * Returns: @domain if any access rights specified in @masks is handled, or - * NULL otherwise. - */ -static inline const struct landlock_ruleset * -landlock_get_applicable_domain(const struct landlock_ruleset *const domain, - const struct access_masks masks) -{ - const union access_masks_all masks_all = { - .masks = masks, - }; - union access_masks_all merge = {}; - - if (!domain) - return NULL; - - merge.masks = landlock_union_access_masks(domain); - if (merge.all & masks_all.all) - return domain; - - return NULL; -} - static inline void landlock_add_fs_access_mask(struct landlock_ruleset *const ruleset, const access_mask_t fs_access_mask, diff --git a/security/landlock/setup.c b/security/landlock/setup.c index 28519a45b11f..bd53c7a56ab9 100644 --- a/security/landlock/setup.c +++ b/security/landlock/setup.c @@ -6,19 +6,27 @@ * Copyright © 2018-2020 ANSSI */ +#include <linux/bits.h> #include <linux/init.h> #include <linux/lsm_hooks.h> #include <uapi/linux/lsm.h> #include "common.h" #include "cred.h" +#include "errata.h" #include "fs.h" +#include "id.h" #include "net.h" #include "setup.h" #include "task.h" bool landlock_initialized __ro_after_init = false; +const struct lsm_id landlock_lsmid = { + .name = LANDLOCK_NAME, + .id = LSM_ID_LANDLOCK, +}; + struct lsm_blob_sizes landlock_blob_sizes __ro_after_init = { .lbs_cred = sizeof(struct landlock_cred_security), .lbs_file = sizeof(struct landlock_file_security), @@ -26,17 +34,41 @@ struct lsm_blob_sizes landlock_blob_sizes __ro_after_init = { .lbs_superblock = sizeof(struct landlock_superblock_security), }; -const struct lsm_id landlock_lsmid = { - .name = LANDLOCK_NAME, - .id = LSM_ID_LANDLOCK, -}; +int landlock_errata __ro_after_init; + +static void __init compute_errata(void) +{ + size_t i; + +#ifndef __has_include + /* + * This is a safeguard to make sure the compiler implements + * __has_include (see errata.h). 
+ */ + WARN_ON_ONCE(1); + return; +#endif + + for (i = 0; landlock_errata_init[i].number; i++) { + const int prev_errata = landlock_errata; + + if (WARN_ON_ONCE(landlock_errata_init[i].abi > + landlock_abi_version)) + continue; + + landlock_errata |= BIT(landlock_errata_init[i].number - 1); + WARN_ON_ONCE(prev_errata == landlock_errata); + } +} static int __init landlock_init(void) { + compute_errata(); landlock_add_cred_hooks(); landlock_add_task_hooks(); landlock_add_fs_hooks(); landlock_add_net_hooks(); + landlock_init_id(); landlock_initialized = true; pr_info("Up and running.\n"); return 0; diff --git a/security/landlock/setup.h b/security/landlock/setup.h index c4252d46d49d..fca307c35fee 100644 --- a/security/landlock/setup.h +++ b/security/landlock/setup.h @@ -11,7 +11,10 @@ #include <linux/lsm_hooks.h> +extern const int landlock_abi_version; + extern bool landlock_initialized; +extern int landlock_errata; extern struct lsm_blob_sizes landlock_blob_sizes; extern const struct lsm_id landlock_lsmid; diff --git a/security/landlock/syscalls.c b/security/landlock/syscalls.c index a9760d252fc2..54a9f29e6ebb 100644 --- a/security/landlock/syscalls.c +++ b/security/landlock/syscalls.c @@ -1,9 +1,10 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Landlock LSM - System call implementations and user space interfaces + * Landlock - System call implementations and user space interfaces * * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net> * Copyright © 2018-2020 ANSSI + * Copyright © 2021-2025 Microsoft Corporation */ #include <asm/current.h> @@ -28,6 +29,7 @@ #include <uapi/linux/landlock.h> #include "cred.h" +#include "domain.h" #include "fs.h" #include "limits.h" #include "net.h" @@ -151,7 +153,14 @@ static const struct file_operations ruleset_fops = { .write = fop_dummy_write, }; -#define LANDLOCK_ABI_VERSION 6 +/* + * The Landlock ABI version should be incremented for each new Landlock-related + * user space visible change (e.g. Landlock syscalls). This version should + * only be incremented once per Linux release, and the date in + * Documentation/userspace-api/landlock.rst should be updated to reflect the + * UAPI change. + */ +const int landlock_abi_version = 7; /** * sys_landlock_create_ruleset - Create a new ruleset @@ -160,7 +169,9 @@ static const struct file_operations ruleset_fops = { * the new ruleset. * @size: Size of the pointed &struct landlock_ruleset_attr (needed for * backward and forward compatibility). - * @flags: Supported value: %LANDLOCK_CREATE_RULESET_VERSION. + * @flags: Supported value: + * - %LANDLOCK_CREATE_RULESET_VERSION + * - %LANDLOCK_CREATE_RULESET_ERRATA * * This system call enables to create a new Landlock ruleset, and returns the * related file descriptor on success. @@ -169,6 +180,10 @@ static const struct file_operations ruleset_fops = { * 0, then the returned value is the highest supported Landlock ABI version * (starting at 1). * + * If @flags is %LANDLOCK_CREATE_RULESET_ERRATA and @attr is NULL and @size is + * 0, then the returned value is a bitmask of fixed issues for the current + * Landlock ABI version. 
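[Illustration, not part of the patch] Both query modes can be probed from user space without creating a ruleset. A minimal sketch, assuming UAPI headers from a kernel carrying this series (LANDLOCK_CREATE_RULESET_ERRATA is new here); per compute_errata() above, bit N - 1 set in the returned bitmask means erratum N is fixed.

#include <linux/landlock.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        long abi = syscall(__NR_landlock_create_ruleset, NULL, 0,
                           LANDLOCK_CREATE_RULESET_VERSION);
        long errata = syscall(__NR_landlock_create_ruleset, NULL, 0,
                              LANDLOCK_CREATE_RULESET_ERRATA);

        if (abi < 0) {
                perror("landlock_create_ruleset");
                return 1; /* Landlock unsupported or disabled at boot */
        }

        /* With this series the ABI is 7; each set bit reports a fixed erratum. */
        printf("Landlock ABI: %ld, errata bitmask: %#lx\n", abi, errata);
        return 0;
}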
+ * * Possible returned errors are: * * - %EOPNOTSUPP: Landlock is supported by the kernel but disabled at boot time; @@ -192,9 +207,15 @@ SYSCALL_DEFINE3(landlock_create_ruleset, return -EOPNOTSUPP; if (flags) { - if ((flags == LANDLOCK_CREATE_RULESET_VERSION) && !attr && - !size) - return LANDLOCK_ABI_VERSION; + if (attr || size) + return -EINVAL; + + if (flags == LANDLOCK_CREATE_RULESET_VERSION) + return landlock_abi_version; + + if (flags == LANDLOCK_CREATE_RULESET_ERRATA) + return landlock_errata; + return -EINVAL; } @@ -429,17 +450,24 @@ SYSCALL_DEFINE4(landlock_add_rule, const int, ruleset_fd, * sys_landlock_restrict_self - Enforce a ruleset on the calling thread * * @ruleset_fd: File descriptor tied to the ruleset to merge with the target. - * @flags: Must be 0. + * @flags: Supported values: + * + * - %LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF + * - %LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON + * - %LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF * * This system call enables to enforce a Landlock ruleset on the current * thread. Enforcing a ruleset requires that the task has %CAP_SYS_ADMIN in its * namespace or is running with no_new_privs. This avoids scenarios where * unprivileged tasks can affect the behavior of privileged children. * + * It is allowed to only pass the %LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF + * flag with a @ruleset_fd value of -1. + * * Possible returned errors are: * * - %EOPNOTSUPP: Landlock is supported by the kernel but disabled at boot time; - * - %EINVAL: @flags is not 0. + * - %EINVAL: @flags contains an unknown bit. * - %EBADF: @ruleset_fd is not a file descriptor for the current thread; * - %EBADFD: @ruleset_fd is not a ruleset file descriptor; * - %EPERM: @ruleset_fd has no read access to the underlying ruleset, or the @@ -455,6 +483,8 @@ SYSCALL_DEFINE2(landlock_restrict_self, const int, ruleset_fd, const __u32, *ruleset __free(landlock_put_ruleset) = NULL; struct cred *new_cred; struct landlock_cred_security *new_llcred; + bool __maybe_unused log_same_exec, log_new_exec, log_subdomains, + prev_log_subdomains; if (!is_initialized()) return -EOPNOTSUPP; @@ -467,14 +497,28 @@ SYSCALL_DEFINE2(landlock_restrict_self, const int, ruleset_fd, const __u32, !ns_capable_noaudit(current_user_ns(), CAP_SYS_ADMIN)) return -EPERM; - /* No flag for now. */ - if (flags) + if ((flags | LANDLOCK_MASK_RESTRICT_SELF) != + LANDLOCK_MASK_RESTRICT_SELF) return -EINVAL; - /* Gets and checks the ruleset. */ - ruleset = get_ruleset_from_fd(ruleset_fd, FMODE_CAN_READ); - if (IS_ERR(ruleset)) - return PTR_ERR(ruleset); + /* Translates "off" flag to boolean. */ + log_same_exec = !(flags & LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF); + /* Translates "on" flag to boolean. */ + log_new_exec = !!(flags & LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON); + /* Translates "off" flag to boolean. */ + log_subdomains = !(flags & LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF); + + /* + * It is allowed to set LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF with + * -1 as ruleset_fd, but no other flag must be set. + */ + if (!(ruleset_fd == -1 && + flags == LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF)) { + /* Gets and checks the ruleset. */ + ruleset = get_ruleset_from_fd(ruleset_fd, FMODE_CAN_READ); + if (IS_ERR(ruleset)) + return PTR_ERR(ruleset); + } /* Prepares new credentials. 
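[Illustration, not part of the patch] The ruleset_fd == -1 case documented above lets a process adjust only the logging behaviour of its future nested domains without enforcing anything new itself. A hedged sketch of such a caller (the function name and error handling are illustrative):

#include <linux/landlock.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Ask the kernel not to audit denials from domains created later by this thread. */
static int quiet_future_subdomains(void)
{
        if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
                return -1;

        /* ruleset_fd == -1 is only valid with exactly this flag, as checked above. */
        return syscall(__NR_landlock_restrict_self, -1,
                       LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF);
}

A sandboxing helper could call something like this before executing the target program so that the target's own nested Landlock domains do not flood the audit log.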
*/ new_cred = prepare_creds(); @@ -483,6 +527,21 @@ SYSCALL_DEFINE2(landlock_restrict_self, const int, ruleset_fd, const __u32, new_llcred = landlock_cred(new_cred); +#ifdef CONFIG_AUDIT + prev_log_subdomains = !new_llcred->log_subdomains_off; + new_llcred->log_subdomains_off = !prev_log_subdomains || + !log_subdomains; +#endif /* CONFIG_AUDIT */ + + /* + * The only case when a ruleset may not be set is if + * LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF is set and ruleset_fd is -1. + * We could optimize this case by not calling commit_creds() if this flag + * was already set, but it is not worth the complexity. + */ + if (!ruleset) + return commit_creds(new_cred); + /* * There is no possible race condition while copying and manipulating * the current credentials because they are dedicated per thread. @@ -493,8 +552,20 @@ SYSCALL_DEFINE2(landlock_restrict_self, const int, ruleset_fd, const __u32, return PTR_ERR(new_dom); } +#ifdef CONFIG_AUDIT + new_dom->hierarchy->log_same_exec = log_same_exec; + new_dom->hierarchy->log_new_exec = log_new_exec; + if ((!log_same_exec && !log_new_exec) || !prev_log_subdomains) + new_dom->hierarchy->log_status = LANDLOCK_LOG_DISABLED; +#endif /* CONFIG_AUDIT */ + /* Replaces the old (prepared) domain. */ landlock_put_ruleset(new_llcred->domain); new_llcred->domain = new_dom; + +#ifdef CONFIG_AUDIT + new_llcred->domain_exec |= 1 << (new_dom->num_layers - 1); +#endif /* CONFIG_AUDIT */ + return commit_creds(new_cred); } diff --git a/security/landlock/task.c b/security/landlock/task.c index dc7dab78392e..2385017418ca 100644 --- a/security/landlock/task.c +++ b/security/landlock/task.c @@ -1,23 +1,29 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Landlock LSM - Ptrace hooks + * Landlock - Ptrace and scope hooks * * Copyright © 2017-2020 Mickaël Salaün <mic@digikod.net> * Copyright © 2019-2020 ANSSI + * Copyright © 2024-2025 Microsoft Corporation */ #include <asm/current.h> +#include <linux/cleanup.h> #include <linux/cred.h> #include <linux/errno.h> #include <linux/kernel.h> +#include <linux/lsm_audit.h> #include <linux/lsm_hooks.h> #include <linux/rcupdate.h> #include <linux/sched.h> +#include <linux/sched/signal.h> #include <net/af_unix.h> #include <net/sock.h> +#include "audit.h" #include "common.h" #include "cred.h" +#include "domain.h" #include "fs.h" #include "ruleset.h" #include "setup.h" @@ -37,41 +43,29 @@ static bool domain_scope_le(const struct landlock_ruleset *const parent, { const struct landlock_hierarchy *walker; + /* Quick return for non-landlocked tasks. */ if (!parent) return true; + if (!child) return false; + for (walker = child->hierarchy; walker; walker = walker->parent) { if (walker == parent->hierarchy) /* @parent is in the scoped hierarchy of @child. */ return true; } + /* There is no relationship between @parent and @child. */ return false; } -static bool task_is_scoped(const struct task_struct *const parent, - const struct task_struct *const child) -{ - bool is_scoped; - const struct landlock_ruleset *dom_parent, *dom_child; - - rcu_read_lock(); - dom_parent = landlock_get_task_domain(parent); - dom_child = landlock_get_task_domain(child); - is_scoped = domain_scope_le(dom_parent, dom_child); - rcu_read_unlock(); - return is_scoped; -} - -static int task_ptrace(const struct task_struct *const parent, - const struct task_struct *const child) +static int domain_ptrace(const struct landlock_ruleset *const parent, + const struct landlock_ruleset *const child) { - /* Quick return for non-landlocked tasks. 
*/ - if (!landlocked(parent)) - return 0; - if (task_is_scoped(parent, child)) + if (domain_scope_le(parent, child)) return 0; + return -EPERM; } @@ -91,7 +85,39 @@ static int task_ptrace(const struct task_struct *const parent, static int hook_ptrace_access_check(struct task_struct *const child, const unsigned int mode) { - return task_ptrace(current, child); + const struct landlock_cred_security *parent_subject; + const struct landlock_ruleset *child_dom; + int err; + + /* Quick return for non-landlocked tasks. */ + parent_subject = landlock_cred(current_cred()); + if (!parent_subject) + return 0; + + scoped_guard(rcu) + { + child_dom = landlock_get_task_domain(child); + err = domain_ptrace(parent_subject->domain, child_dom); + } + + if (!err) + return 0; + + /* + * For the ptrace_access_check case, we log the current/parent domain + * and the child task. + */ + if (!(mode & PTRACE_MODE_NOAUDIT)) + landlock_log_denial(parent_subject, &(struct landlock_request) { + .type = LANDLOCK_REQUEST_PTRACE, + .audit = { + .type = LSM_AUDIT_DATA_TASK, + .u.tsk = child, + }, + .layer_plus_one = parent_subject->domain->num_layers, + }); + + return err; } /** @@ -108,7 +134,35 @@ static int hook_ptrace_access_check(struct task_struct *const child, */ static int hook_ptrace_traceme(struct task_struct *const parent) { - return task_ptrace(parent, current); + const struct landlock_cred_security *parent_subject; + const struct landlock_ruleset *child_dom; + int err; + + child_dom = landlock_get_current_domain(); + + guard(rcu)(); + parent_subject = landlock_cred(__task_cred(parent)); + err = domain_ptrace(parent_subject->domain, child_dom); + + if (!err) + return 0; + + /* + * For the ptrace_traceme case, we log the domain which is the cause of + * the denial, which means the parent domain instead of the current + * domain. This may look unusual because the ptrace_traceme action is a + * request to be traced, but the semantic is consistent with + * hook_ptrace_access_check(). + */ + landlock_log_denial(parent_subject, &(struct landlock_request) { + .type = LANDLOCK_REQUEST_PTRACE, + .audit = { + .type = LSM_AUDIT_DATA_TASK, + .u.tsk = current, + }, + .layer_plus_one = parent_subject->domain->num_layers, + }); + return err; } /** @@ -127,7 +181,7 @@ static bool domain_is_scoped(const struct landlock_ruleset *const client, access_mask_t scope) { int client_layer, server_layer; - struct landlock_hierarchy *client_walker, *server_walker; + const struct landlock_hierarchy *client_walker, *server_walker; /* Quick return if client has no domain */ if (WARN_ON_ONCE(!client)) @@ -212,28 +266,43 @@ static int hook_unix_stream_connect(struct sock *const sock, struct sock *const other, struct sock *const newsk) { - const struct landlock_ruleset *const dom = - landlock_get_applicable_domain(landlock_get_current_domain(), - unix_scope); + size_t handle_layer; + const struct landlock_cred_security *const subject = + landlock_get_applicable_subject(current_cred(), unix_scope, + &handle_layer); /* Quick return for non-landlocked tasks. 
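[Illustration, not part of the patch] Stepping back to the two ptrace hooks above: the denial they now audit is the long-standing Landlock rule that a domain may only trace tasks within its own hierarchy. A small user-space sketch that triggers hook_ptrace_access_check() and, with this series, produces an audit record (on systems with Yama or another LSM the same call may be refused for additional reasons):

#include <linux/landlock.h>
#include <stdio.h>
#include <sys/prctl.h>
#include <sys/ptrace.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        const struct landlock_ruleset_attr attr = {
                .handled_access_fs = LANDLOCK_ACCESS_FS_EXECUTE,
        };
        pid_t parent = getpid();

        if (fork() == 0) {
                int fd = syscall(__NR_landlock_create_ruleset, &attr, sizeof(attr), 0);

                prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
                syscall(__NR_landlock_restrict_self, fd, 0);
                close(fd);

                /* The unrestricted parent is outside the child's domain: EPERM. */
                if (ptrace(PTRACE_ATTACH, parent, NULL, NULL) < 0)
                        perror("ptrace(PTRACE_ATTACH)");
                _exit(0);
        }
        sleep(1); /* keep the parent around while the child attempts to attach */
        return 0;
}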
*/ - if (!dom) + if (!subject) + return 0; + + if (!is_abstract_socket(other)) return 0; - if (is_abstract_socket(other) && sock_is_scoped(other, dom)) - return -EPERM; + if (!sock_is_scoped(other, subject->domain)) + return 0; - return 0; + landlock_log_denial(subject, &(struct landlock_request) { + .type = LANDLOCK_REQUEST_SCOPE_ABSTRACT_UNIX_SOCKET, + .audit = { + .type = LSM_AUDIT_DATA_NET, + .u.net = &(struct lsm_network_audit) { + .sk = other, + }, + }, + .layer_plus_one = handle_layer + 1, + }); + return -EPERM; } static int hook_unix_may_send(struct socket *const sock, struct socket *const other) { - const struct landlock_ruleset *const dom = - landlock_get_applicable_domain(landlock_get_current_domain(), - unix_scope); + size_t handle_layer; + const struct landlock_cred_security *const subject = + landlock_get_applicable_subject(current_cred(), unix_scope, + &handle_layer); - if (!dom) + if (!subject) return 0; /* @@ -243,10 +312,23 @@ static int hook_unix_may_send(struct socket *const sock, if (unix_peer(sock->sk) == other->sk) return 0; - if (is_abstract_socket(other->sk) && sock_is_scoped(other->sk, dom)) - return -EPERM; + if (!is_abstract_socket(other->sk)) + return 0; + + if (!sock_is_scoped(other->sk, subject->domain)) + return 0; - return 0; + landlock_log_denial(subject, &(struct landlock_request) { + .type = LANDLOCK_REQUEST_SCOPE_ABSTRACT_UNIX_SOCKET, + .audit = { + .type = LSM_AUDIT_DATA_NET, + .u.net = &(struct lsm_network_audit) { + .sk = other->sk, + }, + }, + .layer_plus_one = handle_layer + 1, + }); + return -EPERM; } static const struct access_masks signal_scope = { @@ -255,56 +337,97 @@ static const struct access_masks signal_scope = { static int hook_task_kill(struct task_struct *const p, struct kernel_siginfo *const info, const int sig, - const struct cred *const cred) + const struct cred *cred) { bool is_scoped; - const struct landlock_ruleset *dom; - - if (cred) { - /* Dealing with USB IO. */ - dom = landlock_cred(cred)->domain; - } else { - dom = landlock_get_current_domain(); + size_t handle_layer; + const struct landlock_cred_security *subject; + + if (!cred) { + /* + * Always allow sending signals between threads of the same process. + * This is required for process credential changes by the Native POSIX + * Threads Library and implemented by the set*id(2) wrappers and + * libcap(3) with tgkill(2). See nptl(7) and libpsx(3). + * + * This exception is similar to the __ptrace_may_access() one. + */ + if (same_thread_group(p, current)) + return 0; + + /* Not dealing with USB IO. */ + cred = current_cred(); } - dom = landlock_get_applicable_domain(dom, signal_scope); + + subject = landlock_get_applicable_subject(cred, signal_scope, + &handle_layer); /* Quick return for non-landlocked tasks. 
*/ - if (!dom) + if (!subject) return 0; - rcu_read_lock(); - is_scoped = domain_is_scoped(dom, landlock_get_task_domain(p), - LANDLOCK_SCOPE_SIGNAL); - rcu_read_unlock(); - if (is_scoped) - return -EPERM; + scoped_guard(rcu) + { + is_scoped = domain_is_scoped(subject->domain, + landlock_get_task_domain(p), + signal_scope.scope); + } + + if (!is_scoped) + return 0; - return 0; + landlock_log_denial(subject, &(struct landlock_request) { + .type = LANDLOCK_REQUEST_SCOPE_SIGNAL, + .audit = { + .type = LSM_AUDIT_DATA_TASK, + .u.tsk = p, + }, + .layer_plus_one = handle_layer + 1, + }); + return -EPERM; } static int hook_file_send_sigiotask(struct task_struct *tsk, struct fown_struct *fown, int signum) { - const struct landlock_ruleset *dom; + const struct landlock_cred_security *subject; bool is_scoped = false; /* Lock already held by send_sigio() and send_sigurg(). */ lockdep_assert_held(&fown->lock); - dom = landlock_get_applicable_domain( - landlock_file(fown->file)->fown_domain, signal_scope); + subject = &landlock_file(fown->file)->fown_subject; - /* Quick return for unowned socket. */ - if (!dom) + /* + * Quick return for unowned socket. + * + * subject->domain has already been filtered when saved by + * hook_file_set_fowner(), so there is no need to call + * landlock_get_applicable_subject() here. + */ + if (!subject->domain) return 0; - rcu_read_lock(); - is_scoped = domain_is_scoped(dom, landlock_get_task_domain(tsk), - LANDLOCK_SCOPE_SIGNAL); - rcu_read_unlock(); - if (is_scoped) - return -EPERM; + scoped_guard(rcu) + { + is_scoped = domain_is_scoped(subject->domain, + landlock_get_task_domain(tsk), + signal_scope.scope); + } + + if (!is_scoped) + return 0; - return 0; + landlock_log_denial(subject, &(struct landlock_request) { + .type = LANDLOCK_REQUEST_SCOPE_SIGNAL, + .audit = { + .type = LSM_AUDIT_DATA_TASK, + .u.tsk = tsk, + }, +#ifdef CONFIG_AUDIT + .layer_plus_one = landlock_file(fown->file)->fown_layer + 1, +#endif /* CONFIG_AUDIT */ + }); + return -EPERM; } static struct security_hook_list landlock_hooks[] __ro_after_init = { diff --git a/security/lsm_audit.c b/security/lsm_audit.c index d45651eaefa4..1b942b4908a2 100644 --- a/security/lsm_audit.c +++ b/security/lsm_audit.c @@ -189,16 +189,13 @@ static inline void print_ipv4_addr(struct audit_buffer *ab, __be32 addr, } /** - * dump_common_audit_data - helper to dump common audit data + * audit_log_lsm_data - helper to log common LSM audit data * @ab : the audit buffer * @a : common audit data - * */ -static void dump_common_audit_data(struct audit_buffer *ab, - struct common_audit_data *a) +void audit_log_lsm_data(struct audit_buffer *ab, + const struct common_audit_data *a) { - char comm[sizeof(current->comm)]; - /* * To keep stack sizes in check force programmers to notice if they * start making this union too large! 
See struct lsm_network_audit @@ -206,9 +203,6 @@ static void dump_common_audit_data(struct audit_buffer *ab, */ BUILD_BUG_ON(sizeof(a->u) > sizeof(void *)*2); - audit_log_format(ab, " pid=%d comm=", task_tgid_nr(current)); - audit_log_untrustedstring(ab, get_task_comm(comm, current)); - switch (a->type) { case LSM_AUDIT_DATA_NONE: return; @@ -432,6 +426,21 @@ static void dump_common_audit_data(struct audit_buffer *ab, } /** + * dump_common_audit_data - helper to dump common audit data + * @ab : the audit buffer + * @a : common audit data + */ +static void dump_common_audit_data(struct audit_buffer *ab, + const struct common_audit_data *a) +{ + char comm[sizeof(current->comm)]; + + audit_log_format(ab, " pid=%d comm=", task_tgid_nr(current)); + audit_log_untrustedstring(ab, get_task_comm(comm, current)); + audit_log_lsm_data(ab, a); +} + +/** * common_lsm_audit - generic LSM auditing function * @a: auxiliary audit data * @pre_audit: lsm-specific pre-audit callback |
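[Illustration, not part of the patch] The split above makes the pid=/comm= prefix the caller's responsibility while keeping the object description reusable. A hedged sketch of how an LSM could consume the new helper; the record type, format string, and surrounding code are illustrative, and Landlock's actual audit.c is not part of this excerpt.

/* Kernel-side fragment; assumes <linux/audit.h> and <linux/lsm_audit.h>. */
static void example_log_denial(const struct common_audit_data *cad, u64 domain_id)
{
        struct audit_buffer *ab;

        ab = audit_log_start(audit_context(), GFP_ATOMIC, AUDIT_LANDLOCK_ACCESS);
        if (!ab)
                return;

        /* The caller now writes its own prefix instead of pid=/comm=... */
        audit_log_format(ab, "domain=%llx blockers=fs.truncate", domain_id);
        /* ...and reuses the common object dump (path=, dev=, ino=, etc.). */
        audit_log_lsm_data(ab, cad);
        audit_log_end(ab);
}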