landlock: Improve bit operations in audit code

Use the BIT() and BIT_ULL() macros in the new audit code instead of
explicit shifts to improve readability.  Use a bitmask instead of a modulo
operation to simplify the code.
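
For reference, the kernel's BIT() and BIT_ULL() macros expand to 1UL and
1ULL shifts, and masking the low four bits of an unsigned value is the
same as reducing it modulo 16.  A minimal, stand-alone user-space sketch
of these equivalences (the stand-in macro definitions below are
illustrative, not part of this patch):

  #include <assert.h>
  #include <stdint.h>

  /* User-space stand-ins for the kernel's bit macros. */
  #define BIT(nr)	(1UL << (nr))
  #define BIT_ULL(nr)	(1ULL << (nr))

  int main(void)
  {
  	uint64_t random_4bits = 0xdeadbeef;

  	/* BIT_ULL(32) is the same value as the explicit 1ULL << 32 shift. */
  	assert(BIT_ULL(32) == (1ULL << 32));

  	/* For unsigned values, masking the low 4 bits equals modulo 16. */
  	assert((random_4bits & 0xf) == (random_4bits % (1 << 4)));
  	return 0;
  }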

Add test_range1_rand15() and test_range2_rand15() KUnit tests to improve
get_id_range() coverage.

Signed-off-by: Günther Noack <gnoack3000@gmail.com>
Link: https://lore.kernel.org/r/20250512093732.1408485-1-mic@digikod.net
Signed-off-by: Mickaël Salaün <mic@digikod.net>
Mickaël Salaün 2025-05-12 11:37:30 +02:00
parent b1525d0a8d
commit 3039ed4327
3 changed files with 34 additions and 4 deletions

security/landlock/audit.c

@@ -437,7 +437,7 @@ void landlock_log_denial(const struct landlock_cred_security *const subject,
return;
/* Checks if the current exec was restricting itself. */
-if (subject->domain_exec & (1 << youngest_layer)) {
+if (subject->domain_exec & BIT(youngest_layer)) {
/* Ignores denials for the same execution. */
if (!youngest_denied->log_same_exec)
return;

security/landlock/id.c

@@ -7,6 +7,7 @@
#include <kunit/test.h>
#include <linux/atomic.h>
+#include <linux/bitops.h>
#include <linux/random.h>
#include <linux/spinlock.h>
@@ -25,7 +26,7 @@ static void __init init_id(atomic64_t *const counter, const u32 random_32bits)
* Ensures 64-bit values are always used by user space (or may
* fail with -EOVERFLOW), and makes this testable.
*/
-init = 1ULL << 32;
+init = BIT_ULL(32);
/*
* Makes a large (2^32) boot-time value to limit ID collision in logs
@@ -105,7 +106,7 @@ static u64 get_id_range(size_t number_of_ids, atomic64_t *const counter,
* to get a new ID (e.g. a full landlock_restrict_self() call), and the
* cost of draining all available IDs during the system's uptime.
*/
-random_4bits = random_4bits % (1 << 4);
+random_4bits &= 0b1111;
step = number_of_ids + random_4bits;
/* It is safe to cast a signed atomic to an unsigned value. */
@@ -144,6 +145,19 @@ static void test_range1_rand1(struct kunit *const test)
init + 2);
}
+static void test_range1_rand15(struct kunit *const test)
+{
+atomic64_t counter;
+u64 init;
+init = get_random_u32();
+atomic64_set(&counter, init);
+KUNIT_EXPECT_EQ(test, get_id_range(1, &counter, 15), init);
+KUNIT_EXPECT_EQ(
+test, get_id_range(get_random_u8(), &counter, get_random_u8()),
+init + 16);
+}
static void test_range1_rand16(struct kunit *const test)
{
atomic64_t counter;
@@ -196,6 +210,19 @@ static void test_range2_rand2(struct kunit *const test)
init + 4);
}
+static void test_range2_rand15(struct kunit *const test)
+{
+atomic64_t counter;
+u64 init;
+init = get_random_u32();
+atomic64_set(&counter, init);
+KUNIT_EXPECT_EQ(test, get_id_range(2, &counter, 15), init);
+KUNIT_EXPECT_EQ(
+test, get_id_range(get_random_u8(), &counter, get_random_u8()),
+init + 17);
+}
static void test_range2_rand16(struct kunit *const test)
{
atomic64_t counter;
@@ -232,10 +259,12 @@ static struct kunit_case __refdata test_cases[] = {
KUNIT_CASE(test_init_once),
KUNIT_CASE(test_range1_rand0),
KUNIT_CASE(test_range1_rand1),
+KUNIT_CASE(test_range1_rand15),
KUNIT_CASE(test_range1_rand16),
KUNIT_CASE(test_range2_rand0),
KUNIT_CASE(test_range2_rand1),
KUNIT_CASE(test_range2_rand2),
+KUNIT_CASE(test_range2_rand15),
KUNIT_CASE(test_range2_rand16),
{}
/* clang-format on */

security/landlock/syscalls.c

@@ -9,6 +9,7 @@
#include <asm/current.h>
#include <linux/anon_inodes.h>
+#include <linux/bitops.h>
#include <linux/build_bug.h>
#include <linux/capability.h>
#include <linux/cleanup.h>
@@ -563,7 +564,7 @@ SYSCALL_DEFINE2(landlock_restrict_self, const int, ruleset_fd, const __u32,
new_llcred->domain = new_dom;
#ifdef CONFIG_AUDIT
-new_llcred->domain_exec |= 1 << (new_dom->num_layers - 1);
+new_llcred->domain_exec |= BIT(new_dom->num_layers - 1);
#endif /* CONFIG_AUDIT */
return commit_creds(new_cred);