Diffstat (limited to 'mm/damon/tests')
-rw-r--r--  mm/damon/tests/core-kunit.h   708
-rw-r--r--  mm/damon/tests/sysfs-kunit.h   25
-rw-r--r--  mm/damon/tests/vaddr-kunit.h   26
3 files changed, 711 insertions, 48 deletions
diff --git a/mm/damon/tests/core-kunit.h b/mm/damon/tests/core-kunit.h
index 51369e35298b..a1eff023e928 100644
--- a/mm/damon/tests/core-kunit.h
+++ b/mm/damon/tests/core-kunit.h
@@ -20,11 +20,17 @@ static void damon_test_regions(struct kunit *test)
struct damon_target *t;
r = damon_new_region(1, 2);
+ if (!r)
+ kunit_skip(test, "region alloc fail");
KUNIT_EXPECT_EQ(test, 1ul, r->ar.start);
KUNIT_EXPECT_EQ(test, 2ul, r->ar.end);
KUNIT_EXPECT_EQ(test, 0u, r->nr_accesses);
t = damon_new_target();
+ if (!t) {
+ damon_free_region(r);
+ kunit_skip(test, "target alloc fail");
+ }
KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t));
damon_add_region(r, t);
@@ -52,7 +58,14 @@ static void damon_test_target(struct kunit *test)
struct damon_ctx *c = damon_new_ctx();
struct damon_target *t;
+ if (!c)
+ kunit_skip(test, "ctx alloc fail");
+
t = damon_new_target();
+ if (!t) {
+ damon_destroy_ctx(c);
+ kunit_skip(test, "target alloc fail");
+ }
KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(c));
damon_add_target(c, t);
@@ -84,8 +97,15 @@ static void damon_test_aggregate(struct kunit *test)
struct damon_region *r;
int it, ir;
+ if (!ctx)
+ kunit_skip(test, "ctx alloc fail");
+
for (it = 0; it < 3; it++) {
t = damon_new_target();
+ if (!t) {
+ damon_destroy_ctx(ctx);
+ kunit_skip(test, "target alloc fail");
+ }
damon_add_target(ctx, t);
}
@@ -93,6 +113,10 @@ static void damon_test_aggregate(struct kunit *test)
damon_for_each_target(t, ctx) {
for (ir = 0; ir < 3; ir++) {
r = damon_new_region(saddr[it][ir], eaddr[it][ir]);
+ if (!r) {
+ damon_destroy_ctx(ctx);
+ kunit_skip(test, "region alloc fail");
+ }
r->nr_accesses = accesses[it][ir];
r->nr_accesses_bp = accesses[it][ir] * 10000;
damon_add_region(r, t);
@@ -120,12 +144,17 @@ static void damon_test_aggregate(struct kunit *test)
static void damon_test_split_at(struct kunit *test)
{
- struct damon_ctx *c = damon_new_ctx();
struct damon_target *t;
struct damon_region *r, *r_new;
t = damon_new_target();
+ if (!t)
+ kunit_skip(test, "target alloc fail");
r = damon_new_region(0, 100);
+ if (!r) {
+ damon_free_target(t);
+ kunit_skip(test, "region alloc fail");
+ }
r->nr_accesses_bp = 420000;
r->nr_accesses = 42;
r->last_nr_accesses = 15;
@@ -143,7 +172,6 @@ static void damon_test_split_at(struct kunit *test)
KUNIT_EXPECT_EQ(test, r->last_nr_accesses, r_new->last_nr_accesses);
damon_free_target(t);
- damon_destroy_ctx(c);
}
static void damon_test_merge_two(struct kunit *test)
@@ -153,11 +181,21 @@ static void damon_test_merge_two(struct kunit *test)
int i;
t = damon_new_target();
+ if (!t)
+ kunit_skip(test, "target alloc fail");
r = damon_new_region(0, 100);
+ if (!r) {
+ damon_free_target(t);
+ kunit_skip(test, "region alloc fail");
+ }
r->nr_accesses = 10;
r->nr_accesses_bp = 100000;
damon_add_region(r, t);
r2 = damon_new_region(100, 300);
+ if (!r2) {
+ damon_free_target(t);
+ kunit_skip(test, "second region alloc fail");
+ }
r2->nr_accesses = 20;
r2->nr_accesses_bp = 200000;
damon_add_region(r2, t);
@@ -203,8 +241,14 @@ static void damon_test_merge_regions_of(struct kunit *test)
int i;
t = damon_new_target();
+ if (!t)
+ kunit_skip(test, "target alloc fail");
for (i = 0; i < ARRAY_SIZE(sa); i++) {
r = damon_new_region(sa[i], ea[i]);
+ if (!r) {
+ damon_free_target(t);
+ kunit_skip(test, "region alloc fail");
+ }
r->nr_accesses = nrs[i];
r->nr_accesses_bp = nrs[i] * 10000;
damon_add_region(r, t);
@@ -223,24 +267,34 @@ static void damon_test_merge_regions_of(struct kunit *test)
static void damon_test_split_regions_of(struct kunit *test)
{
- struct damon_ctx *c = damon_new_ctx();
struct damon_target *t;
struct damon_region *r;
t = damon_new_target();
+ if (!t)
+ kunit_skip(test, "target alloc fail");
r = damon_new_region(0, 22);
+ if (!r) {
+ damon_free_target(t);
+ kunit_skip(test, "region alloc fail");
+ }
damon_add_region(r, t);
- damon_split_regions_of(t, 2, DAMON_MIN_REGION);
+ damon_split_regions_of(t, 2, 1);
KUNIT_EXPECT_LE(test, damon_nr_regions(t), 2u);
damon_free_target(t);
t = damon_new_target();
+ if (!t)
+ kunit_skip(test, "second target alloc fail");
r = damon_new_region(0, 220);
+ if (!r) {
+ damon_free_target(t);
+ kunit_skip(test, "second region alloc fail");
+ }
damon_add_region(r, t);
- damon_split_regions_of(t, 4, DAMON_MIN_REGION);
+ damon_split_regions_of(t, 4, 1);
KUNIT_EXPECT_LE(test, damon_nr_regions(t), 4u);
damon_free_target(t);
- damon_destroy_ctx(c);
}
static void damon_test_ops_registration(struct kunit *test)
@@ -249,6 +303,9 @@ static void damon_test_ops_registration(struct kunit *test)
struct damon_operations ops = {.id = DAMON_OPS_VADDR}, bak;
bool need_cleanup = false;
+ if (!c)
+ kunit_skip(test, "ctx alloc fail");
+
/* DAMON_OPS_VADDR is registered only if CONFIG_DAMON_VADDR is set */
if (!damon_is_registered_ops(DAMON_OPS_VADDR)) {
bak.id = DAMON_OPS_VADDR;
@@ -294,16 +351,29 @@ static void damon_test_ops_registration(struct kunit *test)
static void damon_test_set_regions(struct kunit *test)
{
struct damon_target *t = damon_new_target();
- struct damon_region *r1 = damon_new_region(4, 16);
- struct damon_region *r2 = damon_new_region(24, 32);
+ struct damon_region *r1, *r2;
struct damon_addr_range range = {.start = 8, .end = 28};
unsigned long expects[] = {8, 16, 16, 24, 24, 28};
int expect_idx = 0;
struct damon_region *r;
+ if (!t)
+ kunit_skip(test, "target alloc fail");
+ r1 = damon_new_region(4, 16);
+ if (!r1) {
+ damon_free_target(t);
+ kunit_skip(test, "region alloc fail");
+ }
+ r2 = damon_new_region(24, 32);
+ if (!r2) {
+ damon_free_target(t);
+ damon_free_region(r1);
+ kunit_skip(test, "second region alloc fail");
+ }
+
damon_add_region(r1, t);
damon_add_region(r2, t);
- damon_set_regions(t, &range, 1, DAMON_MIN_REGION);
+ damon_set_regions(t, &range, 1, 1);
KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 3);
damon_for_each_region(r, t) {
@@ -342,6 +412,9 @@ static void damon_test_update_monitoring_result(struct kunit *test)
struct damon_attrs new_attrs;
struct damon_region *r = damon_new_region(3, 7);
+ if (!r)
+ kunit_skip(test, "region alloc fail");
+
r->nr_accesses = 15;
r->nr_accesses_bp = 150000;
r->age = 20;
@@ -375,6 +448,9 @@ static void damon_test_set_attrs(struct kunit *test)
.sample_interval = 5000, .aggr_interval = 100000,};
struct damon_attrs invalid_attrs;
+ if (!c)
+ kunit_skip(test, "ctx alloc fail");
+
KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &valid_attrs), 0);
invalid_attrs = valid_attrs;
@@ -412,6 +488,8 @@ static void damos_test_new_filter(struct kunit *test)
struct damos_filter *filter;
filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true, false);
+ if (!filter)
+ kunit_skip(test, "filter alloc fail");
KUNIT_EXPECT_EQ(test, filter->type, DAMOS_FILTER_TYPE_ANON);
KUNIT_EXPECT_EQ(test, filter->matching, true);
KUNIT_EXPECT_PTR_EQ(test, filter->list.prev, &filter->list);
@@ -419,20 +497,535 @@ static void damos_test_new_filter(struct kunit *test)
damos_destroy_filter(filter);
}
+static void damos_test_commit_quota_goal_for(struct kunit *test,
+ struct damos_quota_goal *dst,
+ struct damos_quota_goal *src)
+{
+ u64 dst_last_psi_total = 0;
+
+ if (dst->metric == DAMOS_QUOTA_SOME_MEM_PSI_US)
+ dst_last_psi_total = dst->last_psi_total;
+ damos_commit_quota_goal(dst, src);
+
+ KUNIT_EXPECT_EQ(test, dst->metric, src->metric);
+ KUNIT_EXPECT_EQ(test, dst->target_value, src->target_value);
+ if (src->metric == DAMOS_QUOTA_USER_INPUT)
+ KUNIT_EXPECT_EQ(test, dst->current_value, src->current_value);
+ if (dst_last_psi_total && src->metric == DAMOS_QUOTA_SOME_MEM_PSI_US)
+ KUNIT_EXPECT_EQ(test, dst->last_psi_total, dst_last_psi_total);
+ switch (dst->metric) {
+ case DAMOS_QUOTA_NODE_MEM_USED_BP:
+ case DAMOS_QUOTA_NODE_MEM_FREE_BP:
+ KUNIT_EXPECT_EQ(test, dst->nid, src->nid);
+ break;
+ case DAMOS_QUOTA_NODE_MEMCG_USED_BP:
+ case DAMOS_QUOTA_NODE_MEMCG_FREE_BP:
+ KUNIT_EXPECT_EQ(test, dst->nid, src->nid);
+ KUNIT_EXPECT_EQ(test, dst->memcg_id, src->memcg_id);
+ break;
+ default:
+ break;
+ }
+}
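
/*
 * A minimal sketch of the copy semantics the helper above checks,
 * reconstructed only from its expectations; the actual
 * damos_commit_quota_goal() lives in mm/damon/core.c and may do more.
 */
static void sketch_commit_quota_goal(struct damos_quota_goal *dst,
		struct damos_quota_goal *src)
{
	dst->metric = src->metric;
	dst->target_value = src->target_value;
	/* current_value is expected to be carried over only for
	 * user-input metrics; last_psi_total is expected to survive. */
	if (src->metric == DAMOS_QUOTA_USER_INPUT)
		dst->current_value = src->current_value;
	switch (src->metric) {
	case DAMOS_QUOTA_NODE_MEMCG_USED_BP:
	case DAMOS_QUOTA_NODE_MEMCG_FREE_BP:
		dst->memcg_id = src->memcg_id;
		fallthrough;
	case DAMOS_QUOTA_NODE_MEM_USED_BP:
	case DAMOS_QUOTA_NODE_MEM_FREE_BP:
		dst->nid = src->nid;
		break;
	default:
		break;
	}
}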
+
+static void damos_test_commit_quota_goal(struct kunit *test)
+{
+ struct damos_quota_goal dst = {
+ .metric = DAMOS_QUOTA_SOME_MEM_PSI_US,
+ .target_value = 1000,
+ .current_value = 123,
+ .last_psi_total = 456,
+ };
+
+ damos_test_commit_quota_goal_for(test, &dst,
+ &(struct damos_quota_goal){
+ .metric = DAMOS_QUOTA_USER_INPUT,
+ .target_value = 789,
+ .current_value = 12});
+ damos_test_commit_quota_goal_for(test, &dst,
+ &(struct damos_quota_goal){
+ .metric = DAMOS_QUOTA_NODE_MEM_FREE_BP,
+ .target_value = 345,
+ .current_value = 678,
+ .nid = 9,
+ });
+ damos_test_commit_quota_goal_for(test, &dst,
+ &(struct damos_quota_goal){
+ .metric = DAMOS_QUOTA_NODE_MEM_USED_BP,
+ .target_value = 12,
+ .current_value = 345,
+ .nid = 6,
+ });
+ damos_test_commit_quota_goal_for(test, &dst,
+ &(struct damos_quota_goal){
+ .metric = DAMOS_QUOTA_NODE_MEMCG_USED_BP,
+ .target_value = 456,
+ .current_value = 567,
+ .nid = 6,
+ .memcg_id = 7,
+ });
+ damos_test_commit_quota_goal_for(test, &dst,
+ &(struct damos_quota_goal){
+ .metric = DAMOS_QUOTA_NODE_MEMCG_FREE_BP,
+ .target_value = 890,
+ .current_value = 901,
+ .nid = 10,
+ .memcg_id = 1,
+ });
+ damos_test_commit_quota_goal_for(test, &dst,
+ &(struct damos_quota_goal) {
+ .metric = DAMOS_QUOTA_USER_INPUT,
+ .target_value = 789,
+ .current_value = 12,
+ });
+}
+
+static void damos_test_commit_quota_goals_for(struct kunit *test,
+ struct damos_quota_goal *dst_goals, int nr_dst_goals,
+ struct damos_quota_goal *src_goals, int nr_src_goals)
+{
+ struct damos_quota dst, src;
+ struct damos_quota_goal *goal, *next;
+ bool skip = true;
+ int i;
+
+ INIT_LIST_HEAD(&dst.goals);
+ INIT_LIST_HEAD(&src.goals);
+
+ for (i = 0; i < nr_dst_goals; i++) {
+ /*
+ * When nr_src_goals is smaller than dst_goals,
+ * damos_commit_quota_goals() will kfree() the dst goals.
+ * Make it kfree()-able.
+ */
+ goal = damos_new_quota_goal(dst_goals[i].metric,
+ dst_goals[i].target_value);
+ if (!goal)
+ goto out;
+ damos_add_quota_goal(&dst, goal);
+ }
+ skip = false;
+ for (i = 0; i < nr_src_goals; i++)
+ damos_add_quota_goal(&src, &src_goals[i]);
+
+ damos_commit_quota_goals(&dst, &src);
+
+ i = 0;
+ damos_for_each_quota_goal(goal, (&dst)) {
+ KUNIT_EXPECT_EQ(test, goal->metric, src_goals[i].metric);
+ KUNIT_EXPECT_EQ(test, goal->target_value,
+ src_goals[i++].target_value);
+ }
+ KUNIT_EXPECT_EQ(test, i, nr_src_goals);
+
+out:
+ damos_for_each_quota_goal_safe(goal, next, (&dst))
+ damos_destroy_quota_goal(goal);
+ if (skip)
+ kunit_skip(test, "goal alloc fail");
+}
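
/*
 * Why the helper above insists on kfree()-able dst goals: a
 * hypothetical misuse sketch, assuming damos_commit_quota_goals()
 * frees surplus destination goals as the comment in the helper says.
 * Nothing like this exists in the tests; it only illustrates the
 * crash that allocating via damos_new_quota_goal() avoids.
 */
static void sketch_stack_goal_misuse(struct damos_quota *dst,
		struct damos_quota *src)
{
	struct damos_quota_goal on_stack = {
		.metric = DAMOS_QUOTA_USER_INPUT,
		.target_value = 1,
	};

	damos_add_quota_goal(dst, &on_stack);
	/* If src holds fewer goals than dst, the commit would kfree()
	 * a stack address here, corrupting memory. */
	damos_commit_quota_goals(dst, src);
}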
+
+static void damos_test_commit_quota_goals(struct kunit *test)
+{
+ damos_test_commit_quota_goals_for(test,
+ (struct damos_quota_goal[]){}, 0,
+ (struct damos_quota_goal[]){
+ {
+ .metric = DAMOS_QUOTA_USER_INPUT,
+ .target_value = 123,
+ },
+ }, 1);
+ damos_test_commit_quota_goals_for(test,
+ (struct damos_quota_goal[]){
+ {
+ .metric = DAMOS_QUOTA_USER_INPUT,
+ .target_value = 234,
+ },
+
+ }, 1,
+ (struct damos_quota_goal[]){
+ {
+ .metric = DAMOS_QUOTA_USER_INPUT,
+ .target_value = 345,
+ },
+ }, 1);
+ damos_test_commit_quota_goals_for(test,
+ (struct damos_quota_goal[]){
+ {
+ .metric = DAMOS_QUOTA_USER_INPUT,
+ .target_value = 456,
+ },
+
+ }, 1,
+ (struct damos_quota_goal[]){}, 0);
+}
+
+static void damos_test_commit_quota(struct kunit *test)
+{
+ struct damos_quota dst = {
+ .reset_interval = 1,
+ .ms = 2,
+ .sz = 3,
+ .weight_sz = 4,
+ .weight_nr_accesses = 5,
+ .weight_age = 6,
+ };
+ struct damos_quota src = {
+ .reset_interval = 7,
+ .ms = 8,
+ .sz = 9,
+ .weight_sz = 10,
+ .weight_nr_accesses = 11,
+ .weight_age = 12,
+ };
+
+ INIT_LIST_HEAD(&dst.goals);
+ INIT_LIST_HEAD(&src.goals);
+
+ damos_commit_quota(&dst, &src);
+
+ KUNIT_EXPECT_EQ(test, dst.reset_interval, src.reset_interval);
+ KUNIT_EXPECT_EQ(test, dst.ms, src.ms);
+ KUNIT_EXPECT_EQ(test, dst.sz, src.sz);
+ KUNIT_EXPECT_EQ(test, dst.weight_sz, src.weight_sz);
+ KUNIT_EXPECT_EQ(test, dst.weight_nr_accesses, src.weight_nr_accesses);
+ KUNIT_EXPECT_EQ(test, dst.weight_age, src.weight_age);
+}
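
/*
 * A minimal sketch of the field copy the test above checks, assuming
 * damos_commit_quota() copies the tunables verbatim; both goal lists
 * are initialized above because the real implementation is expected
 * to walk them as well (see damos_commit_quota_goals() and its tests).
 */
static void sketch_commit_quota(struct damos_quota *dst,
		struct damos_quota *src)
{
	dst->reset_interval = src->reset_interval;
	dst->ms = src->ms;
	dst->sz = src->sz;
	dst->weight_sz = src->weight_sz;
	dst->weight_nr_accesses = src->weight_nr_accesses;
	dst->weight_age = src->weight_age;
}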
+
+static int damos_test_help_dests_setup(struct damos_migrate_dests *dests,
+ unsigned int *node_id_arr, unsigned int *weight_arr,
+ size_t nr_dests)
+{
+ size_t i;
+
+ dests->node_id_arr = kmalloc_array(nr_dests,
+ sizeof(*dests->node_id_arr), GFP_KERNEL);
+ if (!dests->node_id_arr)
+ return -ENOMEM;
+ dests->weight_arr = kmalloc_array(nr_dests,
+ sizeof(*dests->weight_arr), GFP_KERNEL);
+ if (!dests->weight_arr) {
+ kfree(dests->node_id_arr);
+ dests->node_id_arr = NULL;
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < nr_dests; i++) {
+ dests->node_id_arr[i] = node_id_arr[i];
+ dests->weight_arr[i] = weight_arr[i];
+ }
+ dests->nr_dests = nr_dests;
+ return 0;
+}
+
+static void damos_test_help_dests_free(struct damos_migrate_dests *dests)
+{
+ kfree(dests->node_id_arr);
+ kfree(dests->weight_arr);
+}
+
+static void damos_test_commit_dests_for(struct kunit *test,
+ unsigned int *dst_node_id_arr, unsigned int *dst_weight_arr,
+ size_t dst_nr_dests,
+ unsigned int *src_node_id_arr, unsigned int *src_weight_arr,
+ size_t src_nr_dests)
+{
+ struct damos_migrate_dests dst = {}, src = {};
+ int i, err;
+ bool skip = true;
+
+ err = damos_test_help_dests_setup(&dst, dst_node_id_arr,
+ dst_weight_arr, dst_nr_dests);
+ if (err)
+ kunit_skip(test, "dests setup fail");
+ err = damos_test_help_dests_setup(&src, src_node_id_arr,
+ src_weight_arr, src_nr_dests);
+ if (err) {
+ damos_test_help_dests_free(&dst);
+ kunit_skip(test, "src setup fail");
+ }
+ err = damos_commit_dests(&dst, &src);
+ if (err)
+ goto out;
+ skip = false;
+
+ KUNIT_EXPECT_EQ(test, dst.nr_dests, src_nr_dests);
+ for (i = 0; i < dst.nr_dests; i++) {
+ KUNIT_EXPECT_EQ(test, dst.node_id_arr[i], src_node_id_arr[i]);
+ KUNIT_EXPECT_EQ(test, dst.weight_arr[i], src_weight_arr[i]);
+ }
+
+out:
+ damos_test_help_dests_free(&dst);
+ damos_test_help_dests_free(&src);
+ if (skip)
+ kunit_skip(test, "skip");
+}
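
/*
 * What the expectations above imply damos_commit_dests() does: size
 * the destination arrays to match the source and copy the entries.
 * A sketch assuming the only failure mode is allocation failure; the
 * real implementation lives in mm/damon/core.c.
 */
static int sketch_commit_dests(struct damos_migrate_dests *dst,
		struct damos_migrate_dests *src)
{
	unsigned int *ids, *weights;
	size_t i;

	ids = kmalloc_array(src->nr_dests, sizeof(*ids), GFP_KERNEL);
	if (!ids)
		return -ENOMEM;
	weights = kmalloc_array(src->nr_dests, sizeof(*weights),
			GFP_KERNEL);
	if (!weights) {
		kfree(ids);
		return -ENOMEM;
	}
	for (i = 0; i < src->nr_dests; i++) {
		ids[i] = src->node_id_arr[i];
		weights[i] = src->weight_arr[i];
	}
	kfree(dst->node_id_arr);
	kfree(dst->weight_arr);
	dst->node_id_arr = ids;
	dst->weight_arr = weights;
	dst->nr_dests = src->nr_dests;
	return 0;
}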
+
+static void damos_test_commit_dests(struct kunit *test)
+{
+ damos_test_commit_dests_for(test,
+ (unsigned int[]){1, 2, 3}, (unsigned int[]){2, 3, 4},
+ 3,
+ (unsigned int[]){4, 5, 6}, (unsigned int[]){5, 6, 7},
+ 3);
+ damos_test_commit_dests_for(test,
+ (unsigned int[]){1, 2}, (unsigned int[]){2, 3},
+ 2,
+ (unsigned int[]){4, 5, 6}, (unsigned int[]){5, 6, 7},
+ 3);
+ damos_test_commit_dests_for(test,
+ NULL, NULL, 0,
+ (unsigned int[]){4, 5, 6}, (unsigned int[]){5, 6, 7},
+ 3);
+ damos_test_commit_dests_for(test,
+ (unsigned int[]){1, 2, 3}, (unsigned int[]){2, 3, 4},
+ 3,
+ (unsigned int[]){4, 5}, (unsigned int[]){5, 6}, 2);
+ damos_test_commit_dests_for(test,
+ (unsigned int[]){1, 2, 3}, (unsigned int[]){2, 3, 4},
+ 3,
+ NULL, NULL, 0);
+}
+
+static void damos_test_commit_filter_for(struct kunit *test,
+ struct damos_filter *dst, struct damos_filter *src)
+{
+ damos_commit_filter(dst, src);
+ KUNIT_EXPECT_EQ(test, dst->type, src->type);
+ KUNIT_EXPECT_EQ(test, dst->matching, src->matching);
+ KUNIT_EXPECT_EQ(test, dst->allow, src->allow);
+ switch (src->type) {
+ case DAMOS_FILTER_TYPE_MEMCG:
+ KUNIT_EXPECT_EQ(test, dst->memcg_id, src->memcg_id);
+ break;
+ case DAMOS_FILTER_TYPE_ADDR:
+ KUNIT_EXPECT_EQ(test, dst->addr_range.start,
+ src->addr_range.start);
+ KUNIT_EXPECT_EQ(test, dst->addr_range.end,
+ src->addr_range.end);
+ break;
+ case DAMOS_FILTER_TYPE_TARGET:
+ KUNIT_EXPECT_EQ(test, dst->target_idx, src->target_idx);
+ break;
+ case DAMOS_FILTER_TYPE_HUGEPAGE_SIZE:
+ KUNIT_EXPECT_EQ(test, dst->sz_range.min, src->sz_range.min);
+ KUNIT_EXPECT_EQ(test, dst->sz_range.max, src->sz_range.max);
+ break;
+ default:
+ break;
+ }
+}
+
static void damos_test_commit_filter(struct kunit *test)
{
- struct damos_filter *src_filter = damos_new_filter(
- DAMOS_FILTER_TYPE_ANON, true, true);
- struct damos_filter *dst_filter = damos_new_filter(
- DAMOS_FILTER_TYPE_ACTIVE, false, false);
+ struct damos_filter dst = {
+ .type = DAMOS_FILTER_TYPE_ACTIVE,
+ .matching = false,
+ .allow = false,
+ };
+
+ damos_test_commit_filter_for(test, &dst,
+ &(struct damos_filter){
+ .type = DAMOS_FILTER_TYPE_ANON,
+ .matching = true,
+ .allow = true,
+ });
+ damos_test_commit_filter_for(test, &dst,
+ &(struct damos_filter){
+ .type = DAMOS_FILTER_TYPE_MEMCG,
+ .matching = false,
+ .allow = false,
+ .memcg_id = 123,
+ });
+ damos_test_commit_filter_for(test, &dst,
+ &(struct damos_filter){
+ .type = DAMOS_FILTER_TYPE_YOUNG,
+ .matching = true,
+ .allow = true,
+ });
+ damos_test_commit_filter_for(test, &dst,
+ &(struct damos_filter){
+ .type = DAMOS_FILTER_TYPE_HUGEPAGE_SIZE,
+ .matching = false,
+ .allow = false,
+ .sz_range = {.min = 234, .max = 345},
+ });
+ damos_test_commit_filter_for(test, &dst,
+ &(struct damos_filter){
+ .type = DAMOS_FILTER_TYPE_UNMAPPED,
+ .matching = true,
+ .allow = true,
+ });
+ damos_test_commit_filter_for(test, &dst,
+ &(struct damos_filter){
+ .type = DAMOS_FILTER_TYPE_ADDR,
+ .matching = false,
+ .allow = false,
+ .addr_range = {.start = 456, .end = 567},
+ });
+ damos_test_commit_filter_for(test, &dst,
+ &(struct damos_filter){
+ .type = DAMOS_FILTER_TYPE_TARGET,
+ .matching = true,
+ .allow = true,
+ .target_idx = 6,
+ });
+}
+
+static void damos_test_help_initialize_scheme(struct damos *scheme)
+{
+ INIT_LIST_HEAD(&scheme->quota.goals);
+ INIT_LIST_HEAD(&scheme->core_filters);
+ INIT_LIST_HEAD(&scheme->ops_filters);
+}
- damos_commit_filter(dst_filter, src_filter);
- KUNIT_EXPECT_EQ(test, dst_filter->type, src_filter->type);
- KUNIT_EXPECT_EQ(test, dst_filter->matching, src_filter->matching);
- KUNIT_EXPECT_EQ(test, dst_filter->allow, src_filter->allow);
+static void damos_test_commit_for(struct kunit *test, struct damos *dst,
+ struct damos *src)
+{
+ int err;
+
+ damos_test_help_initialize_scheme(dst);
+ damos_test_help_initialize_scheme(src);
+
+ err = damos_commit(dst, src);
+ if (err)
+ kunit_skip(test, "damos_commit fail");
+
+ KUNIT_EXPECT_EQ(test, dst->pattern.min_sz_region,
+ src->pattern.min_sz_region);
+ KUNIT_EXPECT_EQ(test, dst->pattern.max_sz_region,
+ src->pattern.max_sz_region);
+ KUNIT_EXPECT_EQ(test, dst->pattern.min_nr_accesses,
+ src->pattern.min_nr_accesses);
+ KUNIT_EXPECT_EQ(test, dst->pattern.max_nr_accesses,
+ src->pattern.max_nr_accesses);
+ KUNIT_EXPECT_EQ(test, dst->pattern.min_age_region,
+ src->pattern.min_age_region);
+ KUNIT_EXPECT_EQ(test, dst->pattern.max_age_region,
+ src->pattern.max_age_region);
+
+ KUNIT_EXPECT_EQ(test, dst->action, src->action);
+ KUNIT_EXPECT_EQ(test, dst->apply_interval_us, src->apply_interval_us);
+
+ KUNIT_EXPECT_EQ(test, dst->wmarks.metric, src->wmarks.metric);
+ KUNIT_EXPECT_EQ(test, dst->wmarks.interval, src->wmarks.interval);
+ KUNIT_EXPECT_EQ(test, dst->wmarks.high, src->wmarks.high);
+ KUNIT_EXPECT_EQ(test, dst->wmarks.mid, src->wmarks.mid);
+ KUNIT_EXPECT_EQ(test, dst->wmarks.low, src->wmarks.low);
+
+ switch (src->action) {
+ case DAMOS_MIGRATE_COLD:
+ case DAMOS_MIGRATE_HOT:
+ KUNIT_EXPECT_EQ(test, dst->target_nid, src->target_nid);
+ break;
+ default:
+ break;
+ }
+}
- damos_destroy_filter(src_filter);
- damos_destroy_filter(dst_filter);
+static void damos_test_commit(struct kunit *test)
+{
+ damos_test_commit_for(test,
+ &(struct damos){
+ .pattern = (struct damos_access_pattern){
+ 1, 2, 3, 4, 5, 6},
+ .action = DAMOS_PAGEOUT,
+ .apply_interval_us = 1000000,
+ .wmarks = (struct damos_watermarks){
+ DAMOS_WMARK_FREE_MEM_RATE,
+ 900, 100, 50},
+ },
+ &(struct damos){
+ .pattern = (struct damos_access_pattern){
+ 2, 3, 4, 5, 6, 7},
+ .action = DAMOS_PAGEOUT,
+ .apply_interval_us = 2000000,
+ .wmarks = (struct damos_watermarks){
+ DAMOS_WMARK_FREE_MEM_RATE,
+ 800, 50, 30},
+ });
+ damos_test_commit_for(test,
+ &(struct damos){
+ .pattern = (struct damos_access_pattern){
+ 1, 2, 3, 4, 5, 6},
+ .action = DAMOS_PAGEOUT,
+ .apply_interval_us = 1000000,
+ .wmarks = (struct damos_watermarks){
+ DAMOS_WMARK_FREE_MEM_RATE,
+ 900, 100, 50},
+ },
+ &(struct damos){
+ .pattern = (struct damos_access_pattern){
+ 2, 3, 4, 5, 6, 7},
+ .action = DAMOS_MIGRATE_HOT,
+ .apply_interval_us = 2000000,
+ .target_nid = 5,
+ });
+}
+
+static struct damon_target *damon_test_help_setup_target(
+ unsigned long region_start_end[][2], int nr_regions)
+{
+ struct damon_target *t;
+ struct damon_region *r;
+ int i;
+
+ t = damon_new_target();
+ if (!t)
+ return NULL;
+ for (i = 0; i < nr_regions; i++) {
+ r = damon_new_region(region_start_end[i][0],
+ region_start_end[i][1]);
+ if (!r) {
+ damon_free_target(t);
+ return NULL;
+ }
+ damon_add_region(r, t);
+ }
+ return t;
+}
+
+static void damon_test_commit_target_regions_for(struct kunit *test,
+ unsigned long dst_start_end[][2], int nr_dst_regions,
+ unsigned long src_start_end[][2], int nr_src_regions,
+ unsigned long expect_start_end[][2], int nr_expect_regions)
+{
+ struct damon_target *dst_target, *src_target;
+ struct damon_region *r;
+ int i;
+
+ dst_target = damon_test_help_setup_target(dst_start_end, nr_dst_regions);
+ if (!dst_target)
+ kunit_skip(test, "dst target setup fail");
+ src_target = damon_test_help_setup_target(src_start_end, nr_src_regions);
+ if (!src_target) {
+ damon_free_target(dst_target);
+ kunit_skip(test, "src target setup fail");
+ }
+ damon_commit_target_regions(dst_target, src_target, 1);
+ i = 0;
+ damon_for_each_region(r, dst_target) {
+ KUNIT_EXPECT_EQ(test, r->ar.start, expect_start_end[i][0]);
+ KUNIT_EXPECT_EQ(test, r->ar.end, expect_start_end[i][1]);
+ i++;
+ }
+ KUNIT_EXPECT_EQ(test, damon_nr_regions(dst_target), nr_expect_regions);
+ KUNIT_EXPECT_EQ(test, i, nr_expect_regions);
+ damon_free_target(dst_target);
+ damon_free_target(src_target);
+}
+
+static void damon_test_commit_target_regions(struct kunit *test)
+{
+ damon_test_commit_target_regions_for(test,
+ (unsigned long[][2]) {{3, 8}, {8, 10}}, 2,
+ (unsigned long[][2]) {{4, 6}}, 1,
+ (unsigned long[][2]) {{4, 6}}, 1);
+ damon_test_commit_target_regions_for(test,
+ (unsigned long[][2]) {{3, 8}, {8, 10}}, 2,
+ (unsigned long[][2]) {}, 0,
+ (unsigned long[][2]) {{3, 8}, {8, 10}}, 2);
}
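
/*
 * The two cases above pin down the assumed contract of
 * damon_commit_target_regions(): a non-empty source makes the
 * destination adopt the source's region layout, while an empty source
 * leaves the destination's regions untouched. A hypothetical third
 * case in the same style, under that assumption:
 */
static void sketch_commit_target_regions_adopt(struct kunit *test)
{
	/* dst {10, 20} committed with src {12, 18} becomes {12, 18}. */
	damon_test_commit_target_regions_for(test,
			(unsigned long[][2]) {{10, 20}}, 1,
			(unsigned long[][2]) {{12, 18}}, 1,
			(unsigned long[][2]) {{12, 18}}, 1);
}
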
static void damos_test_filter_out(struct kunit *test)
@@ -442,58 +1035,66 @@ static void damos_test_filter_out(struct kunit *test)
struct damos_filter *f;
f = damos_new_filter(DAMOS_FILTER_TYPE_ADDR, true, false);
- f->addr_range = (struct damon_addr_range){
- .start = DAMON_MIN_REGION * 2, .end = DAMON_MIN_REGION * 6};
+ if (!f)
+ kunit_skip(test, "filter alloc fail");
+ f->addr_range = (struct damon_addr_range){.start = 2, .end = 6};
t = damon_new_target();
- r = damon_new_region(DAMON_MIN_REGION * 3, DAMON_MIN_REGION * 5);
+ if (!t) {
+ damos_destroy_filter(f);
+ kunit_skip(test, "target alloc fail");
+ }
+ r = damon_new_region(3, 5);
+ if (!r) {
+ damos_destroy_filter(f);
+ damon_free_target(t);
+ kunit_skip(test, "region alloc fail");
+ }
damon_add_region(r, t);
/* region in the range */
- KUNIT_EXPECT_TRUE(test,
- damos_filter_match(NULL, t, r, f, DAMON_MIN_REGION));
+ KUNIT_EXPECT_TRUE(test, damos_filter_match(NULL, t, r, f, 1));
KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1);
/* region before the range */
- r->ar.start = DAMON_MIN_REGION * 1;
- r->ar.end = DAMON_MIN_REGION * 2;
+ r->ar.start = 1;
+ r->ar.end = 2;
KUNIT_EXPECT_FALSE(test,
- damos_filter_match(NULL, t, r, f, DAMON_MIN_REGION));
+ damos_filter_match(NULL, t, r, f, 1));
KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1);
/* region after the range */
- r->ar.start = DAMON_MIN_REGION * 6;
- r->ar.end = DAMON_MIN_REGION * 8;
+ r->ar.start = 6;
+ r->ar.end = 8;
KUNIT_EXPECT_FALSE(test,
- damos_filter_match(NULL, t, r, f, DAMON_MIN_REGION));
+ damos_filter_match(NULL, t, r, f, 1));
KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1);
/* region started before the range */
- r->ar.start = DAMON_MIN_REGION * 1;
- r->ar.end = DAMON_MIN_REGION * 4;
- KUNIT_EXPECT_FALSE(test,
- damos_filter_match(NULL, t, r, f, DAMON_MIN_REGION));
+ r->ar.start = 1;
+ r->ar.end = 4;
+ KUNIT_EXPECT_FALSE(test, damos_filter_match(NULL, t, r, f, 1));
/* filter should have split the region */
- KUNIT_EXPECT_EQ(test, r->ar.start, DAMON_MIN_REGION * 1);
- KUNIT_EXPECT_EQ(test, r->ar.end, DAMON_MIN_REGION * 2);
+ KUNIT_EXPECT_EQ(test, r->ar.start, 1);
+ KUNIT_EXPECT_EQ(test, r->ar.end, 2);
KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 2);
r2 = damon_next_region(r);
- KUNIT_EXPECT_EQ(test, r2->ar.start, DAMON_MIN_REGION * 2);
- KUNIT_EXPECT_EQ(test, r2->ar.end, DAMON_MIN_REGION * 4);
+ KUNIT_EXPECT_EQ(test, r2->ar.start, 2);
+ KUNIT_EXPECT_EQ(test, r2->ar.end, 4);
damon_destroy_region(r2, t);
/* region started in the range */
- r->ar.start = DAMON_MIN_REGION * 2;
- r->ar.end = DAMON_MIN_REGION * 8;
+ r->ar.start = 2;
+ r->ar.end = 8;
KUNIT_EXPECT_TRUE(test,
- damos_filter_match(NULL, t, r, f, DAMON_MIN_REGION));
+ damos_filter_match(NULL, t, r, f, 1));
/* filter should have split the region */
- KUNIT_EXPECT_EQ(test, r->ar.start, DAMON_MIN_REGION * 2);
- KUNIT_EXPECT_EQ(test, r->ar.end, DAMON_MIN_REGION * 6);
+ KUNIT_EXPECT_EQ(test, r->ar.start, 2);
+ KUNIT_EXPECT_EQ(test, r->ar.end, 6);
KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 2);
r2 = damon_next_region(r);
- KUNIT_EXPECT_EQ(test, r2->ar.start, DAMON_MIN_REGION * 6);
- KUNIT_EXPECT_EQ(test, r2->ar.end, DAMON_MIN_REGION * 8);
+ KUNIT_EXPECT_EQ(test, r2->ar.start, 6);
+ KUNIT_EXPECT_EQ(test, r2->ar.end, 8);
damon_destroy_region(r2, t);
damon_free_target(t);
@@ -536,7 +1137,7 @@ static void damon_test_set_filters_default_reject(struct kunit *test)
struct damos scheme;
struct damos_filter *target_filter, *anon_filter;
- INIT_LIST_HEAD(&scheme.filters);
+ INIT_LIST_HEAD(&scheme.core_filters);
INIT_LIST_HEAD(&scheme.ops_filters);
damos_set_filters_default_reject(&scheme);
@@ -548,6 +1149,8 @@ static void damon_test_set_filters_default_reject(struct kunit *test)
KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, false);
target_filter = damos_new_filter(DAMOS_FILTER_TYPE_TARGET, true, true);
+ if (!target_filter)
+ kunit_skip(test, "filter alloc fail");
damos_add_filter(&scheme, target_filter);
damos_set_filters_default_reject(&scheme);
/*
@@ -573,6 +1176,10 @@ static void damon_test_set_filters_default_reject(struct kunit *test)
KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, false);
anon_filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true, true);
+ if (!anon_filter) {
+ damos_free_filter(target_filter);
+ kunit_skip(test, "anon_filter alloc fail");
+ }
damos_add_filter(&scheme, anon_filter);
damos_set_filters_default_reject(&scheme);
@@ -598,6 +1205,9 @@ static void damon_test_set_filters_default_reject(struct kunit *test)
*/
KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, false);
KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, true);
+
+ damos_free_filter(anon_filter);
+ damos_free_filter(target_filter);
}
static struct kunit_case damon_test_cases[] = {
@@ -615,7 +1225,13 @@ static struct kunit_case damon_test_cases[] = {
KUNIT_CASE(damon_test_set_attrs),
KUNIT_CASE(damon_test_moving_sum),
KUNIT_CASE(damos_test_new_filter),
+ KUNIT_CASE(damos_test_commit_quota_goal),
+ KUNIT_CASE(damos_test_commit_quota_goals),
+ KUNIT_CASE(damos_test_commit_quota),
+ KUNIT_CASE(damos_test_commit_dests),
KUNIT_CASE(damos_test_commit_filter),
+ KUNIT_CASE(damos_test_commit),
+ KUNIT_CASE(damon_test_commit_target_regions),
KUNIT_CASE(damos_test_filter_out),
KUNIT_CASE(damon_test_feed_loop_next_input),
KUNIT_CASE(damon_test_set_filters_default_reject),
diff --git a/mm/damon/tests/sysfs-kunit.h b/mm/damon/tests/sysfs-kunit.h
index 7b5c7b307da9..0c665ed255a3 100644
--- a/mm/damon/tests/sysfs-kunit.h
+++ b/mm/damon/tests/sysfs-kunit.h
@@ -45,16 +45,41 @@ static void damon_sysfs_test_add_targets(struct kunit *test)
struct damon_ctx *ctx;
sysfs_targets = damon_sysfs_targets_alloc();
+ if (!sysfs_targets)
+ kunit_skip(test, "sysfs_targets alloc fail");
sysfs_targets->nr = 1;
sysfs_targets->targets_arr = kmalloc_array(1,
sizeof(*sysfs_targets->targets_arr), GFP_KERNEL);
+ if (!sysfs_targets->targets_arr) {
+ kfree(sysfs_targets);
+ kunit_skip(test, "targets_arr alloc fail");
+ }
sysfs_target = damon_sysfs_target_alloc();
+ if (!sysfs_target) {
+ kfree(sysfs_targets->targets_arr);
+ kfree(sysfs_targets);
+ kunit_skip(test, "sysfs_target alloc fail");
+ }
sysfs_target->pid = __damon_sysfs_test_get_any_pid(12, 100);
sysfs_target->regions = damon_sysfs_regions_alloc();
+ if (!sysfs_target->regions) {
+ kfree(sysfs_targets->targets_arr);
+ kfree(sysfs_targets);
+ kfree(sysfs_target);
+ kunit_skip(test, "sysfs_regions alloc fail");
+ }
+
sysfs_targets->targets_arr[0] = sysfs_target;
ctx = damon_new_ctx();
+ if (!ctx) {
+ kfree(sysfs_targets->targets_arr);
+ kfree(sysfs_targets);
+ kfree(sysfs_target->regions);
+ kfree(sysfs_target);
+ kunit_skip(test, "ctx alloc fail");
+ }
damon_sysfs_add_targets(ctx, sysfs_targets);
KUNIT_EXPECT_EQ(test, 1u, nr_damon_targets(ctx));
diff --git a/mm/damon/tests/vaddr-kunit.h b/mm/damon/tests/vaddr-kunit.h
index fce38dd53cf8..30dc5459f1d2 100644
--- a/mm/damon/tests/vaddr-kunit.h
+++ b/mm/damon/tests/vaddr-kunit.h
@@ -136,8 +136,14 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
int i;
t = damon_new_target();
+ if (!t)
+ kunit_skip(test, "target alloc fail");
for (i = 0; i < nr_regions / 2; i++) {
r = damon_new_region(regions[i * 2], regions[i * 2 + 1]);
+ if (!r) {
+ damon_destroy_target(t, NULL);
+ kunit_skip(test, "region alloc fail");
+ }
damon_add_region(r, t);
}
@@ -250,7 +256,16 @@ static void damon_test_split_evenly_fail(struct kunit *test,
unsigned long start, unsigned long end, unsigned int nr_pieces)
{
struct damon_target *t = damon_new_target();
- struct damon_region *r = damon_new_region(start, end);
+ struct damon_region *r;
+
+ if (!t)
+ kunit_skip(test, "target alloc fail");
+
+ r = damon_new_region(start, end);
+ if (!r) {
+ damon_free_target(t);
+ kunit_skip(test, "region alloc fail");
+ }
damon_add_region(r, t);
KUNIT_EXPECT_EQ(test,
@@ -269,10 +284,17 @@ static void damon_test_split_evenly_succ(struct kunit *test,
unsigned long start, unsigned long end, unsigned int nr_pieces)
{
struct damon_target *t = damon_new_target();
- struct damon_region *r = damon_new_region(start, end);
+ struct damon_region *r;
unsigned long expected_width = (end - start) / nr_pieces;
unsigned long i = 0;
+ if (!t)
+ kunit_skip(test, "target alloc fail");
+ r = damon_new_region(start, end);
+ if (!r) {
+ damon_free_target(t);
+ kunit_skip(test, "region alloc fail");
+ }
damon_add_region(r, t);
KUNIT_EXPECT_EQ(test,
damon_va_evenly_split_region(t, r, nr_pieces), 0);