author     Takashi Iwai <tiwai@suse.de>    2024-10-25 10:54:09 +0200
committer  Takashi Iwai <tiwai@suse.de>    2024-10-25 10:54:09 +0200
commit     0216ded72db896b24cbdd8cd6531482571b25cf6
tree       56b578e29657bd3df8236c5f0cf6b8f13c17232a /lib/maple_tree.c
parent     52345d35622026b99271edc1c58ad7cfef3b7567
parent     04177158cf98a79744937893b100020d77e6f9ac
Merge branch 'topic/compress-accel' into for-next
Pull compress-offload API extension for accel operation mode
Signed-off-by: Takashi Iwai <tiwai@suse.de>
Diffstat (limited to 'lib/maple_tree.c')
 lib/maple_tree.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 20990ecba2dd..3619301dda2e 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -2196,6 +2196,8 @@ static inline void mas_node_or_none(struct ma_state *mas,
 
 /*
  * mas_wr_node_walk() - Find the correct offset for the index in the @mas.
+ *                      If @mas->index cannot be found within the containing
+ *                      node, we traverse to the last entry in the node.
  * @wr_mas: The maple write state
  *
  * Uses mas_slot_locked() and does not need to worry about dead nodes.
@@ -3532,7 +3534,7 @@ static bool mas_wr_walk(struct ma_wr_state *wr_mas)
 	return true;
 }
 
-static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
+static void mas_wr_walk_index(struct ma_wr_state *wr_mas)
 {
 	struct ma_state *mas = wr_mas->mas;
 
@@ -3541,11 +3543,9 @@ static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
 		wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
 						  mas->offset);
 		if (ma_is_leaf(wr_mas->type))
-			return true;
+			return;
 		mas_wr_walk_traverse(wr_mas);
-
 	}
-	return true;
 }
 /*
  * mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs.
@@ -3765,8 +3765,8 @@ static noinline void mas_wr_spanning_store(struct ma_wr_state *wr_mas)
 	memset(&b_node, 0, sizeof(struct maple_big_node));
 	/* Copy l_mas and store the value in b_node. */
 	mas_store_b_node(&l_wr_mas, &b_node, l_mas.end);
-	/* Copy r_mas into b_node. */
-	if (r_mas.offset <= r_mas.end)
+	/* Copy r_mas into b_node if there is anything to copy. */
+	if (r_mas.max > r_mas.last)
 		mas_mab_cp(&r_mas, r_mas.offset, r_mas.end,
 			   &b_node, b_node.b_end + 1);
 	else
@@ -4218,7 +4218,7 @@ static inline void mas_wr_store_type(struct ma_wr_state *wr_mas)
 
 	/* Potential spanning rebalance collapsing a node */
 	if (new_end < mt_min_slots[wr_mas->type]) {
-		if (!mte_is_root(mas->node)) {
+		if (!mte_is_root(mas->node) && !(mas->mas_flags & MA_STATE_BULK)) {
 			mas->store_type = wr_rebalance;
 			return;
 		}