// SPDX-License-Identifier: GPL-2.0+
/*
 * Maple Tree implementation
 * Copyright (c) 2018-2022 Oracle Corporation
 * Authors: Liam R. Howlett <Liam.Howlett@oracle.com>
 *	    Matthew Wilcox <willy@infradead.org>
 */

/*
 * DOC: Interesting implementation details of the Maple Tree
 *
 * Each node type has a number of slots for entries and a number of slots for
 * pivots.  In the case of dense nodes, the pivots are implied by the position
 * and are simply the slot index + the minimum of the node.
 *
 * In regular B-Tree terms, pivots are called keys.  The term pivot is used to
 * indicate that the tree is specifying ranges.  Pivots may appear in the
 * subtree with an entry attached to the value, whereas keys are unique to a
 * specific position of a B-tree.  Pivot values are inclusive of the slot with
 * the same index.
 *
 * The following illustrates the layout of a range64 node's slots and pivots.
 *
 *  Slots -> | 0 | 1 | 2 | ... | 12 | 13 | 14 | 15 |
 *           ┬   ┬   ┬   ┬     ┬    ┬    ┬    ┬    ┬
 *           │   │   │   │     │    │    │    │    └─ Implied maximum
 *           │   │   │   │     │    │    │    └─ Pivot 14
 *           │   │   │   │     │    │    └─ Pivot 13
 *           │   │   │   │     │    └─ Pivot 12
 *           │   │   │   │     └─ Pivot 11
 *           │   │   │   └─ Pivot [2,11]
 *           │   │   └─ Pivot 1
 *           │   └─ Pivot 0
 *           └─ Implied minimum
 *
 * Slot contents:
 *  Internal (non-leaf) nodes contain pointers to other nodes.
 *  Leaf nodes contain entries.
 *
 * The location of interest is often referred to as an offset.  All offsets
 * have a slot, but the last offset has an implied pivot from the node above
 * (or ULONG_MAX for the root node).
 *
 * Ranges complicate certain write activities.  When modifying any of
 * the B-tree variants, it is known that one entry will either be added or
 * deleted.  When modifying the Maple Tree, one store operation may overwrite
 * the entire data set, or one half of the tree, or the middle half of the tree.
 */

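/*
 * Illustrative sketch (not part of the original implementation): with the
 * default 64-bit geometry, storing one range through the public API lands
 * in a leaf as an entry bounded by pivots.  The tree name and value below
 * are assumptions for the example only.
 *
 *	DEFINE_MTREE(tree);
 *	void *entry = xa_mk_value(42);
 *
 *	mtree_store_range(&tree, 5, 15, entry, GFP_KERNEL);
 *	// Conceptual leaf layout afterwards:
 *	//   slot 0: NULL    pivot 0: 4    (implied minimum 0 up to 4)
 *	//   slot 1: entry   pivot 1: 15   (indices 5-15 return entry)
 *	//   slot 2: NULL                  (16 up to the implied maximum)
 *	mtree_load(&tree, 10);	// returns entry
 */
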
#include <linux/maple_tree.h>
#include <linux/xarray.h>
#include <linux/types.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/limits.h>
#include <asm/barrier.h>

#define CREATE_TRACE_POINTS
#include <trace/events/maple_tree.h>

#define MA_ROOT_PARENT 1

/*
 * Maple state flags
 * * MA_STATE_BULK		- Bulk insert mode
 * * MA_STATE_REBALANCE		- Indicate a rebalance during bulk insert
 * * MA_STATE_PREALLOC		- Preallocated nodes, WARN_ON allocation
 */
#define MA_STATE_BULK		1
#define MA_STATE_REBALANCE	2
#define MA_STATE_PREALLOC	4

#define ma_parent_ptr(x) ((struct maple_pnode *)(x))
#define ma_mnode_ptr(x) ((struct maple_node *)(x))
#define ma_enode_ptr(x) ((struct maple_enode *)(x))
static struct kmem_cache *maple_node_cache;

#ifdef CONFIG_DEBUG_MAPLE_TREE
static const unsigned long mt_max[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS,
	[maple_leaf_64]		= ULONG_MAX,
	[maple_range_64]	= ULONG_MAX,
	[maple_arange_64]	= ULONG_MAX,
};
#define mt_node_max(x) mt_max[mte_node_type(x)]
#endif

static const unsigned char mt_slots[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS,
	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS,
	[maple_range_64]	= MAPLE_RANGE64_SLOTS,
	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS,
};
#define mt_slot_count(x) mt_slots[mte_node_type(x)]

static const unsigned char mt_pivots[] = {
	[maple_dense]		= 0,
	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS - 1,
	[maple_range_64]	= MAPLE_RANGE64_SLOTS - 1,
	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS - 1,
};
#define mt_pivot_count(x) mt_pivots[mte_node_type(x)]

static const unsigned char mt_min_slots[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS / 2,
	[maple_leaf_64]		= (MAPLE_RANGE64_SLOTS / 2) - 2,
	[maple_range_64]	= (MAPLE_RANGE64_SLOTS / 2) - 2,
	[maple_arange_64]	= (MAPLE_ARANGE64_SLOTS / 2) - 1,
};
#define mt_min_slot_count(x) mt_min_slots[mte_node_type(x)]

#define MAPLE_BIG_NODE_SLOTS	(MAPLE_RANGE64_SLOTS * 2 + 2)
#define MAPLE_BIG_NODE_GAPS	(MAPLE_ARANGE64_SLOTS * 2 + 1)

struct maple_big_node {
	struct maple_pnode *parent;
	unsigned long pivot[MAPLE_BIG_NODE_SLOTS - 1];
	union {
		struct maple_enode *slot[MAPLE_BIG_NODE_SLOTS];
		struct {
			unsigned long padding[MAPLE_BIG_NODE_GAPS];
			unsigned long gap[MAPLE_BIG_NODE_GAPS];
		};
	};
	unsigned char b_end;
	enum maple_type type;
};

/*
 * The maple_subtree_state is used to build a tree to replace a segment of an
 * existing tree in a more atomic way.  Any walkers of the older tree will hit a
 * dead node and restart on updates.
 */
struct maple_subtree_state {
	struct ma_state *orig_l;	/* Original left side of subtree */
	struct ma_state *orig_r;	/* Original right side of subtree */
	struct ma_state *l;		/* New left side of subtree */
	struct ma_state *m;		/* New middle of subtree (rare) */
	struct ma_state *r;		/* New right side of subtree */
	struct ma_topiary *free;	/* nodes to be freed */
	struct ma_topiary *destroy;	/* Nodes to be destroyed (walked and freed) */
	struct maple_big_node *bn;
};

#ifdef CONFIG_KASAN_STACK
/* Prevent mas_wr_bnode() from exceeding the stack frame limit */
#define noinline_for_kasan noinline_for_stack
#else
#define noinline_for_kasan inline
#endif

/* Functions */
static inline struct maple_node *mt_alloc_one(gfp_t gfp)
{
	return kmem_cache_alloc(maple_node_cache, gfp);
}

static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes)
{
	return kmem_cache_alloc_bulk(maple_node_cache, gfp, size, nodes);
}

static inline void mt_free_bulk(size_t size, void __rcu **nodes)
{
	kmem_cache_free_bulk(maple_node_cache, size, (void **)nodes);
}

static void mt_free_rcu(struct rcu_head *head)
{
	struct maple_node *node = container_of(head, struct maple_node, rcu);

	kmem_cache_free(maple_node_cache, node);
}

/*
 * ma_free_rcu() - Use rcu callback to free a maple node
 * @node: The node to free
 *
 * The maple tree uses the parent pointer to indicate this node is no longer in
 * use and will be freed.
 */
static void ma_free_rcu(struct maple_node *node)
{
	WARN_ON(node->parent != ma_parent_ptr(node));
	call_rcu(&node->rcu, mt_free_rcu);
}

static void mas_set_height(struct ma_state *mas)
{
	unsigned int new_flags = mas->tree->ma_flags;

	new_flags &= ~MT_FLAGS_HEIGHT_MASK;
	MAS_BUG_ON(mas, mas->depth > MAPLE_HEIGHT_MAX);
	new_flags |= mas->depth << MT_FLAGS_HEIGHT_OFFSET;
	mas->tree->ma_flags = new_flags;
}

static unsigned int mas_mt_height(struct ma_state *mas)
{
	return mt_height(mas->tree);
}

static inline enum maple_type mte_node_type(const struct maple_enode *entry)
{
	return ((unsigned long)entry >> MAPLE_NODE_TYPE_SHIFT) &
		MAPLE_NODE_TYPE_MASK;
}

static inline bool ma_is_dense(const enum maple_type type)
{
	return type < maple_leaf_64;
}

static inline bool ma_is_leaf(const enum maple_type type)
{
	return type < maple_range_64;
}

static inline bool mte_is_leaf(const struct maple_enode *entry)
{
	return ma_is_leaf(mte_node_type(entry));
}

/*
 * We also reserve values with the bottom two bits set to '10' which are
 * below 4096
 */
static inline bool mt_is_reserved(const void *entry)
{
	return ((unsigned long)entry < MAPLE_RESERVED_RANGE) &&
		xa_is_internal(entry);
}

static inline void mas_set_err(struct ma_state *mas, long err)
{
	mas->node = MA_ERROR(err);
}

static inline bool mas_is_ptr(const struct ma_state *mas)
{
	return mas->node == MAS_ROOT;
}

static inline bool mas_is_start(const struct ma_state *mas)
{
	return mas->node == MAS_START;
}

bool mas_is_err(struct ma_state *mas)
{
	return xa_is_err(mas->node);
}

static inline bool mas_searchable(struct ma_state *mas)
{
	if (mas_is_none(mas))
		return false;

	if (mas_is_ptr(mas))
		return false;

	return true;
}

static inline struct maple_node *mte_to_node(const struct maple_enode *entry)
{
	return (struct maple_node *)((unsigned long)entry & ~MAPLE_NODE_MASK);
}

/*
 * mte_to_mat() - Convert a maple encoded node to a maple topiary node.
 * @entry: The maple encoded node
 *
 * Return: a maple topiary pointer
 */
static inline struct maple_topiary *mte_to_mat(const struct maple_enode *entry)
{
	return (struct maple_topiary *)
		((unsigned long)entry & ~MAPLE_NODE_MASK);
}

/*
 * mas_mn() - Get the maple state node.
 * @mas: The maple state
 *
 * Return: the maple node (not encoded - bare pointer).
 */
static inline struct maple_node *mas_mn(const struct ma_state *mas)
{
	return mte_to_node(mas->node);
}

/*
 * mte_set_node_dead() - Set a maple encoded node as dead.
 * @mn: The maple encoded node.
 */
static inline void mte_set_node_dead(struct maple_enode *mn)
{
	mte_to_node(mn)->parent = ma_parent_ptr(mte_to_node(mn));
	smp_wmb(); /* Needed for RCU */
}

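/*
 * A minimal sketch of the dead-node convention (illustrative, using the
 * helpers defined in this file): a dead node's parent pointer refers back
 * to the node itself, which RCU readers re-check to detect that a walk
 * raced with a writer.
 *
 *	mte_set_node_dead(enode);		// node->parent = node
 *	...
 *	if (ma_dead_node(mte_to_node(enode)))	// parent == node?
 *		goto retry;			// restart the walk
 */
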
/* Bit 1 indicates the root is a node */
#define MAPLE_ROOT_NODE			0x02
/* maple_type stored bit 3-6 */
#define MAPLE_ENODE_TYPE_SHIFT		0x03
/* Bit 2 means a NULL somewhere below */
#define MAPLE_ENODE_NULL		0x04

static inline struct maple_enode *mt_mk_node(const struct maple_node *node,
					     enum maple_type type)
{
	return (void *)((unsigned long)node |
			(type << MAPLE_ENODE_TYPE_SHIFT) | MAPLE_ENODE_NULL);
}

static inline void *mte_mk_root(const struct maple_enode *node)
{
	return (void *)((unsigned long)node | MAPLE_ROOT_NODE);
}

static inline void *mte_safe_root(const struct maple_enode *node)
{
	return (void *)((unsigned long)node & ~MAPLE_ROOT_NODE);
}

static inline void *mte_set_full(const struct maple_enode *node)
{
	return (void *)((unsigned long)node & ~MAPLE_ENODE_NULL);
}

static inline void *mte_clear_full(const struct maple_enode *node)
{
	return (void *)((unsigned long)node | MAPLE_ENODE_NULL);
}

static inline bool mte_has_null(const struct maple_enode *node)
{
	return (unsigned long)node & MAPLE_ENODE_NULL;
}

static inline bool ma_is_root(struct maple_node *node)
{
	return ((unsigned long)node->parent & MA_ROOT_PARENT);
}

static inline bool mte_is_root(const struct maple_enode *node)
{
	return ma_is_root(mte_to_node(node));
}

static inline bool mas_is_root_limits(const struct ma_state *mas)
{
	return !mas->min && mas->max == ULONG_MAX;
}

static inline bool mt_is_alloc(struct maple_tree *mt)
{
	return (mt->ma_flags & MT_FLAGS_ALLOC_RANGE);
}

/*
 * The Parent Pointer
 * Excluding root, the parent pointer is 256B aligned like all other tree nodes.
 * When storing 32 or 64 bit values, the offset can fit into 5 bits.  The 16
 * bit values need an extra bit to store the offset.  This extra bit comes from
 * a reuse of the last bit in the node type.  This is possible by using bit 1 to
 * indicate if bit 2 is part of the type or the slot.
 *
 * Note types:
 *  0x??1 = Root
 *  0x?00 = 16 bit nodes
 *  0x010 = 32 bit nodes
 *  0x110 = 64 bit nodes
 *
 * Slot size and alignment
 *  0b??1 : Root
 *  0b?00 : 16 bit values, type in 0-1, slot in 2-7
 *  0b010 : 32 bit values, type in 0-2, slot in 3-7
 *  0b110 : 64 bit values, type in 0-2, slot in 3-7
 */
#define MAPLE_PARENT_ROOT		0x01

#define MAPLE_PARENT_SLOT_SHIFT		0x03
#define MAPLE_PARENT_SLOT_MASK		0xF8

#define MAPLE_PARENT_16B_SLOT_SHIFT	0x02
#define MAPLE_PARENT_16B_SLOT_MASK	0xFC

#define MAPLE_PARENT_RANGE64		0x06
#define MAPLE_PARENT_RANGE32		0x04
#define MAPLE_PARENT_NOT_RANGE16	0x02

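/*
 * A worked example of the encoding (illustrative only), assuming a
 * maple_range_64 parent node at a 256B-aligned address 0x...f00 and a
 * child sitting in slot 4:
 *
 *	val  = 0x...f00;			// parent, low 8 bits clear
 *	val |= 4 << MAPLE_PARENT_SLOT_SHIFT;	// slot bits: 0x20
 *	val |= MAPLE_PARENT_RANGE64;		// type bits: 0x06
 *						// stored parent: 0x...f26
 *
 * Decoding reverses the steps: bit 1 being set selects the 64/32 bit
 * layout, and (val & MAPLE_PARENT_SLOT_MASK) >> MAPLE_PARENT_SLOT_SHIFT
 * recovers slot 4.
 */
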
/*
 * mte_parent_shift() - Get the parent shift for the slot storage.
 * @parent: The parent pointer cast as an unsigned long
 * Return: The shift into that pointer to the start of the slot
 */
static inline unsigned long mte_parent_shift(unsigned long parent)
{
	/* Note bit 1 == 0 means 16B */
	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
		return MAPLE_PARENT_SLOT_SHIFT;

	return MAPLE_PARENT_16B_SLOT_SHIFT;
}

/*
 * mte_parent_slot_mask() - Get the slot mask for the parent.
 * @parent: The parent pointer cast as an unsigned long.
 * Return: The slot mask for that parent.
 */
static inline unsigned long mte_parent_slot_mask(unsigned long parent)
{
	/* Note bit 1 == 0 means 16B */
	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
		return MAPLE_PARENT_SLOT_MASK;

	return MAPLE_PARENT_16B_SLOT_MASK;
}

/*
 * mas_parent_type() - Return the maple_type of the parent from the stored
 * parent type.
 * @mas: The maple state
 * @enode: The maple_enode to extract the parent's enum
 * Return: The node->parent maple_type
 */
static inline
enum maple_type mas_parent_type(struct ma_state *mas, struct maple_enode *enode)
{
	unsigned long p_type;

	p_type = (unsigned long)mte_to_node(enode)->parent;
	if (WARN_ON(p_type & MAPLE_PARENT_ROOT))
		return 0;

	p_type &= MAPLE_NODE_MASK;
	p_type &= ~mte_parent_slot_mask(p_type);
	switch (p_type) {
	case MAPLE_PARENT_RANGE64: /* or MAPLE_PARENT_ARANGE64 */
		if (mt_is_alloc(mas->tree))
			return maple_arange_64;
		return maple_range_64;
	}

	return 0;
}

/*
 * mas_set_parent() - Set the parent node and encode the slot
 * @mas: The maple state
 * @enode: The encoded maple node.
 * @parent: The encoded maple node that is the parent of @enode.
 * @slot: The slot that @enode resides in @parent.
 *
 * Slot number is encoded in the enode->parent bit 3-6 or 2-6, depending on the
 * parent type.
 */
static inline
void mas_set_parent(struct ma_state *mas, struct maple_enode *enode,
		    const struct maple_enode *parent, unsigned char slot)
{
	unsigned long val = (unsigned long)parent;
	unsigned long shift;
	unsigned long type;
	enum maple_type p_type = mte_node_type(parent);

	MAS_BUG_ON(mas, p_type == maple_dense);
	MAS_BUG_ON(mas, p_type == maple_leaf_64);

	switch (p_type) {
	case maple_range_64:
	case maple_arange_64:
		shift = MAPLE_PARENT_SLOT_SHIFT;
		type = MAPLE_PARENT_RANGE64;
		break;
	default:
		shift = type = 0;
		break;
	}

	val &= ~MAPLE_NODE_MASK; /* Clear all node metadata in parent */
	val |= (slot << shift) | type;
	mte_to_node(enode)->parent = ma_parent_ptr(val);
}

/*
 * mte_parent_slot() - get the parent slot of @enode.
 * @enode: The encoded maple node.
 *
 * Return: The slot in the parent node where @enode resides.
 */
static inline unsigned int mte_parent_slot(const struct maple_enode *enode)
{
	unsigned long val = (unsigned long)mte_to_node(enode)->parent;

	if (val & MA_ROOT_PARENT)
		return 0;

	/*
	 * Okay to use MAPLE_PARENT_16B_SLOT_MASK as the last bit will be lost
	 * by shift if the parent shift is MAPLE_PARENT_SLOT_SHIFT
	 */
	return (val & MAPLE_PARENT_16B_SLOT_MASK) >> mte_parent_shift(val);
}

/*
 * mte_parent() - Get the parent of @node.
 * @node: The encoded maple node.
 *
 * Return: The parent maple node.
 */
static inline struct maple_node *mte_parent(const struct maple_enode *enode)
{
	return (void *)((unsigned long)
			(mte_to_node(enode)->parent) & ~MAPLE_NODE_MASK);
}

/*
 * ma_dead_node() - check if the @node is dead.
 * @node: The maple node
 *
 * Return: true if dead, false otherwise.
 */
static inline bool ma_dead_node(const struct maple_node *node)
{
	struct maple_node *parent;

	/* Do not reorder reads from the node prior to the parent check */
	smp_rmb();
	parent = (void *)((unsigned long) node->parent & ~MAPLE_NODE_MASK);
	return (parent == node);
}

/*
 * mte_dead_node() - check if the @enode is dead.
 * @enode: The encoded maple node
 *
 * Return: true if dead, false otherwise.
 */
static inline bool mte_dead_node(const struct maple_enode *enode)
{
	struct maple_node *parent, *node;

	node = mte_to_node(enode);
	/* Do not reorder reads from the node prior to the parent check */
	smp_rmb();
	parent = mte_parent(enode);
	return (parent == node);
}

/*
 * mas_allocated() - Get the number of nodes allocated in a maple state.
 * @mas: The maple state
 *
 * The ma_state alloc member is overloaded to hold a pointer to the first
 * allocated node or to the number of requested nodes to allocate.  If bit 0 is
 * set, then the alloc contains the number of requested nodes.  If there is an
 * allocated node, then the total allocated nodes is in that node.
 *
 * Return: The total number of nodes allocated
 */
static inline unsigned long mas_allocated(const struct ma_state *mas)
{
	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1))
		return 0;

	return mas->alloc->total;
}

/*
 * mas_set_alloc_req() - Set the requested number of allocations.
 * @mas: the maple state
 * @count: the number of allocations.
 *
 * The requested number of allocations is either in the first allocated node,
 * located in @mas->alloc->request_count, or directly in @mas->alloc if there is
 * no allocated node.  Set the request either in the node or do the necessary
 * encoding to store in @mas->alloc directly.
 */
static inline void mas_set_alloc_req(struct ma_state *mas, unsigned long count)
{
	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) {
		if (!count)
			mas->alloc = NULL;
		else
			mas->alloc = (struct maple_alloc *)(((count) << 1U) | 1U);
		return;
	}

	mas->alloc->request_count = count;
}

/*
 * mas_alloc_req() - get the requested number of allocations.
 * @mas: The maple state
 *
 * The alloc count is either stored directly in @mas, or in
 * @mas->alloc->request_count if there is at least one node allocated.  Decode
 * the request count if it's stored directly in @mas->alloc.
 *
 * Return: The allocation request count.
 */
static inline unsigned int mas_alloc_req(const struct ma_state *mas)
{
	if ((unsigned long)mas->alloc & 0x1)
		return (unsigned long)(mas->alloc) >> 1;
	else if (mas->alloc)
		return mas->alloc->request_count;

	return 0;
}

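/*
 * A short sketch of the request encoding above, assuming nothing has been
 * allocated yet: the count is stored directly in mas->alloc with bit 0
 * set, so it can never be confused with a (256B-aligned) node pointer.
 *
 *	mas_set_alloc_req(&mas, 3);	// mas->alloc = (3 << 1) | 1 == 0x7
 *	mas_alloc_req(&mas);		// (0x7 >> 1) == 3
 *	mas_allocated(&mas);		// 0: bit 0 set means no nodes yet
 */
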
/*
 * ma_pivots() - Get a pointer to the maple node pivots.
 * @node - the maple node
 * @type - the node type
 *
 * In the event of a dead node, this array may be %NULL
 *
 * Return: A pointer to the maple node pivots
 */
static inline unsigned long *ma_pivots(struct maple_node *node,
				       enum maple_type type)
{
	switch (type) {
	case maple_arange_64:
		return node->ma64.pivot;
	case maple_range_64:
	case maple_leaf_64:
		return node->mr64.pivot;
	case maple_dense:
		return NULL;
	}
	return NULL;
}

/*
 * ma_gaps() - Get a pointer to the maple node gaps.
 * @node - the maple node
 * @type - the node type
 *
 * Return: A pointer to the maple node gaps
 */
static inline unsigned long *ma_gaps(struct maple_node *node,
				     enum maple_type type)
{
	switch (type) {
	case maple_arange_64:
		return node->ma64.gap;
	case maple_range_64:
	case maple_leaf_64:
	case maple_dense:
		return NULL;
	}
	return NULL;
}

/*
 * mas_pivot() - Get the pivot at @piv of the maple encoded node.
 * @mas: The maple state.
 * @piv: The pivot offset.
 *
 * Return: the pivot at @piv of the node in @mas.
 */
static inline unsigned long mas_pivot(struct ma_state *mas, unsigned char piv)
{
	struct maple_node *node = mas_mn(mas);
	enum maple_type type = mte_node_type(mas->node);

	if (MAS_WARN_ON(mas, piv >= mt_pivots[type])) {
		mas_set_err(mas, -EIO);
		return 0;
	}

	switch (type) {
	case maple_arange_64:
		return node->ma64.pivot[piv];
	case maple_range_64:
	case maple_leaf_64:
		return node->mr64.pivot[piv];
	case maple_dense:
		return 0;
	}
	return 0;
}

/*
 * mas_safe_pivot() - get the pivot at @piv or mas->max.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @piv: The pivot to fetch
 * @type: The maple node type
 *
 * Return: The pivot at @piv within the limit of the @pivots array, @mas->max
 * otherwise.
 */
static inline unsigned long
mas_safe_pivot(const struct ma_state *mas, unsigned long *pivots,
	       unsigned char piv, enum maple_type type)
{
	if (piv >= mt_pivots[type])
		return mas->max;

	return pivots[piv];
}

/*
 * mas_safe_min() - Return the minimum for a given offset.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @offset: The offset into the pivot array
 *
 * Return: The minimum range value that is contained in @offset.
 */
static inline unsigned long
mas_safe_min(struct ma_state *mas, unsigned long *pivots, unsigned char offset)
{
	if (likely(offset))
		return pivots[offset - 1] + 1;

	return mas->min;
}

/*
 * mas_logical_pivot() - Get the logical pivot of a given offset.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @offset: The offset into the pivot array
 * @type: The maple node type
 *
 * When there is no value at a pivot (beyond the end of the data), then the
 * pivot is actually @mas->max.
 *
 * Return: the logical pivot of a given @offset.
 */
static inline unsigned long
mas_logical_pivot(struct ma_state *mas, unsigned long *pivots,
		  unsigned char offset, enum maple_type type)
{
	unsigned long lpiv = mas_safe_pivot(mas, pivots, offset, type);

	if (likely(lpiv))
		return lpiv;

	if (likely(offset))
		return mas->max;

	return lpiv;
}

/*
 * mte_set_pivot() - Set a pivot to a value in an encoded maple node.
 * @mn: The encoded maple node
 * @piv: The pivot offset
 * @val: The value of the pivot
 */
static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv,
				 unsigned long val)
{
	struct maple_node *node = mte_to_node(mn);
	enum maple_type type = mte_node_type(mn);

	BUG_ON(piv >= mt_pivots[type]);
	switch (type) {
	default:
	case maple_range_64:
	case maple_leaf_64:
		node->mr64.pivot[piv] = val;
		break;
	case maple_arange_64:
		node->ma64.pivot[piv] = val;
		break;
	case maple_dense:
		break;
	}
}

/*
 * ma_slots() - Get a pointer to the maple node slots.
 * @mn: The maple node
 * @mt: The maple node type
 *
 * Return: A pointer to the maple node slots
 */
static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
{
	switch (mt) {
	default:
	case maple_arange_64:
		return mn->ma64.slot;
	case maple_range_64:
	case maple_leaf_64:
		return mn->mr64.slot;
	case maple_dense:
		return mn->slot;
	}
}

static inline bool mt_locked(const struct maple_tree *mt)
{
	return mt_external_lock(mt) ? mt_lock_is_held(mt) :
		lockdep_is_held(&mt->ma_lock);
}

static inline void *mt_slot(const struct maple_tree *mt,
		void __rcu **slots, unsigned char offset)
{
	return rcu_dereference_check(slots[offset], mt_locked(mt));
}

static inline void *mt_slot_locked(struct maple_tree *mt, void __rcu **slots,
				   unsigned char offset)
{
	return rcu_dereference_protected(slots[offset], mt_locked(mt));
}

/*
 * mas_slot_locked() - Get the slot value when holding the maple tree lock.
 * @mas: The maple state
 * @slots: The pointer to the slots
 * @offset: The offset into the slots array to fetch
 *
 * Return: The entry stored in @slots at the @offset.
 */
static inline void *mas_slot_locked(struct ma_state *mas, void __rcu **slots,
				    unsigned char offset)
{
	return mt_slot_locked(mas->tree, slots, offset);
}

/*
 * mas_slot() - Get the slot value when not holding the maple tree lock.
 * @mas: The maple state
 * @slots: The pointer to the slots
 * @offset: The offset into the slots array to fetch
 *
 * Return: The entry stored in @slots at the @offset
 */
static inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
			     unsigned char offset)
{
	return mt_slot(mas->tree, slots, offset);
}

/*
 * mas_root() - Get the maple tree root.
 * @mas: The maple state.
 *
 * Return: The pointer to the root of the tree
 */
static inline void *mas_root(struct ma_state *mas)
{
	return rcu_dereference_check(mas->tree->ma_root, mt_locked(mas->tree));
}

static inline void *mt_root_locked(struct maple_tree *mt)
{
	return rcu_dereference_protected(mt->ma_root, mt_locked(mt));
}

/*
 * mas_root_locked() - Get the maple tree root when holding the maple tree lock.
 * @mas: The maple state.
 *
 * Return: The pointer to the root of the tree
 */
static inline void *mas_root_locked(struct ma_state *mas)
{
	return mt_root_locked(mas->tree);
}

static inline struct maple_metadata *ma_meta(struct maple_node *mn,
					     enum maple_type mt)
{
	switch (mt) {
	case maple_arange_64:
		return &mn->ma64.meta;
	default:
		return &mn->mr64.meta;
	}
}

/*
 * ma_set_meta() - Set the metadata information of a node.
 * @mn: The maple node
 * @mt: The maple node type
 * @offset: The offset of the highest sub-gap in this node.
 * @end: The end of the data in this node.
 */
static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt,
			       unsigned char offset, unsigned char end)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	meta->gap = offset;
	meta->end = end;
}

/*
 * mt_clear_meta() - clear the metadata information of a node, if it exists
 * @mt: The maple tree
 * @mn: The maple node
 * @type: The maple node type
 */
static inline void mt_clear_meta(struct maple_tree *mt, struct maple_node *mn,
				 enum maple_type type)
{
	struct maple_metadata *meta;
	unsigned long *pivots;
	void __rcu **slots;
	void *next;

	switch (type) {
	case maple_range_64:
		pivots = mn->mr64.pivot;
		if (unlikely(pivots[MAPLE_RANGE64_SLOTS - 2])) {
			slots = mn->mr64.slot;
			next = mt_slot_locked(mt, slots,
					      MAPLE_RANGE64_SLOTS - 1);
			if (unlikely((mte_to_node(next) &&
				      mte_node_type(next))))
				return; /* no metadata, could be node */
		}
		fallthrough;
	case maple_arange_64:
		meta = ma_meta(mn, type);
		break;
	default:
		return;
	}

	meta->gap = 0;
	meta->end = 0;
}

/*
 * ma_meta_end() - Get the data end of a node from the metadata
 * @mn: The maple node
 * @mt: The maple node type
 */
static inline unsigned char ma_meta_end(struct maple_node *mn,
					enum maple_type mt)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	return meta->end;
}

/*
 * ma_meta_gap() - Get the largest gap location of a node from the metadata
 * @mn: The maple node
 * @mt: The maple node type
 */
static inline unsigned char ma_meta_gap(struct maple_node *mn,
					enum maple_type mt)
{
	return mn->ma64.meta.gap;
}

/*
 * ma_set_meta_gap() - Set the largest gap location in a node's metadata
 * @mn: The maple node
 * @mt: The maple node type
 * @offset: The location of the largest gap.
 */
static inline void ma_set_meta_gap(struct maple_node *mn, enum maple_type mt,
				   unsigned char offset)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	meta->gap = offset;
}

/*
 * mat_add() - Add a @dead_enode to the ma_topiary of a list of dead nodes.
 * @mat - the ma_topiary, a linked list of dead nodes.
 * @dead_enode - the node to be marked as dead and added to the tail of the list
 *
 * Add the @dead_enode to the linked list in @mat.
 */
static inline void mat_add(struct ma_topiary *mat,
			   struct maple_enode *dead_enode)
{
	mte_set_node_dead(dead_enode);
	mte_to_mat(dead_enode)->next = NULL;
	if (!mat->tail) {
		mat->tail = mat->head = dead_enode;
		return;
	}

	mte_to_mat(mat->tail)->next = dead_enode;
	mat->tail = dead_enode;
}

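/*
 * A minimal usage sketch, assuming the MA_TOPIARY() initializer from
 * <linux/maple_tree.h>: dead nodes are chained through their now-unused
 * slot space and reaped in a single pass once the replacement subtree is
 * visible.
 *
 *	MA_TOPIARY(free, mas->tree);
 *
 *	mat_add(&free, old_enode);	// mark dead and append
 *	...
 *	mas_mat_free(mas, &free);	// RCU-free the whole list
 */
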
static void mte_destroy_walk(struct maple_enode *, struct maple_tree *);
static inline void mas_free(struct ma_state *mas, struct maple_enode *used);

/*
 * mas_mat_free() - Free all nodes in a dead list.
 * @mas - the maple state
 * @mat - the ma_topiary linked list of dead nodes to free.
 *
 * Free walk a dead list.
 */
static void mas_mat_free(struct ma_state *mas, struct ma_topiary *mat)
{
	struct maple_enode *next;

	while (mat->head) {
		next = mte_to_mat(mat->head)->next;
		mas_free(mas, mat->head);
		mat->head = next;
	}
}

/*
 * mas_mat_destroy() - Free all nodes and subtrees in a dead list.
 * @mas - the maple state
 * @mat - the ma_topiary linked list of dead nodes to free.
 *
 * Destroy walk a dead list.
 */
static void mas_mat_destroy(struct ma_state *mas, struct ma_topiary *mat)
{
	struct maple_enode *next;

	while (mat->head) {
		next = mte_to_mat(mat->head)->next;
		mte_destroy_walk(mat->head, mat->mtree);
		mat->head = next;
	}
}

/*
 * mas_descend() - Descend into the slot stored in the ma_state.
 * @mas - the maple state.
 *
 * Note: Not RCU safe, only use in write side or debug code.
 */
static inline void mas_descend(struct ma_state *mas)
{
	enum maple_type type;
	unsigned long *pivots;
	struct maple_node *node;
	void __rcu **slots;

	node = mas_mn(mas);
	type = mte_node_type(mas->node);
	pivots = ma_pivots(node, type);
	slots = ma_slots(node, type);

	if (mas->offset)
		mas->min = pivots[mas->offset - 1] + 1;
	mas->max = mas_safe_pivot(mas, pivots, mas->offset, type);
	mas->node = mas_slot(mas, slots, mas->offset);
}

/*
 * mte_set_gap() - Set a maple node gap.
 * @mn: The encoded maple node
 * @gap: The offset of the gap to set
 * @val: The gap value
 */
static inline void mte_set_gap(const struct maple_enode *mn,
			       unsigned char gap, unsigned long val)
{
	switch (mte_node_type(mn)) {
	default:
		break;
	case maple_arange_64:
		mte_to_node(mn)->ma64.gap[gap] = val;
		break;
	}
}

/*
 * mas_ascend() - Walk up a level of the tree.
 * @mas: The maple state
 *
 * Sets the @mas->max and @mas->min to the correct values when walking up.  This
 * may cause several levels of walking up to find the correct min and max.
 * May find a dead node which will cause a premature return.
 * Return: 1 on dead node, 0 otherwise
 */
static int mas_ascend(struct ma_state *mas)
{
	struct maple_enode *p_enode; /* parent enode. */
	struct maple_enode *a_enode; /* ancestor enode. */
	struct maple_node *a_node; /* ancestor node. */
	struct maple_node *p_node; /* parent node. */
	unsigned char a_slot;
	enum maple_type a_type;
	unsigned long min, max;
	unsigned long *pivots;
	bool set_max = false, set_min = false;

	a_node = mas_mn(mas);
	if (ma_is_root(a_node)) {
		mas->offset = 0;
		return 0;
	}

	p_node = mte_parent(mas->node);
	if (unlikely(a_node == p_node))
		return 1;

	a_type = mas_parent_type(mas, mas->node);
	mas->offset = mte_parent_slot(mas->node);
	a_enode = mt_mk_node(p_node, a_type);

	/* Check to make sure all parent information is still accurate */
	if (p_node != mte_parent(mas->node))
		return 1;

	mas->node = a_enode;

	if (mte_is_root(a_enode)) {
		mas->max = ULONG_MAX;
		mas->min = 0;
		return 0;
	}

	if (!mas->min)
		set_min = true;

	if (mas->max == ULONG_MAX)
		set_max = true;

	min = 0;
	max = ULONG_MAX;
	do {
		p_enode = a_enode;
		a_type = mas_parent_type(mas, p_enode);
		a_node = mte_parent(p_enode);
		a_slot = mte_parent_slot(p_enode);
		a_enode = mt_mk_node(a_node, a_type);
		pivots = ma_pivots(a_node, a_type);

		if (unlikely(ma_dead_node(a_node)))
			return 1;

		if (!set_min && a_slot) {
			set_min = true;
			min = pivots[a_slot - 1] + 1;
		}

		if (!set_max && a_slot < mt_pivots[a_type]) {
			set_max = true;
			max = pivots[a_slot];
		}

		if (unlikely(ma_dead_node(a_node)))
			return 1;

		if (unlikely(ma_is_root(a_node)))
			break;

	} while (!set_min || !set_max);

	mas->max = max;
	mas->min = min;
	return 0;
}

/*
 * mas_pop_node() - Get a previously allocated maple node from the maple state.
 * @mas: The maple state
 *
 * Return: A pointer to a maple node.
 */
static inline struct maple_node *mas_pop_node(struct ma_state *mas)
{
	struct maple_alloc *ret, *node = mas->alloc;
	unsigned long total = mas_allocated(mas);
	unsigned int req = mas_alloc_req(mas);

	/* nothing or a request pending. */
	if (WARN_ON(!total))
		return NULL;

	if (total == 1) {
		/* single allocation in this ma_state */
		mas->alloc = NULL;
		ret = node;
		goto single_node;
	}

	if (node->node_count == 1) {
		/* Single allocation in this node. */
		mas->alloc = node->slot[0];
		mas->alloc->total = node->total - 1;
		ret = node;
		goto new_head;
	}

	node->total--;
	ret = node->slot[--node->node_count];
	node->slot[node->node_count] = NULL;

single_node:
new_head:
	if (req) {
		req++;
		mas_set_alloc_req(mas, req);
	}

	memset(ret, 0, sizeof(*ret));
	return (struct maple_node *)ret;
}

/*
 * mas_push_node() - Push a node back on the maple state allocation.
 * @mas: The maple state
 * @used: The used maple node
 *
 * Stores the maple node back into @mas->alloc for reuse.  Updates allocated and
 * requested node count as necessary.
 */
static inline void mas_push_node(struct ma_state *mas, struct maple_node *used)
{
	struct maple_alloc *reuse = (struct maple_alloc *)used;
	struct maple_alloc *head = mas->alloc;
	unsigned long count;
	unsigned int requested = mas_alloc_req(mas);

	count = mas_allocated(mas);

	reuse->request_count = 0;
	reuse->node_count = 0;
	if (count && (head->node_count < MAPLE_ALLOC_SLOTS)) {
		head->slot[head->node_count++] = reuse;
		head->total++;
		goto done;
	}

	reuse->total = 1;
	if ((head) && !((unsigned long)head & 0x1)) {
		reuse->slot[0] = head;
		reuse->node_count = 1;
		reuse->total += head->total;
	}

	mas->alloc = reuse;
done:
	if (requested > 1)
		mas_set_alloc_req(mas, requested - 1);
}

/*
 * mas_alloc_nodes() - Allocate nodes into a maple state
 * @mas: The maple state
 * @gfp: The GFP Flags
 */
static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
{
	struct maple_alloc *node;
	unsigned long allocated = mas_allocated(mas);
	unsigned int requested = mas_alloc_req(mas);
	unsigned int count;
	void **slots = NULL;
	unsigned int max_req = 0;

	if (!requested)
		return;

	mas_set_alloc_req(mas, 0);
	if (mas->mas_flags & MA_STATE_PREALLOC) {
		if (allocated)
			return;
		WARN_ON(!allocated);
	}

	if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS) {
		node = (struct maple_alloc *)mt_alloc_one(gfp);
		if (!node)
			goto nomem_one;

		if (allocated) {
			node->slot[0] = mas->alloc;
			node->node_count = 1;
		} else {
			node->node_count = 0;
		}

		mas->alloc = node;
		node->total = ++allocated;
		requested--;
	}

	node = mas->alloc;
	node->request_count = 0;
	while (requested) {
		max_req = MAPLE_ALLOC_SLOTS - node->node_count;
		slots = (void **)&node->slot[node->node_count];
		max_req = min(requested, max_req);
		count = mt_alloc_bulk(gfp, max_req, slots);
		if (!count)
			goto nomem_bulk;

		if (node->node_count == 0) {
			node->slot[0]->node_count = 0;
			node->slot[0]->request_count = 0;
		}

		node->node_count += count;
		allocated += count;
		node = node->slot[0];
		requested -= count;
	}
	mas->alloc->total = allocated;
	return;

nomem_bulk:
	/* Clean up potential freed allocations on bulk failure */
	memset(slots, 0, max_req * sizeof(unsigned long));
nomem_one:
	mas_set_alloc_req(mas, requested);
	if (mas->alloc && !(((unsigned long)mas->alloc & 0x1)))
		mas->alloc->total = allocated;
	mas_set_err(mas, -ENOMEM);
}

/*
 * mas_free() - Free an encoded maple node
 * @mas: The maple state
 * @used: The encoded maple node to free.
 *
 * Uses rcu free if necessary, pushes @used back on the maple state allocations
 * otherwise.
 */
static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
{
	struct maple_node *tmp = mte_to_node(used);

	if (mt_in_rcu(mas->tree))
		ma_free_rcu(tmp);
	else
		mas_push_node(mas, tmp);
}

/*
 * mas_node_count_gfp() - Check if enough nodes are allocated and request more
 * if there are not enough nodes.
 * @mas: The maple state
 * @count: The number of nodes needed
 * @gfp: the gfp flags
 */
static void mas_node_count_gfp(struct ma_state *mas, int count, gfp_t gfp)
{
	unsigned long allocated = mas_allocated(mas);

	if (allocated < count) {
		mas_set_alloc_req(mas, count - allocated);
		mas_alloc_nodes(mas, gfp);
	}
}

/*
 * mas_node_count() - Check if enough nodes are allocated and request more if
 * there are not enough nodes.
 * @mas: The maple state
 * @count: The number of nodes needed
 *
 * Note: Uses GFP_NOWAIT | __GFP_NOWARN for gfp flags.
 */
static void mas_node_count(struct ma_state *mas, int count)
{
	return mas_node_count_gfp(mas, count, GFP_NOWAIT | __GFP_NOWARN);
}

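/*
 * A sketch of the typical write-side pattern built on the helpers above
 * (illustrative; the exact worst-case count depends on the operation):
 * request nodes up front, propagate -ENOMEM through the maple state, then
 * consume nodes without further failure handling.
 *
 *	mas_node_count(mas, 1 + mas_mt_height(mas) * 3);
 *	if (mas_is_err(mas))
 *		return 0;		// caller unwinds on -ENOMEM
 *
 *	new_node = mas_pop_node(mas);	// cannot fail after the check
 */
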
/*
 * mas_start() - Sets up maple state for operations.
 * @mas: The maple state.
 *
 * If mas->node == MAS_START, then set the min, max and depth to
 * defaults.
 *
 * Return:
 * - If mas->node is an error or not MAS_START, return NULL.
 * - If it's an empty tree:     NULL & mas->node == MAS_NONE
 * - If it's a single entry:    The entry & mas->node == MAS_ROOT
 * - If it's a tree:            NULL & mas->node == safe root node.
 */
static inline struct maple_enode *mas_start(struct ma_state *mas)
{
	if (likely(mas_is_start(mas))) {
		struct maple_enode *root;

		mas->min = 0;
		mas->max = ULONG_MAX;

retry:
		mas->depth = 0;
		root = mas_root(mas);
		/* Tree with nodes */
		if (likely(xa_is_node(root))) {
			mas->depth = 1;
			mas->node = mte_safe_root(root);
			mas->offset = 0;
			if (mte_dead_node(mas->node))
				goto retry;

			return NULL;
		}

		/* empty tree */
		if (unlikely(!root)) {
			mas->node = MAS_NONE;
			mas->offset = MAPLE_NODE_SLOTS;
			return NULL;
		}

		/* Single entry tree */
		mas->node = MAS_ROOT;
		mas->offset = MAPLE_NODE_SLOTS;

		/* Single entry tree. */
		if (mas->index > 0)
			return NULL;

		return root;
	}

	return NULL;
}

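/*
 * A compact sketch of how callers consume mas_start() (illustrative,
 * mirroring the read-side helpers later in this file):
 *
 *	entry = mas_start(mas);
 *	if (mas_is_none(mas))	// empty tree
 *		return NULL;
 *
 *	if (mas_is_ptr(mas))	// single entry at index 0
 *		return entry;
 *
 *	// otherwise mas->node is the safe root and the walk descends
 */
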
/*
 * ma_data_end() - Find the end of the data in a node.
 * @node: The maple node
 * @type: The maple node type
 * @pivots: The array of pivots in the node
 * @max: The maximum value in the node
 *
 * Uses metadata to find the end of the data when possible.
 * Return: The zero indexed last slot with data (may be null).
 */
static inline unsigned char ma_data_end(struct maple_node *node,
					enum maple_type type,
					unsigned long *pivots,
					unsigned long max)
{
	unsigned char offset;

	if (!pivots)
		return 0;

	if (type == maple_arange_64)
		return ma_meta_end(node, type);

	offset = mt_pivots[type] - 1;
	if (likely(!pivots[offset]))
		return ma_meta_end(node, type);

	if (likely(pivots[offset] == max))
		return offset;

	return mt_pivots[type];
}

/*
 * mas_data_end() - Find the end of the data (slot).
 * @mas: the maple state
 *
 * This method is optimized to check the metadata of a node if the node type
 * supports data end metadata.
 *
 * Return: The zero indexed last slot with data (may be null).
 */
static inline unsigned char mas_data_end(struct ma_state *mas)
{
	enum maple_type type;
	struct maple_node *node;
	unsigned char offset;
	unsigned long *pivots;

	type = mte_node_type(mas->node);
	node = mas_mn(mas);
	if (type == maple_arange_64)
		return ma_meta_end(node, type);

	pivots = ma_pivots(node, type);
	if (unlikely(ma_dead_node(node)))
		return 0;

	offset = mt_pivots[type] - 1;
	if (likely(!pivots[offset]))
		return ma_meta_end(node, type);

	if (likely(pivots[offset] == mas->max))
		return offset;

	return mt_pivots[type];
}

/*
 * mas_leaf_max_gap() - Returns the largest gap in a leaf node
 * @mas - the maple state
 *
 * Return: The maximum gap in the leaf.
 */
static unsigned long mas_leaf_max_gap(struct ma_state *mas)
{
	enum maple_type mt;
	unsigned long pstart, gap, max_gap;
	struct maple_node *mn;
	unsigned long *pivots;
	void __rcu **slots;
	unsigned char i;
	unsigned char max_piv;

	mt = mte_node_type(mas->node);
	mn = mas_mn(mas);
	slots = ma_slots(mn, mt);
	max_gap = 0;
	if (unlikely(ma_is_dense(mt))) {
		gap = 0;
		for (i = 0; i < mt_slots[mt]; i++) {
			if (slots[i]) {
				if (gap > max_gap)
					max_gap = gap;
				gap = 0;
			} else {
				gap++;
			}
		}
		if (gap > max_gap)
			max_gap = gap;
		return max_gap;
	}

	/*
	 * Check the first implied pivot optimizes the loop below and slot 1 may
	 * be skipped if there is a gap in slot 0.
	 */
	pivots = ma_pivots(mn, mt);
	if (likely(!slots[0])) {
		max_gap = pivots[0] - mas->min + 1;
		i = 2;
	} else {
		i = 1;
	}

	/* reduce max_piv as the special case is checked before the loop */
	max_piv = ma_data_end(mn, mt, pivots, mas->max) - 1;
	/*
	 * Check end implied pivot which can only be a gap on the right most
	 * node.
	 */
	if (unlikely(mas->max == ULONG_MAX) && !slots[max_piv + 1]) {
		gap = ULONG_MAX - pivots[max_piv];
		if (gap > max_gap)
			max_gap = gap;
	}

	for (; i <= max_piv; i++) {
		/* data == no gap. */
		if (likely(slots[i]))
			continue;

		pstart = pivots[i - 1];
		gap = pivots[i] - pstart;
		if (gap > max_gap)
			max_gap = gap;

		/* There cannot be two gaps in a row. */
		i++;
	}
	return max_gap;
}

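/*
 * A worked example of the scan above, assuming a leaf with mas->min == 0
 * and mas->max == 100:
 *
 *	slot:	0	1	2
 *	entry:	NULL	A	B
 *	pivot:	9	49	(implied 100)
 *
 * Slot 0 is the implied-minimum gap: pivots[0] - mas->min + 1 == 10.  An
 * interior NULL at slot i would count as pivots[i] - pivots[i - 1], and
 * the trailing implied pivot is only examined when mas->max == ULONG_MAX,
 * i.e. in the right-most node of the tree.
 */
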
/*
 * ma_max_gap() - Get the maximum gap in a maple node (non-leaf)
 * @node: The maple node
 * @gaps: The pointer to the gaps
 * @mt: The maple node type
 * @off: Pointer to store the offset location of the gap.
 *
 * Uses the metadata data end to scan backwards across set gaps.
 *
 * Return: The maximum gap value
 */
static inline unsigned long
ma_max_gap(struct maple_node *node, unsigned long *gaps, enum maple_type mt,
	   unsigned char *off)
{
	unsigned char offset, i;
	unsigned long max_gap = 0;

	i = offset = ma_meta_end(node, mt);
	do {
		if (gaps[i] > max_gap) {
			max_gap = gaps[i];
			offset = i;
		}
	} while (i--);

	*off = offset;
	return max_gap;
}

/*
 * mas_max_gap() - find the largest gap in a non-leaf node and set the slot.
 * @mas: The maple state.
 *
 * If the metadata gap is set to MAPLE_ARANGE64_META_MAX, there is no gap.
 *
 * Return: The gap value.
 */
static inline unsigned long mas_max_gap(struct ma_state *mas)
{
	unsigned long *gaps;
	unsigned char offset;
	enum maple_type mt;
	struct maple_node *node;

	mt = mte_node_type(mas->node);
	if (ma_is_leaf(mt))
		return mas_leaf_max_gap(mas);

	node = mas_mn(mas);
	MAS_BUG_ON(mas, mt != maple_arange_64);
	offset = ma_meta_gap(node, mt);
	if (offset == MAPLE_ARANGE64_META_MAX)
		return 0;

	gaps = ma_gaps(node, mt);
	return gaps[offset];
}

/*
 * mas_parent_gap() - Set the parent gap and any gaps above, as needed
 * @mas: The maple state
 * @offset: The gap offset in the parent to set
 * @new: The new gap value.
 *
 * Set the parent gap then continue to set the gap upwards, using the metadata
 * of the parent to see if it is necessary to check the node above.
 */
static inline void mas_parent_gap(struct ma_state *mas, unsigned char offset,
		unsigned long new)
{
	unsigned long meta_gap = 0;
	struct maple_node *pnode;
	struct maple_enode *penode;
	unsigned long *pgaps;
	unsigned char meta_offset;
	enum maple_type pmt;

	pnode = mte_parent(mas->node);
	pmt = mas_parent_type(mas, mas->node);
	penode = mt_mk_node(pnode, pmt);
	pgaps = ma_gaps(pnode, pmt);

ascend:
	MAS_BUG_ON(mas, pmt != maple_arange_64);
	meta_offset = ma_meta_gap(pnode, pmt);
	if (meta_offset == MAPLE_ARANGE64_META_MAX)
		meta_gap = 0;
	else
		meta_gap = pgaps[meta_offset];

	pgaps[offset] = new;

	if (meta_gap == new)
		return;

	if (offset != meta_offset) {
		if (meta_gap > new)
			return;

		ma_set_meta_gap(pnode, pmt, offset);
	} else if (new < meta_gap) {
		meta_offset = 15;
		new = ma_max_gap(pnode, pgaps, pmt, &meta_offset);
		ma_set_meta_gap(pnode, pmt, meta_offset);
	}

	if (ma_is_root(pnode))
		return;

	/* Go to the parent node. */
	pnode = mte_parent(penode);
	pmt = mas_parent_type(mas, penode);
	pgaps = ma_gaps(pnode, pmt);
	offset = mte_parent_slot(penode);
	penode = mt_mk_node(pnode, pmt);
	goto ascend;
}

/*
 * mas_update_gap() - Update a node's gaps and propagate up if necessary.
 * @mas - the maple state.
 */
static inline void mas_update_gap(struct ma_state *mas)
{
	unsigned char pslot;
	unsigned long p_gap;
	unsigned long max_gap;

	if (!mt_is_alloc(mas->tree))
		return;

	if (mte_is_root(mas->node))
		return;

	max_gap = mas_max_gap(mas);

	pslot = mte_parent_slot(mas->node);
	p_gap = ma_gaps(mte_parent(mas->node),
			mas_parent_type(mas, mas->node))[pslot];

	if (p_gap != max_gap)
		mas_parent_gap(mas, pslot, max_gap);
}

/*
 * mas_adopt_children() - Set the parent pointer of all nodes in @parent to
 * @parent with the slot encoded.
 * @mas - the maple state (for the tree)
 * @parent - the maple encoded node containing the children.
 */
static inline void mas_adopt_children(struct ma_state *mas,
		struct maple_enode *parent)
{
	enum maple_type type = mte_node_type(parent);
	struct maple_node *node = mas_mn(mas);
	void __rcu **slots = ma_slots(node, type);
	unsigned long *pivots = ma_pivots(node, type);
	struct maple_enode *child;
	unsigned char offset;

	offset = ma_data_end(node, type, pivots, mas->max);
	do {
		child = mas_slot_locked(mas, slots, offset);
		mas_set_parent(mas, child, parent, offset);
	} while (offset--);
}

/*
 * mas_replace() - Replace a maple node in the tree with mas->node.  Uses the
 * parent encoding to locate the maple node in the tree.
 * @mas - the ma_state to use for operations.
 * @advanced - boolean to adopt the child nodes and free the old node (false) or
 * leave the node (true) and handle the adoption and free elsewhere.
 */
static inline void mas_replace(struct ma_state *mas, bool advanced)
	__must_hold(mas->tree->ma_lock)
{
	struct maple_node *mn = mas_mn(mas);
	struct maple_enode *old_enode;
	unsigned char offset = 0;
	void __rcu **slots = NULL;

	if (ma_is_root(mn)) {
		old_enode = mas_root_locked(mas);
	} else {
		offset = mte_parent_slot(mas->node);
		slots = ma_slots(mte_parent(mas->node),
				 mas_parent_type(mas, mas->node));
		old_enode = mas_slot_locked(mas, slots, offset);
	}

	if (!advanced && !mte_is_leaf(mas->node))
		mas_adopt_children(mas, mas->node);

	if (mte_is_root(mas->node)) {
		mn->parent = ma_parent_ptr(
			      ((unsigned long)mas->tree | MA_ROOT_PARENT));
		rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
		mas_set_height(mas);
	} else {
		rcu_assign_pointer(slots[offset], mas->node);
	}

	if (!advanced) {
		mte_set_node_dead(old_enode);
		mas_free(mas, old_enode);
	}
}

/*
 * mas_new_child() - Find the new child of a node.
 * @mas: the maple state
 * @child: the maple state to store the child.
 *
 * Return: true if a new child is found, false otherwise.
 */
static inline bool mas_new_child(struct ma_state *mas, struct ma_state *child)
	__must_hold(mas->tree->ma_lock)
{
	enum maple_type mt;
	unsigned char offset;
	unsigned char end;
	unsigned long *pivots;
	struct maple_enode *entry;
	struct maple_node *node;
	void __rcu **slots;

	mt = mte_node_type(mas->node);
	node = mas_mn(mas);
	slots = ma_slots(node, mt);
	pivots = ma_pivots(node, mt);
	end = ma_data_end(node, mt, pivots, mas->max);
	for (offset = mas->offset; offset <= end; offset++) {
		entry = mas_slot_locked(mas, slots, offset);
		if (mte_parent(entry) == node) {
			*child = *mas;
			mas->offset = offset + 1;
			child->offset = offset;
			mas_descend(child);
			child->offset = 0;
			return true;
		}
	}

	return false;
}

/*
 * mab_shift_right() - Shift the data in mab right. Note, does not clean out the
 * old data or set b_node->b_end.
 * @b_node: the maple_big_node
 * @shift: the shift count
 */
static inline void mab_shift_right(struct maple_big_node *b_node,
				   unsigned char shift)
{
	unsigned long size = b_node->b_end * sizeof(unsigned long);

	memmove(b_node->pivot + shift, b_node->pivot, size);
	memmove(b_node->slot + shift, b_node->slot, size);
	if (b_node->type == maple_arange_64)
		memmove(b_node->gap + shift, b_node->gap, size);
}

/*
 * mab_middle_node() - Check if a middle node is needed (unlikely)
 * @b_node: the maple_big_node that contains the data.
 * @split: the potential split location
 * @slot_count: the size that can be stored in a single node being considered.
 *
 * Return: true if a middle node is required.
 */
static inline bool mab_middle_node(struct maple_big_node *b_node, int split,
				   unsigned char slot_count)
{
	unsigned char size = b_node->b_end;

	if (size >= 2 * slot_count)
		return true;

	if (!b_node->slot[split] && (size >= 2 * slot_count - 1))
		return true;

	return false;
}

/*
 * mab_no_null_split() - ensure the split doesn't fall on a NULL
 * @b_node: the maple_big_node with the data
 * @split: the suggested split location
 * @slot_count: the number of slots in the node being considered.
 *
 * Return: the split location.
 */
static inline int mab_no_null_split(struct maple_big_node *b_node,
				    unsigned char split, unsigned char slot_count)
{
	if (!b_node->slot[split]) {
		/*
		 * If the split is less than the max slot && the right side will
		 * still be sufficient, then increment the split on NULL.
		 */
		if ((split < slot_count - 1) &&
		    (b_node->b_end - split) > (mt_min_slots[b_node->type]))
			split++;
		else
			split--;
	}
	return split;
}

/*
 * mab_calc_split() - Calculate the split location and if there needs to be two
 * splits.
 * @mas: The maple state
 * @bn: The maple_big_node with the data
 * @mid_split: The second split, if required.  0 otherwise.
 * @min: The minimum range of the data in @bn.
 *
 * Return: The first split location.  The middle split is set in @mid_split.
 */
static inline int mab_calc_split(struct ma_state *mas,
	 struct maple_big_node *bn, unsigned char *mid_split, unsigned long min)
{
	unsigned char b_end = bn->b_end;
	int split = b_end / 2; /* Assume equal split. */
	unsigned char slot_min, slot_count = mt_slots[bn->type];

	/*
	 * To support gap tracking, all NULL entries are kept together and a node cannot
	 * end on a NULL entry, with the exception of the left-most leaf.  The
	 * limitation means that the split of a node must be checked for this condition
	 * and be able to put more data in one direction or the other.
	 */
	if (unlikely((mas->mas_flags & MA_STATE_BULK))) {
		*mid_split = 0;
		split = b_end - mt_min_slots[bn->type];

		if (!ma_is_leaf(bn->type))
			return split;

		mas->mas_flags |= MA_STATE_REBALANCE;
		if (!bn->slot[split])
			split--;
		return split;
	}

	/*
	 * Although extremely rare, it is possible to enter what is known as the 3-way
	 * split scenario.  The 3-way split comes about by means of a store of a range
	 * that overwrites the end and beginning of two full nodes.  The result is a set
	 * of entries that cannot be stored in 2 nodes.  Sometimes, these two nodes can
	 * also be located in different parent nodes which are also full.  This can
	 * carry upwards all the way to the root in the worst case.
	 */
	if (unlikely(mab_middle_node(bn, split, slot_count))) {
		split = b_end / 3;
		*mid_split = split * 2;
	} else {
		slot_min = mt_min_slots[bn->type];

		*mid_split = 0;
		/*
		 * Avoid having a range less than the slot count unless it
		 * causes one node to be deficient.
		 * NOTE: mt_min_slots is 1 based, b_end and split are zero.
		 */
		while ((split < slot_count - 1) &&
		       ((bn->pivot[split] - min) < slot_count - 1) &&
		       (b_end - split > slot_min))
			split++;
	}

	/* Avoid ending a node on a NULL entry */
	split = mab_no_null_split(bn, split, slot_count);

	if (unlikely(*mid_split))
		*mid_split = mab_no_null_split(bn, *mid_split, slot_count);

	return split;
}

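/*
 * A worked example, assuming 16-slot range64 nodes: a big node holding
 * b_end == 20 entries does not require a middle node (mab_middle_node()
 * needs roughly twice the slot count), so the split starts at 20 / 2 and
 * is then nudged upward past undersized ranges and off any NULL entry:
 *
 *	split = mab_calc_split(mas, bn, &mid_split, mas->min);
 *	// left node receives offsets 0..split, the right node the rest;
 *	// mid_split stays 0 because no 3-way split is needed.
 */
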
/*
 * mas_mab_cp() - Copy data from a maple state inclusively to a maple_big_node
 * and set @b_node->b_end to the next free slot.
 * @mas: The maple state
 * @mas_start: The starting slot to copy
 * @mas_end: The end slot to copy (inclusively)
 * @b_node: The maple_big_node to place the data
 * @mab_start: The starting location in maple_big_node to store the data.
 */
static inline void mas_mab_cp(struct ma_state *mas, unsigned char mas_start,
			unsigned char mas_end, struct maple_big_node *b_node,
			unsigned char mab_start)
{
	enum maple_type mt;
	struct maple_node *node;
	void __rcu **slots;
	unsigned long *pivots, *gaps;
	int i = mas_start, j = mab_start;
	unsigned char piv_end;

	node = mas_mn(mas);
	mt = mte_node_type(mas->node);
	pivots = ma_pivots(node, mt);
	if (!i) {
		b_node->pivot[j] = pivots[i++];
		if (unlikely(i > mas_end))
			goto complete;
		j++;
	}

	piv_end = min(mas_end, mt_pivots[mt]);
	for (; i < piv_end; i++, j++) {
		b_node->pivot[j] = pivots[i];
		if (unlikely(!b_node->pivot[j]))
			break;

		if (unlikely(mas->max == b_node->pivot[j]))
			goto complete;
	}

	if (likely(i <= mas_end))
		b_node->pivot[j] = mas_safe_pivot(mas, pivots, i, mt);

complete:
	b_node->b_end = ++j;
	j -= mab_start;
	slots = ma_slots(node, mt);
	memcpy(b_node->slot + mab_start, slots + mas_start, sizeof(void *) * j);
	if (!ma_is_leaf(mt) && mt_is_alloc(mas->tree)) {
		gaps = ma_gaps(node, mt);
		memcpy(b_node->gap + mab_start, gaps + mas_start,
		       sizeof(unsigned long) * j);
	}
}

/*
 * mas_leaf_set_meta() - Set the metadata of a leaf if possible.
 * @mas: The maple state
 * @node: The maple node
 * @pivots: pointer to the maple node pivots
 * @mt: The maple type
 * @end: The assumed end
 *
 * Note, end may be incremented within this function but not modified at the
 * source.  This is fine since the metadata is the last thing to be stored in a
 * node during a write.
 */
static inline void mas_leaf_set_meta(struct ma_state *mas,
		struct maple_node *node, unsigned long *pivots,
		enum maple_type mt, unsigned char end)
{
	/* There is no room for metadata already */
	if (mt_pivots[mt] <= end)
		return;

	if (pivots[end] && pivots[end] < mas->max)
		end++;

	if (end < mt_slots[mt] - 1)
		ma_set_meta(node, mt, 0, end);
}

/*
 * mab_mas_cp() - Copy data from maple_big_node to a maple encoded node.
 * @b_node: the maple_big_node that has the data
 * @mab_start: the start location in @b_node.
 * @mab_end: The end location in @b_node (inclusively)
 * @mas: The maple state with the maple encoded node.
 */
static inline void mab_mas_cp(struct maple_big_node *b_node,
			      unsigned char mab_start, unsigned char mab_end,
			      struct ma_state *mas, bool new_max)
{
	int i, j = 0;
	enum maple_type mt = mte_node_type(mas->node);
	struct maple_node *node = mte_to_node(mas->node);
	void __rcu **slots = ma_slots(node, mt);
	unsigned long *pivots = ma_pivots(node, mt);
	unsigned long *gaps = NULL;
	unsigned char end;

	if (mab_end - mab_start > mt_pivots[mt])
		mab_end--;

	if (!pivots[mt_pivots[mt] - 1])
		slots[mt_pivots[mt]] = NULL;

	i = mab_start;
	do {
		pivots[j++] = b_node->pivot[i++];
	} while (i <= mab_end && likely(b_node->pivot[i]));

	memcpy(slots, b_node->slot + mab_start,
	       sizeof(void *) * (i - mab_start));

	if (new_max)
		mas->max = b_node->pivot[i - 1];

	end = j - 1;
	if (likely(!ma_is_leaf(mt) && mt_is_alloc(mas->tree))) {
		unsigned long max_gap = 0;
		unsigned char offset = 15;

		gaps = ma_gaps(node, mt);
		do {
			gaps[--j] = b_node->gap[--i];
			if (gaps[j] > max_gap) {
				offset = j;
				max_gap = gaps[j];
			}
		} while (j);

		ma_set_meta(node, mt, offset, end);
	} else {
		mas_leaf_set_meta(mas, node, pivots, mt, end);
	}
}

/*
 * mas_descend_adopt() - Descend through a sub-tree and adopt children.
 * @mas: the maple state with the maple encoded node of the sub-tree.
 *
 * Descend through a sub-tree and adopt children who do not have the correct
 * parents set.  Follow the parents which have the correct parents as they are
 * the new entries which need to be followed to find other incorrectly set
 * parents.
 */
static inline void mas_descend_adopt(struct ma_state *mas)
{
	struct ma_state list[3], next[3];
	int i, n;

	/*
	 * At each level there may be up to 3 correct parent pointers which indicates
	 * the new nodes which need to be walked to find any new nodes at a lower level.
	 */

	for (i = 0; i < 3; i++) {
		list[i] = *mas;
		list[i].offset = 0;
		next[i].offset = 0;
	}
	next[0] = *mas;

	while (!mte_is_leaf(list[0].node)) {
		n = 0;
		for (i = 0; i < 3; i++) {
			if (mas_is_none(&list[i]))
				continue;

			if (i && list[i-1].node == list[i].node)
				continue;

			while ((n < 3) && (mas_new_child(&list[i], &next[n])))
				n++;

			mas_adopt_children(&list[i], list[i].node);
		}

		while (n < 3)
			next[n++].node = MAS_NONE;

		/* descend by setting the list to the children */
		for (i = 0; i < 3; i++)
			list[i] = next[i];
	}
}

/*
 * mas_bulk_rebalance() - Rebalance the end of a tree after a bulk insert.
 * @mas: The maple state
 * @end: The maple node end
 * @mt: The maple node type
 */
static inline void mas_bulk_rebalance(struct ma_state *mas, unsigned char end,
				      enum maple_type mt)
{
	if (!(mas->mas_flags & MA_STATE_BULK))
		return;

	if (mte_is_root(mas->node))
		return;

	if (end > mt_min_slots[mt]) {
		mas->mas_flags &= ~MA_STATE_REBALANCE;
		return;
	}
}

/*
 * mas_store_b_node() - Store an @entry into the b_node while also copying the
 * data from a maple encoded node.
 * @wr_mas: the maple write state
 * @b_node: the maple_big_node to fill with data
 * @offset_end: the offset to end copying
 *
 * Return: The actual end of the data stored in @b_node
 */
static noinline_for_kasan void mas_store_b_node(struct ma_wr_state *wr_mas,
		struct maple_big_node *b_node, unsigned char offset_end)
{
	unsigned char slot;
	unsigned char b_end;
	/* Possible underflow of piv will wrap back to 0 before use. */
	unsigned long piv;
	struct ma_state *mas = wr_mas->mas;

	b_node->type = wr_mas->type;
	b_end = 0;
	slot = mas->offset;
	if (slot) {
		/* Copy start data up to insert. */
		mas_mab_cp(mas, 0, slot - 1, b_node, 0);
		b_end = b_node->b_end;
		piv = b_node->pivot[b_end - 1];
	} else
		piv = mas->min - 1;

	if (piv + 1 < mas->index) {
		/* Handle range starting after old range */
		b_node->slot[b_end] = wr_mas->content;
		if (!wr_mas->content)
			b_node->gap[b_end] = mas->index - 1 - piv;
		b_node->pivot[b_end++] = mas->index - 1;
	}

	/* Store the new entry. */
	mas->offset = b_end;
	b_node->slot[b_end] = wr_mas->entry;
	b_node->pivot[b_end] = mas->last;

	/* Appended. */
	if (mas->last >= mas->max)
		goto b_end;

	/* Handle new range ending before old range ends */
	piv = mas_logical_pivot(mas, wr_mas->pivots, offset_end, wr_mas->type);
	if (piv > mas->last) {
		if (piv == ULONG_MAX)
			mas_bulk_rebalance(mas, b_node->b_end, wr_mas->type);

		if (offset_end != slot)
			wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
							  offset_end);

		b_node->slot[++b_end] = wr_mas->content;
		if (!wr_mas->content)
			b_node->gap[b_end] = piv - mas->last + 1;
		b_node->pivot[b_end] = piv;
	}

	slot = offset_end + 1;
	if (slot > wr_mas->node_end)
		goto b_end;

	/* Copy end data to the end of the node. */
	mas_mab_cp(mas, slot, wr_mas->node_end + 1, b_node, ++b_end);
	b_node->b_end--;
	return;

b_end:
	b_node->b_end = b_end;
}

/*
 * mas_prev_sibling() - Find the previous node with the same parent.
 * @mas: the maple state
 *
 * Return: True if there is a previous sibling, false otherwise.
 */
static inline bool mas_prev_sibling(struct ma_state *mas)
{
	unsigned int p_slot = mte_parent_slot(mas->node);

	if (mte_is_root(mas->node))
		return false;

	if (!p_slot)
		return false;

	mas_ascend(mas);
	mas->offset = p_slot - 1;
	mas_descend(mas);
	return true;
}

/*
 * mas_next_sibling() - Find the next node with the same parent.
 * @mas: the maple state
 *
 * Return: true if there is a next sibling, false otherwise.
 */
static inline bool mas_next_sibling(struct ma_state *mas)
{
	MA_STATE(parent, mas->tree, mas->index, mas->last);

	if (mte_is_root(mas->node))
		return false;

	parent = *mas;
	mas_ascend(&parent);
	parent.offset = mte_parent_slot(mas->node) + 1;
	if (parent.offset > mas_data_end(&parent))
		return false;

	*mas = parent;
	mas_descend(mas);
	return true;
}

/*
 * mte_node_or_none() - Return the encoded node or MAS_NONE.
 * @enode: The encoded maple node.
 *
 * Shorthand to avoid setting %NULLs in the tree or maple_subtree_state.
 *
 * Return: @enode or MAS_NONE
 */
static inline struct maple_enode *mte_node_or_none(struct maple_enode *enode)
{
	if (enode)
		return enode;

	return ma_enode_ptr(MAS_NONE);
}

/*
 * mas_wr_node_walk() - Find the correct offset for the index in the @mas.
 * @wr_mas: The maple write state
 *
 * Uses mas_slot_locked() and does not need to worry about dead nodes.
 */
static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;
	unsigned char count, offset;

	if (unlikely(ma_is_dense(wr_mas->type))) {
		wr_mas->r_max = wr_mas->r_min = mas->index;
		mas->offset = mas->index = mas->min;
		return;
	}

	wr_mas->node = mas_mn(wr_mas->mas);
	wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type);
	count = wr_mas->node_end = ma_data_end(wr_mas->node, wr_mas->type,
					       wr_mas->pivots, mas->max);
	offset = mas->offset;

	while (offset < count && mas->index > wr_mas->pivots[offset])
		offset++;

	wr_mas->r_max = offset < count ? wr_mas->pivots[offset] : mas->max;
	wr_mas->r_min = mas_safe_min(mas, wr_mas->pivots, offset);
	wr_mas->offset_end = mas->offset = offset;
}

/*
 * mas_topiary_range() - Add a range of slots to the topiary.
 * @mas: The maple state
 * @destroy: The topiary to add the slots (usually destroy)
 * @start: The starting slot inclusively
 * @end: The end slot inclusively
 */
static inline void mas_topiary_range(struct ma_state *mas,
	struct ma_topiary *destroy, unsigned char start, unsigned char end)
{
	void __rcu **slots;
	unsigned char offset;

	MAS_BUG_ON(mas, mte_is_leaf(mas->node));

	slots = ma_slots(mas_mn(mas), mte_node_type(mas->node));
	for (offset = start; offset <= end; offset++) {
		struct maple_enode *enode = mas_slot_locked(mas, slots, offset);

		if (mte_dead_node(enode))
			continue;

		mat_add(destroy, enode);
	}
}

/*
 * mast_topiary() - Add the portions of the tree to the removal list; either to
 * be freed or discarded (destroy walk).
 * @mast: The maple_subtree_state.
 */
static inline void mast_topiary(struct maple_subtree_state *mast)
{
	MA_WR_STATE(wr_mas, mast->orig_l, NULL);
	unsigned char r_start, r_end;
	unsigned char l_start, l_end;
	void __rcu **l_slots, **r_slots;

	wr_mas.type = mte_node_type(mast->orig_l->node);
	mast->orig_l->index = mast->orig_l->last;
	mas_wr_node_walk(&wr_mas);
	l_start = mast->orig_l->offset + 1;
	l_end = mas_data_end(mast->orig_l);
	r_start = 0;
	r_end = mast->orig_r->offset;

	if (r_end)
		r_end--;

	l_slots = ma_slots(mas_mn(mast->orig_l),
			   mte_node_type(mast->orig_l->node));

	r_slots = ma_slots(mas_mn(mast->orig_r),
			   mte_node_type(mast->orig_r->node));

	if ((l_start < l_end) &&
	    mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_start))) {
		l_start++;
	}

	if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_end))) {
		if (r_end)
			r_end--;
	}

	if ((l_start > r_end) && (mast->orig_l->node == mast->orig_r->node))
		return;

	/* At the node where left and right sides meet, add the parts between */
	if (mast->orig_l->node == mast->orig_r->node) {
		return mas_topiary_range(mast->orig_l, mast->destroy,
					 l_start, r_end);
	}

	/* mast->orig_r is different and consumed. */
	if (mte_is_leaf(mast->orig_r->node))
		return;

	if (mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_end)))
		l_end--;

	if (l_start <= l_end)
		mas_topiary_range(mast->orig_l, mast->destroy, l_start, l_end);

	if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_start)))
		r_start++;

	if (r_start <= r_end)
		mas_topiary_range(mast->orig_r, mast->destroy, 0, r_end);
}

/*
 * mast_rebalance_next() - Rebalance against the next node
 * @mast: The maple subtree state
 */
static inline void mast_rebalance_next(struct maple_subtree_state *mast)
{
	unsigned char b_end = mast->bn->b_end;

	mas_mab_cp(mast->orig_r, 0, mt_slot_count(mast->orig_r->node),
		   mast->bn, b_end);
	mast->orig_r->last = mast->orig_r->max;
}

/*
 * mast_rebalance_prev() - Rebalance against the previous node
 * @mast: The maple subtree state
 */
static inline void mast_rebalance_prev(struct maple_subtree_state *mast)
{
	unsigned char end = mas_data_end(mast->orig_l) + 1;
	unsigned char b_end = mast->bn->b_end;

	mab_shift_right(mast->bn, end);
	mas_mab_cp(mast->orig_l, 0, end - 1, mast->bn, 0);
	mast->l->min = mast->orig_l->min;
	mast->orig_l->index = mast->orig_l->min;
	mast->bn->b_end = end + b_end;
	mast->l->offset += end;
}

/*
 * mast_spanning_rebalance() - Rebalance nodes with nearest neighbour favouring
 * the node to the right.  Checking the nodes to the right then the left at each
 * level upwards until root is reached.  Free and destroy as needed.
 * Data is copied into the @mast->bn.
 * @mast: The maple_subtree_state.
 */
static inline
bool mast_spanning_rebalance(struct maple_subtree_state *mast)
{
	struct ma_state r_tmp = *mast->orig_r;
	struct ma_state l_tmp = *mast->orig_l;
	struct maple_enode *ancestor = NULL;
	unsigned char start, end;
	unsigned char depth = 0;

	r_tmp = *mast->orig_r;
	l_tmp = *mast->orig_l;
	do {
		mas_ascend(mast->orig_r);
		mas_ascend(mast->orig_l);
		depth++;
		if (!ancestor &&
		    (mast->orig_r->node == mast->orig_l->node)) {
			ancestor = mast->orig_r->node;
			end = mast->orig_r->offset - 1;
			start = mast->orig_l->offset + 1;
		}

		if (mast->orig_r->offset < mas_data_end(mast->orig_r)) {
			if (!ancestor) {
				ancestor = mast->orig_r->node;
				start = 0;
			}

			mast->orig_r->offset++;
			do {
				mas_descend(mast->orig_r);
				mast->orig_r->offset = 0;
				depth--;
			} while (depth);

			mast_rebalance_next(mast);
			do {
				unsigned char l_off = 0;
				struct maple_enode *child = r_tmp.node;

				mas_ascend(&r_tmp);
				if (ancestor == r_tmp.node)
					l_off = start;

				if (r_tmp.offset)
					r_tmp.offset--;

				if (l_off < r_tmp.offset)
					mas_topiary_range(&r_tmp, mast->destroy,
							  l_off, r_tmp.offset);

				if (l_tmp.node != child)
					mat_add(mast->free, child);

			} while (r_tmp.node != ancestor);

			*mast->orig_l = l_tmp;
			return true;

		} else if (mast->orig_l->offset != 0) {
			if (!ancestor) {
				ancestor = mast->orig_l->node;
				end = mas_data_end(mast->orig_l);
			}

			mast->orig_l->offset--;
			do {
				mas_descend(mast->orig_l);
				mast->orig_l->offset =
					mas_data_end(mast->orig_l);
				depth--;
			} while (depth);

			mast_rebalance_prev(mast);
			do {
				unsigned char r_off;
				struct maple_enode *child = l_tmp.node;

				mas_ascend(&l_tmp);
				if (ancestor == l_tmp.node)
					r_off = end;
				else
					r_off = mas_data_end(&l_tmp);

				if (l_tmp.offset < r_off)
					l_tmp.offset++;

				if (l_tmp.offset < r_off)
					mas_topiary_range(&l_tmp, mast->destroy,
							  l_tmp.offset, r_off);

				if (r_tmp.node != child)
					mat_add(mast->free, child);

			} while (l_tmp.node != ancestor);

			*mast->orig_r = r_tmp;
			return true;
		}
	} while (!mte_is_root(mast->orig_r->node));

	*mast->orig_r = r_tmp;
	*mast->orig_l = l_tmp;
	return false;
}

/*
 * mast_ascend_free() - Add current original maple state nodes to the free list
 * and ascend.
 * @mast: the maple subtree state.
 *
 * Ascend the original left and right sides and add the previous nodes to the
 * free list.  Set the slots to point to the correct location in the new nodes.
 */
static inline void
mast_ascend_free(struct maple_subtree_state *mast)
{
	MA_WR_STATE(wr_mas, mast->orig_r, NULL);
	struct maple_enode *left = mast->orig_l->node;
	struct maple_enode *right = mast->orig_r->node;

	mas_ascend(mast->orig_l);
	mas_ascend(mast->orig_r);
	mat_add(mast->free, left);

	if (left != right)
		mat_add(mast->free, right);

	mast->orig_r->offset = 0;
	mast->orig_r->index = mast->r->max;
	/* last should be larger than or equal to index */
	if (mast->orig_r->last < mast->orig_r->index)
		mast->orig_r->last = mast->orig_r->index;
	/*
	 * The node may not contain the value so set slot to ensure all
	 * of the nodes contents are freed or destroyed.
	 */
	wr_mas.type = mte_node_type(mast->orig_r->node);
	mas_wr_node_walk(&wr_mas);
	/* Set up the left side of things */
	mast->orig_l->offset = 0;
	mast->orig_l->index = mast->l->min;
	wr_mas.mas = mast->orig_l;
	wr_mas.type = mte_node_type(mast->orig_l->node);
	mas_wr_node_walk(&wr_mas);

	mast->bn->type = wr_mas.type;
}

/*
 * mas_new_ma_node() - Create and return a new maple node.  Helper function.
 * @mas: the maple state with the allocations.
 * @b_node: the maple_big_node with the type encoding.
 *
 * Use the node type from the maple_big_node to allocate a new node from the
 * ma_state.  This function exists mainly for code readability.
 *
 * Return: A new maple encoded node
 */
static inline struct maple_enode
*mas_new_ma_node(struct ma_state *mas, struct maple_big_node *b_node)
{
	return mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), b_node->type);
}

/*
 * mas_mab_to_node() - Set up right and middle nodes
 *
 * @mas: the maple state that contains the allocations.
 * @b_node: the node which contains the data.
 * @left: The pointer which will have the left node
 * @right: The pointer which may have the right node
 * @middle: the pointer which may have the middle node (rare)
 * @mid_split: the split location for the middle node
 *
 * Return: the split of left.
 */
static inline unsigned char mas_mab_to_node(struct ma_state *mas,
	struct maple_big_node *b_node, struct maple_enode **left,
	struct maple_enode **right, struct maple_enode **middle,
	unsigned char *mid_split, unsigned long min)
{
	unsigned char split = 0;
	unsigned char slot_count = mt_slots[b_node->type];

	*left = mas_new_ma_node(mas, b_node);
	*right = NULL;
	*middle = NULL;
	*mid_split = 0;

	if (b_node->b_end < slot_count) {
		split = b_node->b_end;
	} else {
		split = mab_calc_split(mas, b_node, mid_split, min);
		*right = mas_new_ma_node(mas, b_node);
	}

	if (*mid_split)
		*middle = mas_new_ma_node(mas, b_node);

	return split;
}

/*
 * mab_set_b_end() - Add entry to b_node at b_node->b_end and increment the end
 * pointer.
 * @b_node: the big node to add the entry
 * @mas: the maple state to get the pivot (mas->max)
 * @entry: the entry to add, if NULL nothing happens.
 */
static inline void mab_set_b_end(struct maple_big_node *b_node,
				 struct ma_state *mas,
				 void *entry)
{
	if (!entry)
		return;

	b_node->slot[b_node->b_end] = entry;
	if (mt_is_alloc(mas->tree))
		b_node->gap[b_node->b_end] = mas_max_gap(mas);
	b_node->pivot[b_node->b_end++] = mas->max;
}

/*
 * mas_set_split_parent() - combine_then_separate helper function.  Sets the
 * parent of @mas->node to either @left or @right, depending on @slot and
 * @split.
 * @mas: the maple state with the node that needs a parent
 * @left: possible parent 1
 * @right: possible parent 2
 * @slot: the slot the mas->node was placed
 * @split: the split location between @left and @right
 */
static inline void mas_set_split_parent(struct ma_state *mas,
					struct maple_enode *left,
					struct maple_enode *right,
					unsigned char *slot, unsigned char split)
{
	if (mas_is_none(mas))
		return;

	if ((*slot) <= split)
		mas_set_parent(mas, mas->node, left, *slot);
	else if (right)
		mas_set_parent(mas, mas->node, right, (*slot) - split - 1);

	(*slot)++;
}

/*
 * mte_mid_split_check() - Check if the next node passes the mid-split
 * @l: Pointer to the left encoded maple node.
 * @r: Pointer to the right encoded maple node.
 * @right: The encoded maple node to the right.
 * @slot: The offset.
 * @split: The split location.
 * @mid_split: The middle split.
 */
static inline void mte_mid_split_check(struct maple_enode **l,
				       struct maple_enode **r,
				       struct maple_enode *right,
				       unsigned char slot,
				       unsigned char *split,
				       unsigned char mid_split)
{
	if (*r == right)
		return;

	if (slot < mid_split)
		return;

	*l = *r;
	*r = right;
	*split = mid_split;
}

/*
 * mast_set_split_parents() - Helper function to set three nodes parents.  Slot
 * is taken from @mast->l.
 * @mast: the maple subtree state
 * @left: the left node
 * @middle: the middle node (may be NULL)
 * @right: the right node
 * @split: the split location
 * @mid_split: the location to split between middle and right
 */
static inline void mast_set_split_parents(struct maple_subtree_state *mast,
					  struct maple_enode *left,
					  struct maple_enode *middle,
					  struct maple_enode *right,
					  unsigned char split,
					  unsigned char mid_split)
{
	unsigned char slot;
	struct maple_enode *l = left;
	struct maple_enode *r = right;

	if (mas_is_none(mast->l))
		return;

	if (middle)
		r = middle;

	slot = mast->l->offset;

	mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
	mas_set_split_parent(mast->l, l, r, &slot, split);

	mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
	mas_set_split_parent(mast->m, l, r, &slot, split);

	mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
	mas_set_split_parent(mast->r, l, r, &slot, split);
}

/*
 * mas_wmb_replace() - Write memory barrier and replace
 * @mas: The maple state
 * @free: the maple topiary list of nodes to free
 * @destroy: The maple topiary list of nodes to destroy (walk and free)
 *
 * Updates gap as necessary.
 */
static inline void mas_wmb_replace(struct ma_state *mas,
				   struct ma_topiary *free,
				   struct ma_topiary *destroy)
{
	/* All nodes must see old data as dead prior to replacing that data */
	smp_wmb(); /* Needed for RCU */

	/* Insert the new data in the tree */
	mas_replace(mas, true);

	if (!mte_is_leaf(mas->node))
		mas_descend_adopt(mas);

	mas_mat_free(mas, free);

	if (destroy)
		mas_mat_destroy(mas, destroy);

	if (mte_is_leaf(mas->node))
		return;

	mas_update_gap(mas);
}

/*
 * mast_new_root() - Set a new tree root during subtree creation
 * @mast: The maple subtree state
 * @mas: The maple state
 */
static inline void mast_new_root(struct maple_subtree_state *mast,
				 struct ma_state *mas)
{
	mas_mn(mast->l)->parent =
		ma_parent_ptr(((unsigned long)mas->tree | MA_ROOT_PARENT));
	if (!mte_dead_node(mast->orig_l->node) &&
	    !mte_is_root(mast->orig_l->node)) {
		do {
			mast_ascend_free(mast);
			mast_topiary(mast);
		} while (!mte_is_root(mast->orig_l->node));
	}
	if ((mast->orig_l->node != mas->node) &&
	    (mast->l->depth > mas_mt_height(mas))) {
		mat_add(mast->free, mas->node);
	}
}

/*
 * mast_cp_to_nodes() - Copy data out to nodes.
 * @mast: The maple subtree state
 * @left: The left encoded maple node
 * @middle: The middle encoded maple node
 * @right: The right encoded maple node
 * @split: The location to split between left and (middle ? middle : right)
 * @mid_split: The location to split between middle and right.
 */
static inline void mast_cp_to_nodes(struct maple_subtree_state *mast,
	struct maple_enode *left, struct maple_enode *middle,
	struct maple_enode *right, unsigned char split, unsigned char mid_split)
{
	bool new_lmax = true;

	mast->l->node = mte_node_or_none(left);
	mast->m->node = mte_node_or_none(middle);
	mast->r->node = mte_node_or_none(right);

	mast->l->min = mast->orig_l->min;
	if (split == mast->bn->b_end) {
		mast->l->max = mast->orig_r->max;
		new_lmax = false;
	}

	mab_mas_cp(mast->bn, 0, split, mast->l, new_lmax);

	if (middle) {
		mab_mas_cp(mast->bn, 1 + split, mid_split, mast->m, true);
		mast->m->min = mast->bn->pivot[split] + 1;
		split = mid_split;
	}

	mast->r->max = mast->orig_r->max;
	if (right) {
		mab_mas_cp(mast->bn, 1 + split, mast->bn->b_end, mast->r, false);
		mast->r->min = mast->bn->pivot[split] + 1;
	}
}

/*
 * mast_combine_cp_left - Copy in the original left side of the tree into the
 * combined data set in the maple subtree state big node.
 * @mast: The maple subtree state
 */
static inline void mast_combine_cp_left(struct maple_subtree_state *mast)
{
	unsigned char l_slot = mast->orig_l->offset;

	if (!l_slot)
		return;

	mas_mab_cp(mast->orig_l, 0, l_slot - 1, mast->bn, 0);
}

/*
 * mast_combine_cp_right: Copy in the original right side of the tree into the
 * combined data set in the maple subtree state big node.
 * @mast: The maple subtree state
 */
static inline void mast_combine_cp_right(struct maple_subtree_state *mast)
{
	if (mast->bn->pivot[mast->bn->b_end - 1] >= mast->orig_r->max)
		return;

	mas_mab_cp(mast->orig_r, mast->orig_r->offset + 1,
		   mt_slot_count(mast->orig_r->node), mast->bn,
		   mast->bn->b_end);
	mast->orig_r->last = mast->orig_r->max;
}

/*
 * mast_sufficient: Check if the maple subtree state has enough data in the big
 * node to create at least one sufficient node
 * @mast: the maple subtree state
 */
static inline bool mast_sufficient(struct maple_subtree_state *mast)
{
	if (mast->bn->b_end > mt_min_slot_count(mast->orig_l->node))
		return true;

	return false;
}

/*
 * mast_overflow: Check if there is too much data in the subtree state for a
 * single node.
 * @mast: The maple subtree state
 */
static inline bool mast_overflow(struct maple_subtree_state *mast)
{
	if (mast->bn->b_end >= mt_slot_count(mast->orig_l->node))
		return true;

	return false;
}

static inline void *mtree_range_walk(struct ma_state *mas)
{
	unsigned long *pivots;
	unsigned char offset;
	struct maple_node *node;
	struct maple_enode *next, *last;
	enum maple_type type;
	void __rcu **slots;
	unsigned char end;
	unsigned long max, min;
	unsigned long prev_max, prev_min;

	next = mas->node;
	min = mas->min;
	max = mas->max;
	do {
		offset = 0;
		last = next;
		node = mte_to_node(next);
		type = mte_node_type(next);
		pivots = ma_pivots(node, type);
		end = ma_data_end(node, type, pivots, max);
		if (unlikely(ma_dead_node(node)))
			goto dead_node;

		if (pivots[offset] >= mas->index) {
			prev_max = max;
			prev_min = min;
			max = pivots[offset];
			goto next;
		}

		do {
			offset++;
		} while ((offset < end) && (pivots[offset] < mas->index));

		prev_min = min;
		min = pivots[offset - 1] + 1;
		prev_max = max;
		if (likely(offset < end && pivots[offset]))
			max = pivots[offset];

next:
		slots = ma_slots(node, type);
		next = mt_slot(mas->tree, slots, offset);
		if (unlikely(ma_dead_node(node)))
			goto dead_node;
	} while (!ma_is_leaf(type));

	mas->offset = offset;
	mas->index = min;
	mas->last = max;
	mas->min = prev_min;
	mas->max = prev_max;
	mas->node = last;
	return (void *)next;

dead_node:
	mas_reset(mas);
	return NULL;
}

/*
 * mas_spanning_rebalance() - Rebalance across two nodes which may not be peers.
 * @mas: The starting maple state
 * @mast: The maple_subtree_state, keeps track of 4 maple states.
 * @count: The estimated count of iterations needed.
 *
 * Follow the tree upwards from @l_mas and @r_mas for @count, or until the root
 * is hit.  First @b_node is split into two entries which are inserted into the
 * next iteration of the loop.  @b_node is returned populated with the final
 * iteration.  @mas is used to obtain allocations.  orig_l_mas keeps track of
 * the nodes that will remain active by using orig_l_mas->index and
 * orig_l_mas->last to account for what has been copied into the new sub-tree.
 * The update of orig_l_mas->last is used in mas_consume to find the slots that
 * will need to be either freed or destroyed.  orig_l_mas->depth keeps track of
 * the height of the new sub-tree in case the sub-tree becomes the full tree.
 *
 * Return: the number of elements in b_node during the last loop.
 */
static int mas_spanning_rebalance(struct ma_state *mas,
		struct maple_subtree_state *mast, unsigned char count)
{
	unsigned char split, mid_split;
	unsigned char slot = 0;
	struct maple_enode *left = NULL, *middle = NULL, *right = NULL;

	MA_STATE(l_mas, mas->tree, mas->index, mas->index);
	MA_STATE(r_mas, mas->tree, mas->index, mas->last);
	MA_STATE(m_mas, mas->tree, mas->index, mas->index);
	MA_TOPIARY(free, mas->tree);
	MA_TOPIARY(destroy, mas->tree);

	/*
	 * The tree needs to be rebalanced and leaves need to be kept at the same level.
	 * Rebalancing is done by use of the ``struct maple_topiary``.
	 */
	mast->l = &l_mas;
	mast->m = &m_mas;
	mast->r = &r_mas;
	mast->free = &free;
	mast->destroy = &destroy;
	l_mas.node = r_mas.node = m_mas.node = MAS_NONE;

	/* Check if this is not root and has sufficient data.  */
	if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) &&
	    unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type]))
		mast_spanning_rebalance(mast);

	mast->orig_l->depth = 0;

	/*
	 * Each level of the tree is examined and balanced, pushing data to the left or
	 * right, or rebalancing against left or right nodes is employed to avoid
	 * rippling up the tree to limit the amount of churn.  Once a new sub-section of
	 * the tree is created, there may be a mix of new and old nodes.  The old nodes
	 * will have the incorrect parent pointers and currently be in two trees: the
	 * original tree and the partially new tree.  To remedy the parent pointers in
	 * the old tree, the new data is swapped into the active tree and a walk down
	 * the tree is performed and the parent pointers are updated.
	 * See mas_descend_adopt() for more information.
	 */
	while (count--) {
		mast->bn->b_end--;
		mast->bn->type = mte_node_type(mast->orig_l->node);
		split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle,
					&mid_split, mast->orig_l->min);
		mast_set_split_parents(mast, left, middle, right, split,
				       mid_split);
		mast_cp_to_nodes(mast, left, middle, right, split, mid_split);

		/*
		 * Copy data from next level in the tree to mast->bn from next
		 * iteration
		 */
		memset(mast->bn, 0, sizeof(struct maple_big_node));
		mast->bn->type = mte_node_type(left);
		mast->orig_l->depth++;

		/* Root already stored in l->node. */
		if (mas_is_root_limits(mast->l))
			goto new_root;

		mast_ascend_free(mast);
		mast_combine_cp_left(mast);
		l_mas.offset = mast->bn->b_end;
		mab_set_b_end(mast->bn, &l_mas, left);
		mab_set_b_end(mast->bn, &m_mas, middle);
		mab_set_b_end(mast->bn, &r_mas, right);

		/* Copy anything necessary out of the right node. */
		mast_combine_cp_right(mast);
		mast_topiary(mast);
		mast->orig_l->last = mast->orig_l->max;

		if (mast_sufficient(mast))
			continue;

		if (mast_overflow(mast))
			continue;

		/* May be a new root stored in mast->bn */
		if (mas_is_root_limits(mast->orig_l))
			break;

		mast_spanning_rebalance(mast);

		/* rebalancing from other nodes may require another loop. */
		if (!count)
			count++;
	}

	l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)),
				mte_node_type(mast->orig_l->node));
	mast->orig_l->depth++;
	mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true);
	mas_set_parent(mas, left, l_mas.node, slot);
	if (middle)
		mas_set_parent(mas, middle, l_mas.node, ++slot);

	if (right)
		mas_set_parent(mas, right, l_mas.node, ++slot);

	if (mas_is_root_limits(mast->l)) {
new_root:
		mast_new_root(mast, mas);
	} else {
		mas_mn(&l_mas)->parent = mas_mn(mast->orig_l)->parent;
	}

	if (!mte_dead_node(mast->orig_l->node))
		mat_add(&free, mast->orig_l->node);

	mas->depth = mast->orig_l->depth;
	*mast->orig_l = l_mas;
	mte_set_node_dead(mas->node);

	/* Set up mas for insertion. */
	mast->orig_l->depth = mas->depth;
	mast->orig_l->alloc = mas->alloc;
	*mas = *mast->orig_l;
	mas_wmb_replace(mas, &free, &destroy);
	mtree_range_walk(mas);
	return mast->bn->b_end;
}

/*
 * mas_rebalance() - Rebalance a given node.
 * @mas: The maple state
 * @b_node: The big maple node.
 *
 * Rebalance two nodes into a single node or two new nodes that are sufficient.
 * Continue upwards until tree is sufficient.
 *
 * Return: the number of elements in b_node during the last loop.
 */
static inline int mas_rebalance(struct ma_state *mas,
				struct maple_big_node *b_node)
{
	char empty_count = mas_mt_height(mas);
	struct maple_subtree_state mast;
	unsigned char shift, b_end = ++b_node->b_end;

	MA_STATE(l_mas, mas->tree, mas->index, mas->last);
	MA_STATE(r_mas, mas->tree, mas->index, mas->last);

	trace_ma_op(__func__, mas);

	/*
	 * Rebalancing occurs if a node is insufficient.  Data is rebalanced
	 * against the node to the right if it exists, otherwise the node to
	 * the left of this node is rebalanced against this node.  If
	 * rebalancing causes just one node to be produced instead of two,
	 * then the parent is also examined and rebalanced if it is
	 * insufficient.  Every level tries to combine the data in the same
	 * way.  If one node contains the entire range of the tree, then that
	 * node is used as a new root node.
	 */
	mas_node_count(mas, 1 + empty_count * 3);
	if (mas_is_err(mas))
		return 0;

	mast.orig_l = &l_mas;
	mast.orig_r = &r_mas;
	mast.bn = b_node;
	mast.bn->type = mte_node_type(mas->node);

	l_mas = r_mas = *mas;

	if (mas_next_sibling(&r_mas)) {
		mas_mab_cp(&r_mas, 0, mt_slot_count(r_mas.node), b_node, b_end);
		r_mas.last = r_mas.index = r_mas.max;
	} else {
		mas_prev_sibling(&l_mas);
		shift = mas_data_end(&l_mas) + 1;
		mab_shift_right(b_node, shift);
		mas->offset += shift;
		mas_mab_cp(&l_mas, 0, shift - 1, b_node, 0);
		b_node->b_end = shift + b_end;
		l_mas.index = l_mas.last = l_mas.min;
	}

	return mas_spanning_rebalance(mas, &mast, empty_count);
}

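/*
 * Hedged usage sketch (tree, ranges, and values are invented): deleting
 * from a leaf until it holds fewer than mt_min_slots[] entries is what
 * sends a subsequent write down this rebalance path; the erase below may
 * leave the leaf insufficient:
 *
 *	mtree_store_range(&mt, 0, 9, xa_mk_value(1), GFP_KERNEL);
 *	mtree_store_range(&mt, 10, 19, xa_mk_value(2), GFP_KERNEL);
 *	mtree_erase(&mt, 12);
 */
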
/*
 * mas_destroy_rebalance() - Rebalance left-most node while destroying the maple
 * state.
 * @mas: The maple state
 * @end: The end of the left-most node.
 *
 * During a mass-insert event (such as forking), it may be necessary to
 * rebalance the left-most node when it is not sufficient.
 */
static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end)
{
	enum maple_type mt = mte_node_type(mas->node);
	struct maple_node reuse, *newnode, *parent, *new_left, *left, *node;
	struct maple_enode *eparent;
	unsigned char offset, tmp, split = mt_slots[mt] / 2;
	void __rcu **l_slots, **slots;
	unsigned long *l_pivs, *pivs, gap;
	bool in_rcu = mt_in_rcu(mas->tree);

	MA_STATE(l_mas, mas->tree, mas->index, mas->last);

	l_mas = *mas;
	mas_prev_sibling(&l_mas);

	/* set up node. */
	if (in_rcu) {
		/* Allocate for both left and right as well as parent. */
		mas_node_count(mas, 3);
		if (mas_is_err(mas))
			return;

		newnode = mas_pop_node(mas);
	} else {
		newnode = &reuse;
	}

	node = mas_mn(mas);
	newnode->parent = node->parent;
	slots = ma_slots(newnode, mt);
	pivs = ma_pivots(newnode, mt);
	left = mas_mn(&l_mas);
	l_slots = ma_slots(left, mt);
	l_pivs = ma_pivots(left, mt);
	if (!l_slots[split])
		split++;
	tmp = mas_data_end(&l_mas) - split;

	memcpy(slots, l_slots + split + 1, sizeof(void *) * tmp);
	memcpy(pivs, l_pivs + split + 1, sizeof(unsigned long) * tmp);
	pivs[tmp] = l_mas.max;
	memcpy(slots + tmp, ma_slots(node, mt), sizeof(void *) * end);
	memcpy(pivs + tmp, ma_pivots(node, mt), sizeof(unsigned long) * end);

	l_mas.max = l_pivs[split];
	mas->min = l_mas.max + 1;
	eparent = mt_mk_node(mte_parent(l_mas.node),
			     mas_parent_type(&l_mas, l_mas.node));
	tmp += end;
	if (!in_rcu) {
		unsigned char max_p = mt_pivots[mt];
		unsigned char max_s = mt_slots[mt];

		if (tmp < max_p)
			memset(pivs + tmp, 0,
			       sizeof(unsigned long) * (max_p - tmp));

		if (tmp < mt_slots[mt])
			memset(slots + tmp, 0, sizeof(void *) * (max_s - tmp));

		memcpy(node, newnode, sizeof(struct maple_node));
		ma_set_meta(node, mt, 0, tmp - 1);
		mte_set_pivot(eparent, mte_parent_slot(l_mas.node),
			      l_pivs[split]);

		/* Remove data from l_pivs. */
		tmp = split + 1;
		memset(l_pivs + tmp, 0, sizeof(unsigned long) * (max_p - tmp));
		memset(l_slots + tmp, 0, sizeof(void *) * (max_s - tmp));
		ma_set_meta(left, mt, 0, split);

		goto done;
	}

	/* RCU requires replacing both l_mas, mas, and parent. */
	mas->node = mt_mk_node(newnode, mt);
	ma_set_meta(newnode, mt, 0, tmp);

	new_left = mas_pop_node(mas);
	new_left->parent = left->parent;
	mt = mte_node_type(l_mas.node);
	slots = ma_slots(new_left, mt);
	pivs = ma_pivots(new_left, mt);
	memcpy(slots, l_slots, sizeof(void *) * split);
	memcpy(pivs, l_pivs, sizeof(unsigned long) * split);
	ma_set_meta(new_left, mt, 0, split);
	l_mas.node = mt_mk_node(new_left, mt);

	/* replace parent. */
	offset = mte_parent_slot(mas->node);
	mt = mas_parent_type(&l_mas, l_mas.node);
	parent = mas_pop_node(mas);
	slots = ma_slots(parent, mt);
	pivs = ma_pivots(parent, mt);
	memcpy(parent, mte_to_node(eparent), sizeof(struct maple_node));
	rcu_assign_pointer(slots[offset], mas->node);
	rcu_assign_pointer(slots[offset - 1], l_mas.node);
	pivs[offset - 1] = l_mas.max;
	eparent = mt_mk_node(parent, mt);
done:
	gap = mas_leaf_max_gap(mas);
	mte_set_gap(eparent, mte_parent_slot(mas->node), gap);
	gap = mas_leaf_max_gap(&l_mas);
	mte_set_gap(eparent, mte_parent_slot(l_mas.node), gap);
	mas_ascend(mas);

	if (in_rcu)
		mas_replace(mas, false);

	mas_update_gap(mas);
}

/*
 * mas_split_final_node() - Split the final node in a subtree operation.
 * @mast: the maple subtree state
 * @mas: The maple state
 * @height: The height of the tree in case it's a new root.
 */
static inline bool mas_split_final_node(struct maple_subtree_state *mast,
					struct ma_state *mas, int height)
{
	struct maple_enode *ancestor;

	if (mte_is_root(mas->node)) {
		if (mt_is_alloc(mas->tree))
			mast->bn->type = maple_arange_64;
		else
			mast->bn->type = maple_range_64;
		mas->depth = height;
	}
	/*
	 * Only a single node is used here, could be root.
	 * The Big_node data should just fit in a single node.
	 */
	ancestor = mas_new_ma_node(mas, mast->bn);
	mas_set_parent(mas, mast->l->node, ancestor, mast->l->offset);
	mas_set_parent(mas, mast->r->node, ancestor, mast->r->offset);
	mte_to_node(ancestor)->parent = mas_mn(mas)->parent;

	mast->l->node = ancestor;
	mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l, true);
	mas->offset = mast->bn->b_end - 1;
	return true;
}

/*
 * mast_fill_bnode() - Copy data into the big node in the subtree state
 * @mast: The maple subtree state
 * @mas: the maple state
 * @skip: The number of entries to skip for new nodes insertion.
 */
static inline void mast_fill_bnode(struct maple_subtree_state *mast,
				   struct ma_state *mas,
				   unsigned char skip)
{
	bool cp = true;
	struct maple_enode *old = mas->node;
	unsigned char split;

	memset(mast->bn->gap, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->gap));
	memset(mast->bn->slot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->slot));
	memset(mast->bn->pivot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->pivot));
	mast->bn->b_end = 0;

	if (mte_is_root(mas->node)) {
		cp = false;
	} else {
		mas_ascend(mas);
		mat_add(mast->free, old);
		mas->offset = mte_parent_slot(mas->node);
	}

	if (cp && mast->l->offset)
		mas_mab_cp(mas, 0, mast->l->offset - 1, mast->bn, 0);

	split = mast->bn->b_end;
	mab_set_b_end(mast->bn, mast->l, mast->l->node);
	mast->r->offset = mast->bn->b_end;
	mab_set_b_end(mast->bn, mast->r, mast->r->node);
	if (mast->bn->pivot[mast->bn->b_end - 1] == mas->max)
		cp = false;

	if (cp)
		mas_mab_cp(mas, split + skip, mt_slot_count(mas->node) - 1,
			   mast->bn, mast->bn->b_end);

	mast->bn->b_end--;
	mast->bn->type = mte_node_type(mas->node);
}

/*
 * mast_split_data() - Split the data in the subtree state big node into regular
 * nodes.
 * @mast: The maple subtree state
 * @mas: The maple state
 * @split: The location to split the big node
 */
static inline void mast_split_data(struct maple_subtree_state *mast,
	   struct ma_state *mas, unsigned char split)
{
	unsigned char p_slot;

	mab_mas_cp(mast->bn, 0, split, mast->l, true);
	mte_set_pivot(mast->r->node, 0, mast->r->max);
	mab_mas_cp(mast->bn, split + 1, mast->bn->b_end, mast->r, false);
	mast->l->offset = mte_parent_slot(mas->node);
	mast->l->max = mast->bn->pivot[split];
	mast->r->min = mast->l->max + 1;
	if (mte_is_leaf(mas->node))
		return;

	p_slot = mast->orig_l->offset;
	mas_set_split_parent(mast->orig_l, mast->l->node, mast->r->node,
			     &p_slot, split);
	mas_set_split_parent(mast->orig_r, mast->l->node, mast->r->node,
			     &p_slot, split);
}

/*
 * mas_push_data() - Instead of splitting a node, it is beneficial to push the
 * data to the right or left node if there is room.
 * @mas: The maple state
 * @height: The current height of the maple state
 * @mast: The maple subtree state
 * @left: Push left or not.
 *
 * Keeping the height of the tree low means faster lookups.
 *
 * Return: True if pushed, false otherwise.
 */
static inline bool mas_push_data(struct ma_state *mas, int height,
				 struct maple_subtree_state *mast, bool left)
{
	unsigned char slot_total = mast->bn->b_end;
	unsigned char end, space, split;

	MA_STATE(tmp_mas, mas->tree, mas->index, mas->last);
	tmp_mas = *mas;
	tmp_mas.depth = mast->l->depth;

	if (left && !mas_prev_sibling(&tmp_mas))
		return false;
	else if (!left && !mas_next_sibling(&tmp_mas))
		return false;

	end = mas_data_end(&tmp_mas);
	slot_total += end;
	space = 2 * mt_slot_count(mas->node) - 2;
	/* -2 instead of -1 to ensure there isn't a triple split */
	if (ma_is_leaf(mast->bn->type))
		space--;

	if (mas->max == ULONG_MAX)
		space--;

	if (slot_total >= space)
		return false;

	/* Get the data; Fill mast->bn */
	mast->bn->b_end++;
	if (left) {
		mab_shift_right(mast->bn, end + 1);
		mas_mab_cp(&tmp_mas, 0, end, mast->bn, 0);
		mast->bn->b_end = slot_total + 1;
	} else {
		mas_mab_cp(&tmp_mas, 0, end, mast->bn, mast->bn->b_end);
	}

	/* Configure mast for splitting of mast->bn */
	split = mt_slots[mast->bn->type] - 2;
	if (left) {
		/* Switch mas to prev node */
		mat_add(mast->free, mas->node);
		*mas = tmp_mas;
		/* Start using mast->l for the left side. */
		tmp_mas.node = mast->l->node;
		*mast->l = tmp_mas;
	} else {
		mat_add(mast->free, tmp_mas.node);
		tmp_mas.node = mast->r->node;
		*mast->r = tmp_mas;
		split = slot_total - split;
	}
	split = mab_no_null_split(mast->bn, split, mt_slots[mast->bn->type]);
	/* Update parent slot for split calculation. */
	if (left)
		mast->orig_l->offset += end + 1;

	mast_split_data(mast, mas, split);
	mast_fill_bnode(mast, mas, 2);
	mas_split_final_node(mast, mas, height + 1);
	return true;
}

/*
 * mas_split() - Split data that is too big for one node into two.
 * @mas: The maple state
 * @b_node: The maple big node
 * Return: 1 on success, 0 on failure.
 */
static int mas_split(struct ma_state *mas, struct maple_big_node *b_node)
{
	struct maple_subtree_state mast;
	int height = 0;
	unsigned char mid_split, split = 0;

	/*
	 * Splitting is handled differently from any other B-tree; the Maple
	 * Tree splits upwards.  Splitting up means that the split operation
	 * occurs when the walk of the tree hits the leaves and not on the way
	 * down.  The reason for splitting up is that it is impossible to know
	 * how much space will be needed until the leaf is (or leaves are)
	 * reached.  Since overwriting data is allowed and a range could
	 * overwrite more than one range or result in changing one entry into 3
	 * entries, it is impossible to know if a split is required until the
	 * data is examined.
	 *
	 * Splitting is a balancing act between keeping allocations to a minimum
	 * and avoiding a 'jitter' event where a tree is expanded to make room
	 * for an entry followed by a contraction when the entry is removed.  To
	 * accomplish the balance, there are empty slots remaining in both left
	 * and right nodes after a split.
	 */
	MA_STATE(l_mas, mas->tree, mas->index, mas->last);
	MA_STATE(r_mas, mas->tree, mas->index, mas->last);
	MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last);
	MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last);
	MA_TOPIARY(mat, mas->tree);

	trace_ma_op(__func__, mas);
	mas->depth = mas_mt_height(mas);
	/* Allocation failures will happen early. */
	mas_node_count(mas, 1 + mas->depth * 2);
	if (mas_is_err(mas))
		return 0;

	mast.l = &l_mas;
	mast.r = &r_mas;
	mast.orig_l = &prev_l_mas;
	mast.orig_r = &prev_r_mas;
	mast.free = &mat;
	mast.bn = b_node;

	while (height++ <= mas->depth) {
		if (mt_slots[b_node->type] > b_node->b_end) {
			mas_split_final_node(&mast, mas, height);
			break;
		}

		l_mas = r_mas = *mas;
		l_mas.node = mas_new_ma_node(mas, b_node);
		r_mas.node = mas_new_ma_node(mas, b_node);
		/*
		 * Another way that 'jitter' is avoided is to terminate a split up early if the
		 * left or right node has space to spare.  This is referred to as "pushing left"
		 * or "pushing right" and is similar to the B* tree, except the nodes left or
		 * right can rarely be reused due to RCU, but the ripple upwards is halted which
		 * is a significant savings.
		 */
		/* Try to push left. */
		if (mas_push_data(mas, height, &mast, true))
			break;

		/* Try to push right. */
		if (mas_push_data(mas, height, &mast, false))
			break;

		split = mab_calc_split(mas, b_node, &mid_split, prev_l_mas.min);
		mast_split_data(&mast, mas, split);
		/*
		 * Usually correct, mab_mas_cp in the above call overwrites
		 * r->max.
		 */
		mast.r->max = mas->max;
		mast_fill_bnode(&mast, mas, 1);
		prev_l_mas = *mast.l;
		prev_r_mas = *mast.r;
	}

	/* Set the original node as dead */
	mat_add(mast.free, mas->node);
	mas->node = l_mas.node;
	mas_wmb_replace(mas, mast.free, NULL);
	mtree_range_walk(mas);
	return 1;
}

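/*
 * Illustrative only (hypothetical tree): densely storing adjacent
 * singletons is the simplest way to drive a leaf past mt_slots[] and
 * through mas_split() above:
 *
 *	unsigned long i;
 *
 *	for (i = 0; i < 100; i++)
 *		mtree_store(&mt, i, xa_mk_value(i), GFP_KERNEL);
 */
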
/*
 * mas_reuse_node() - Reuse the node to store the data.
 * @wr_mas: The maple write state
 * @bn: The maple big node
 * @end: The end of the data.
 *
 * Will always return false in RCU mode.
 *
 * Return: True if node was reused, false otherwise.
 */
static inline bool mas_reuse_node(struct ma_wr_state *wr_mas,
				  struct maple_big_node *bn, unsigned char end)
{
	/* Need to be rcu safe. */
	if (mt_in_rcu(wr_mas->mas->tree))
		return false;

	if (end > bn->b_end) {
		int clear = mt_slots[wr_mas->type] - bn->b_end;

		memset(wr_mas->slots + bn->b_end, 0, sizeof(void *) * clear--);
		memset(wr_mas->pivots + bn->b_end, 0, sizeof(void *) * clear);
	}
	mab_mas_cp(bn, 0, bn->b_end, wr_mas->mas, false);
	return true;
}

/*
 * mas_commit_b_node() - Commit the big node into the tree.
 * @wr_mas: The maple write state
 * @b_node: The maple big node
 * @end: The end of the data.
 */
static noinline_for_kasan int mas_commit_b_node(struct ma_wr_state *wr_mas,
			    struct maple_big_node *b_node, unsigned char end)
{
	struct maple_node *node;
	unsigned char b_end = b_node->b_end;
	enum maple_type b_type = b_node->type;

	if ((b_end < mt_min_slots[b_type]) &&
	    (!mte_is_root(wr_mas->mas->node)) &&
	    (mas_mt_height(wr_mas->mas) > 1))
		return mas_rebalance(wr_mas->mas, b_node);

	if (b_end >= mt_slots[b_type])
		return mas_split(wr_mas->mas, b_node);

	if (mas_reuse_node(wr_mas, b_node, end))
		goto reuse_node;

	mas_node_count(wr_mas->mas, 1);
	if (mas_is_err(wr_mas->mas))
		return 0;

	node = mas_pop_node(wr_mas->mas);
	node->parent = mas_mn(wr_mas->mas)->parent;
	wr_mas->mas->node = mt_mk_node(node, b_type);
	mab_mas_cp(b_node, 0, b_end, wr_mas->mas, false);
	mas_replace(wr_mas->mas, false);
reuse_node:
	mas_update_gap(wr_mas->mas);
	return 1;
}

/*
 * mas_root_expand() - Expand a root to a node
 * @mas: The maple state
 * @entry: The entry to store into the tree
 */
static inline int mas_root_expand(struct ma_state *mas, void *entry)
{
	void *contents = mas_root_locked(mas);
	enum maple_type type = maple_leaf_64;
	struct maple_node *node;
	void __rcu **slots;
	unsigned long *pivots;
	int slot = 0;

	mas_node_count(mas, 1);
	if (unlikely(mas_is_err(mas)))
		return 0;

	node = mas_pop_node(mas);
	pivots = ma_pivots(node, type);
	slots = ma_slots(node, type);
	node->parent = ma_parent_ptr(
		      ((unsigned long)mas->tree | MA_ROOT_PARENT));
	mas->node = mt_mk_node(node, type);

	if (mas->index) {
		if (contents) {
			rcu_assign_pointer(slots[slot], contents);
			if (likely(mas->index > 1))
				slot++;
		}
		pivots[slot++] = mas->index - 1;
	}

	rcu_assign_pointer(slots[slot], entry);
	mas->offset = slot;
	pivots[slot] = mas->last;
	if (mas->last != ULONG_MAX)
		pivots[++slot] = ULONG_MAX;

	mas->depth = 1;
	mas_set_height(mas);
	ma_set_meta(node, maple_leaf_64, 0, slot);
	/* swap the new root into the tree */
	rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
	return slot;
}

static inline void mas_store_root(struct ma_state *mas, void *entry)
{
	if (likely((mas->last != 0) || (mas->index != 0)))
		mas_root_expand(mas, entry);
	else if (((unsigned long) (entry) & 3) == 2)
		mas_root_expand(mas, entry);
	else {
		rcu_assign_pointer(mas->tree->ma_root, entry);
		mas->node = MAS_START;
	}
}

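/*
 * Sketch of the root store cases above (tree and pointer are hypothetical):
 * a store of a plain pointer at index 0 lives directly in ma_root, while
 * any other range forces mas_root_expand() to build a real leaf node:
 *
 *	mtree_store(&mt, 0, ptr, GFP_KERNEL);	direct ma_root store
 *	mtree_store(&mt, 5, ptr, GFP_KERNEL);	expands to a maple_leaf_64
 */
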
/*
 * mas_is_span_wr() - Check if the write needs to be treated as a write that
 * spans the node.
 * @wr_mas: The maple write state
 *
 * Spanning writes are writes that start in one node and end in another OR if
 * the write of a %NULL will cause the node to end with a %NULL.
 *
 * Return: True if this is a spanning write, false otherwise.
 */
static bool mas_is_span_wr(struct ma_wr_state *wr_mas)
{
	unsigned long max = wr_mas->r_max;
	unsigned long last = wr_mas->mas->last;
	enum maple_type type = wr_mas->type;
	void *entry = wr_mas->entry;

	/* Contained in this pivot, fast path */
	if (last < max)
		return false;

	if (ma_is_leaf(type)) {
		max = wr_mas->mas->max;
		if (last < max)
			return false;
	}

	if (last == max) {
		/*
		 * The last entry of leaf node cannot be NULL unless it is the
		 * rightmost node (writing ULONG_MAX), otherwise it spans slots.
		 */
		if (entry || last == ULONG_MAX)
			return false;
	}

	trace_ma_write(__func__, wr_mas->mas, wr_mas->r_max, entry);
	return true;
}

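/*
 * Worked example with invented boundaries: if the current leaf covers
 * 0-100 and the write is 90-120, then last > mas->max and the write spans
 * into the next node.  Separately, writing NULL over 90-100 (the last
 * slot) of a leaf that is not the right-most node also counts as spanning,
 * because a node may not end in NULL short of ULONG_MAX.
 */
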
static inline void mas_wr_walk_descend(struct ma_wr_state *wr_mas)
{
	wr_mas->type = mte_node_type(wr_mas->mas->node);
	mas_wr_node_walk(wr_mas);
	wr_mas->slots = ma_slots(wr_mas->node, wr_mas->type);
}

static inline void mas_wr_walk_traverse(struct ma_wr_state *wr_mas)
{
	wr_mas->mas->max = wr_mas->r_max;
	wr_mas->mas->min = wr_mas->r_min;
	wr_mas->mas->node = wr_mas->content;
	wr_mas->mas->offset = 0;
	wr_mas->mas->depth++;
}

/*
 * mas_wr_walk() - Walk the tree for a write.
 * @wr_mas: The maple write state
 *
 * Uses mas_slot_locked() and does not need to worry about dead nodes.
 *
 * Return: True if it's contained in a node, false on spanning write.
 */
static bool mas_wr_walk(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;

	while (true) {
		mas_wr_walk_descend(wr_mas);
		if (unlikely(mas_is_span_wr(wr_mas)))
			return false;

		wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
						  mas->offset);
		if (ma_is_leaf(wr_mas->type))
			return true;

		mas_wr_walk_traverse(wr_mas);
	}

	return true;
}

static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;

	while (true) {
		mas_wr_walk_descend(wr_mas);
		wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
						  mas->offset);
		if (ma_is_leaf(wr_mas->type))
			return true;
		mas_wr_walk_traverse(wr_mas);
	}
	return true;
}

/*
 * mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs.
 * @l_wr_mas: The left maple write state
 * @r_wr_mas: The right maple write state
 */
static inline void mas_extend_spanning_null(struct ma_wr_state *l_wr_mas,
					    struct ma_wr_state *r_wr_mas)
{
	struct ma_state *r_mas = r_wr_mas->mas;
	struct ma_state *l_mas = l_wr_mas->mas;
	unsigned char l_slot;

	l_slot = l_mas->offset;
	if (!l_wr_mas->content)
		l_mas->index = l_wr_mas->r_min;

	if ((l_mas->index == l_wr_mas->r_min) &&
	    (l_slot &&
	     !mas_slot_locked(l_mas, l_wr_mas->slots, l_slot - 1))) {
		if (l_slot > 1)
			l_mas->index = l_wr_mas->pivots[l_slot - 2] + 1;
		else
			l_mas->index = l_mas->min;

		l_mas->offset = l_slot - 1;
	}

	if (!r_wr_mas->content) {
		if (r_mas->last < r_wr_mas->r_max)
			r_mas->last = r_wr_mas->r_max;
		r_mas->offset++;
	} else if ((r_mas->last == r_wr_mas->r_max) &&
	    (r_mas->last < r_mas->max) &&
	    !mas_slot_locked(r_mas, r_wr_mas->slots, r_mas->offset + 1)) {
		r_mas->last = mas_safe_pivot(r_mas, r_wr_mas->pivots,
					     r_wr_mas->type, r_mas->offset + 1);
		r_mas->offset++;
	}
}

static inline void *mas_state_walk(struct ma_state *mas)
{
	void *entry;

	entry = mas_start(mas);
	if (mas_is_none(mas))
		return NULL;

	if (mas_is_ptr(mas))
		return entry;

	return mtree_range_walk(mas);
}

/*
 * mtree_lookup_walk() - Internal quick lookup that does not keep maple state up
 * to date.
 * @mas: The maple state.
 *
 * Note: Leaves mas in undesirable state.
 * Return: The entry for @mas->index or %NULL on dead node.
 */
static inline void *mtree_lookup_walk(struct ma_state *mas)
{
	unsigned long *pivots;
	unsigned char offset;
	struct maple_node *node;
	struct maple_enode *next;
	enum maple_type type;
	void __rcu **slots;
	unsigned char end;
	unsigned long max;

	next = mas->node;
	max = ULONG_MAX;
	do {
		offset = 0;
		node = mte_to_node(next);
		type = mte_node_type(next);
		pivots = ma_pivots(node, type);
		end = ma_data_end(node, type, pivots, max);
		if (unlikely(ma_dead_node(node)))
			goto dead_node;
		do {
			if (pivots[offset] >= mas->index) {
				max = pivots[offset];
				break;
			}
		} while (++offset < end);

		slots = ma_slots(node, type);
		next = mt_slot(mas->tree, slots, offset);
		if (unlikely(ma_dead_node(node)))
			goto dead_node;
	} while (!ma_is_leaf(type));

	return (void *)next;

dead_node:
	mas_reset(mas);
	return NULL;
}

/*
 * mas_new_root() - Create a new root node that only contains the entry passed
 * in.
 * @mas: The maple state
 * @entry: The entry to store.
 *
 * Only valid when the index == 0 and the last == ULONG_MAX
 *
 * Return 0 on error, 1 on success.
 */
static inline int mas_new_root(struct ma_state *mas, void *entry)
{
	struct maple_enode *root = mas_root_locked(mas);
	enum maple_type type = maple_leaf_64;
	struct maple_node *node;
	void __rcu **slots;
	unsigned long *pivots;

	if (!entry && !mas->index && mas->last == ULONG_MAX) {
		mas->depth = 0;
		mas_set_height(mas);
		rcu_assign_pointer(mas->tree->ma_root, entry);
		mas->node = MAS_START;
		goto done;
	}

	mas_node_count(mas, 1);
	if (mas_is_err(mas))
		return 0;

	node = mas_pop_node(mas);
	pivots = ma_pivots(node, type);
	slots = ma_slots(node, type);
	node->parent = ma_parent_ptr(
		      ((unsigned long)mas->tree | MA_ROOT_PARENT));
	mas->node = mt_mk_node(node, type);
	rcu_assign_pointer(slots[0], entry);
	pivots[0] = mas->last;
	mas->depth = 1;
	mas_set_height(mas);
	rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));

done:
	if (xa_is_node(root))
		mte_destroy_walk(root, mas->tree);

	return 1;
}

/*
 * mas_wr_spanning_store() - Create a subtree with the store operation completed
 * and new nodes where necessary, then place the sub-tree in the actual tree.
 * Note that mas is expected to point to the node which caused the store to
 * span.
 * @wr_mas: The maple write state
 *
 * Return: 0 on error, positive on success.
 */
static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
{
	struct maple_subtree_state mast;
	struct maple_big_node b_node;
	struct ma_state *mas;
	unsigned char height;

	/* Left and Right side of spanning store */
	MA_STATE(l_mas, NULL, 0, 0);
	MA_STATE(r_mas, NULL, 0, 0);

	MA_WR_STATE(r_wr_mas, &r_mas, wr_mas->entry);
	MA_WR_STATE(l_wr_mas, &l_mas, wr_mas->entry);

	/*
	 * A store operation that spans multiple nodes is called a spanning
	 * store and is handled early in the store call stack by the function
	 * mas_is_span_wr().  When a spanning store is identified, the maple
	 * state is duplicated.  The first maple state walks the left tree path
	 * to ``index``, the duplicate walks the right tree path to ``last``.
	 * The data in the two nodes are combined into a single node, two nodes,
	 * or possibly three nodes (see the 3-way split above).  A ``NULL``
	 * written to the last entry of a node is considered a spanning store as
	 * a rebalance is required for the operation to complete and an overflow
	 * of data may happen.
	 */
	mas = wr_mas->mas;
	trace_ma_op(__func__, mas);

	if (unlikely(!mas->index && mas->last == ULONG_MAX))
		return mas_new_root(mas, wr_mas->entry);
	/*
	 * Node rebalancing may occur due to this store, so there may be three new
	 * entries per level plus a new root.
	 */
	height = mas_mt_height(mas);
	mas_node_count(mas, 1 + height * 3);
	if (mas_is_err(mas))
		return 0;

	/*
	 * Set up right side.  Need to get to the next offset after the spanning
	 * store to ensure it's not NULL and to combine both the next node and
	 * the node with the start together.
	 */
	r_mas = *mas;
	/* Avoid overflow, walk to next slot in the tree. */
	if (r_mas.last + 1)
		r_mas.last++;

	r_mas.index = r_mas.last;
	mas_wr_walk_index(&r_wr_mas);
	r_mas.last = r_mas.index = mas->last;

	/* Set up left side. */
	l_mas = *mas;
	mas_wr_walk_index(&l_wr_mas);

	if (!wr_mas->entry) {
		mas_extend_spanning_null(&l_wr_mas, &r_wr_mas);
		mas->offset = l_mas.offset;
		mas->index = l_mas.index;
		mas->last = l_mas.last = r_mas.last;
	}

	/* expanding NULLs may make this cover the entire range */
	if (!l_mas.index && r_mas.last == ULONG_MAX) {
		mas_set_range(mas, 0, ULONG_MAX);
		return mas_new_root(mas, wr_mas->entry);
	}

	memset(&b_node, 0, sizeof(struct maple_big_node));
	/* Copy l_mas and store the value in b_node. */
	mas_store_b_node(&l_wr_mas, &b_node, l_wr_mas.node_end);
	/* Copy r_mas into b_node. */
	if (r_mas.offset <= r_wr_mas.node_end)
		mas_mab_cp(&r_mas, r_mas.offset, r_wr_mas.node_end,
			   &b_node, b_node.b_end + 1);
	else
		b_node.b_end++;

	/* Stop spanning searches by searching for just index. */
	l_mas.index = l_mas.last = mas->index;

	mast.bn = &b_node;
	mast.orig_l = &l_mas;
	mast.orig_r = &r_mas;
	/* Combine l_mas and r_mas and split them up evenly again. */
	return mas_spanning_rebalance(mas, &mast, height + 1);
}

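/*
 * Hedged example (node boundaries invented): if one leaf ends at 63 and
 * its neighbour starts at 64, a single store covering 60-70 is flagged by
 * mas_is_span_wr() and lands here, where both leaves are merged into
 * b_node and a balanced subtree is rebuilt:
 *
 *	mtree_store_range(&mt, 60, 70, xa_mk_value(9), GFP_KERNEL);
 */
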
/*
 * mas_wr_node_store() - Attempt to store the value in a node
 * @wr_mas: The maple write state
 * @new_end: The end of the node's data after the modification
 *
 * Attempts to reuse the node, but may allocate.
 *
 * Return: True if stored, false otherwise
 */
static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas,
				     unsigned char new_end)
{
	struct ma_state *mas = wr_mas->mas;
	void __rcu **dst_slots;
	unsigned long *dst_pivots;
	unsigned char dst_offset, offset_end = wr_mas->offset_end;
	struct maple_node reuse, *newnode;
	unsigned char copy_size, node_pivots = mt_pivots[wr_mas->type];
	bool in_rcu = mt_in_rcu(mas->tree);

	/* A node left with too little data takes the slow path, unless in bulk mode. */
	if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) &&
	    !(mas->mas_flags & MA_STATE_BULK))
		return false;

	if (mas->last == wr_mas->end_piv)
		offset_end++; /* don't copy this offset */
	else if (unlikely(wr_mas->r_max == ULONG_MAX))
		mas_bulk_rebalance(mas, wr_mas->node_end, wr_mas->type);

	/* set up node. */
	if (in_rcu) {
		mas_node_count(mas, 1);
		if (mas_is_err(mas))
			return false;

		newnode = mas_pop_node(mas);
	} else {
		memset(&reuse, 0, sizeof(struct maple_node));
		newnode = &reuse;
	}

	newnode->parent = mas_mn(mas)->parent;
	dst_pivots = ma_pivots(newnode, wr_mas->type);
	dst_slots = ma_slots(newnode, wr_mas->type);
	/* Copy from start to insert point */
	memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * mas->offset);
	memcpy(dst_slots, wr_mas->slots, sizeof(void *) * mas->offset);

	/* Handle insert of new range starting after old range */
	if (wr_mas->r_min < mas->index) {
		rcu_assign_pointer(dst_slots[mas->offset], wr_mas->content);
		dst_pivots[mas->offset++] = mas->index - 1;
	}

	/* Store the new entry and range end. */
	if (mas->offset < node_pivots)
		dst_pivots[mas->offset] = mas->last;
	rcu_assign_pointer(dst_slots[mas->offset], wr_mas->entry);

	/*
	 * this range wrote to the end of the node or it overwrote the rest of
	 * the data
	 */
	if (offset_end > wr_mas->node_end)
		goto done;

	dst_offset = mas->offset + 1;
	/* Copy to the end of node if necessary. */
	copy_size = wr_mas->node_end - offset_end + 1;
	memcpy(dst_slots + dst_offset, wr_mas->slots + offset_end,
	       sizeof(void *) * copy_size);
	memcpy(dst_pivots + dst_offset, wr_mas->pivots + offset_end,
	       sizeof(unsigned long) * (copy_size - 1));

	if (new_end < node_pivots)
		dst_pivots[new_end] = mas->max;

done:
	mas_leaf_set_meta(mas, newnode, dst_pivots, maple_leaf_64, new_end);
	if (in_rcu) {
		mte_set_node_dead(mas->node);
		mas->node = mt_mk_node(newnode, wr_mas->type);
		mas_replace(mas, false);
	} else {
		memcpy(wr_mas->node, newnode, sizeof(struct maple_node));
	}
	trace_ma_write(__func__, mas, 0, wr_mas->entry);
	mas_update_gap(mas);
	return true;
}

/*
 * mas_wr_slot_store() - Attempt to store a value in a slot.
 * @wr_mas: the maple write state
 *
 * Return: True if stored, false otherwise
 */
static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;
	unsigned char offset = mas->offset;
	bool gap = false;

	if (wr_mas->offset_end - offset != 1)
		return false;

	gap |= !mt_slot_locked(mas->tree, wr_mas->slots, offset);
	gap |= !mt_slot_locked(mas->tree, wr_mas->slots, offset + 1);

	if (mas->index == wr_mas->r_min) {
		/* Overwriting the range and over a part of the next range. */
		rcu_assign_pointer(wr_mas->slots[offset], wr_mas->entry);
		wr_mas->pivots[offset] = mas->last;
	} else {
		/* Overwriting a part of the range and over the next range */
		rcu_assign_pointer(wr_mas->slots[offset + 1], wr_mas->entry);
		wr_mas->pivots[offset] = mas->index - 1;
		mas->offset++; /* Keep mas accurate. */
	}

	trace_ma_write(__func__, mas, 0, wr_mas->entry);
	/*
	 * Only update gap when the new entry is empty or there is an empty
	 * entry in the original two ranges.
	 */
	if (!wr_mas->entry || gap)
		mas_update_gap(mas);

	return true;
}

static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas)
{
	while ((wr_mas->offset_end < wr_mas->node_end) &&
	       (wr_mas->mas->last > wr_mas->pivots[wr_mas->offset_end]))
		wr_mas->offset_end++;

	if (wr_mas->offset_end < wr_mas->node_end)
		wr_mas->end_piv = wr_mas->pivots[wr_mas->offset_end];
	else
		wr_mas->end_piv = wr_mas->mas->max;
}

static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;

	if (!wr_mas->slots[wr_mas->offset_end]) {
		/* If this one is null, the next and prev are not */
		mas->last = wr_mas->end_piv;
	} else {
		/* Check next slot(s) if we are overwriting the end */
		if ((mas->last == wr_mas->end_piv) &&
		    (wr_mas->node_end != wr_mas->offset_end) &&
		    !wr_mas->slots[wr_mas->offset_end + 1]) {
			wr_mas->offset_end++;
			if (wr_mas->offset_end == wr_mas->node_end)
				mas->last = mas->max;
			else
				mas->last = wr_mas->pivots[wr_mas->offset_end];
			wr_mas->end_piv = mas->last;
		}
	}

	if (!wr_mas->content) {
		/* If this one is null, the next and prev are not */
		mas->index = wr_mas->r_min;
	} else {
		/* Check prev slot if we are overwriting the start */
		if (mas->index == wr_mas->r_min && mas->offset &&
		    !wr_mas->slots[mas->offset - 1]) {
			mas->offset--;
			wr_mas->r_min = mas->index =
				mas_safe_min(mas, wr_mas->pivots, mas->offset);
			wr_mas->r_max = wr_mas->pivots[mas->offset];
		}
	}
}

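/*
 * Worked example (layout hypothetical): given |0-4: NULL |5-9: A |10-14:
 * NULL |, storing NULL over 5-9 is widened here to 0-14 so that a leaf
 * never carries two adjacent NULL ranges:
 *
 *	mtree_store_range(&mt, 5, 9, NULL, GFP_KERNEL);
 */
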
static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;
	unsigned char new_end = wr_mas->node_end + 2;

	new_end -= wr_mas->offset_end - mas->offset;
	if (wr_mas->r_min == mas->index)
		new_end--;

	if (wr_mas->end_piv == mas->last)
		new_end--;

	return new_end;
}

/*
 * mas_wr_append() - Attempt to append to the node
 * @wr_mas: the maple write state
 *
 * Return: True if appended, false otherwise
 */
static inline bool mas_wr_append(struct ma_wr_state *wr_mas)
{
	unsigned char end = wr_mas->node_end;
	unsigned char new_end = end + 1;
	struct ma_state *mas = wr_mas->mas;
	unsigned char node_pivots = mt_pivots[wr_mas->type];

	if (mas->offset != wr_mas->node_end)
		return false;

	if (new_end < node_pivots) {
		wr_mas->pivots[new_end] = wr_mas->pivots[end];
		ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end);
	}

	if (mas->last == wr_mas->r_max) {
		/* Append to end of range */
		rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->entry);
		wr_mas->pivots[end] = mas->index - 1;
		mas->offset = new_end;
	} else {
		/* Append to start of range */
		rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->content);
		wr_mas->pivots[end] = mas->last;
		rcu_assign_pointer(wr_mas->slots[end], wr_mas->entry);
	}

	if (!wr_mas->content || !wr_mas->entry)
		mas_update_gap(mas);

	return true;
}

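/*
 * Sketch of the append fast path (values invented): if the last used slot
 * covers 10-20 and the write is 15-20, the new entry is placed in a fresh
 * slot before the old pivot is lowered to 14, so a racing reader sees
 * either the old range or the two new ones:
 *
 *	mtree_store_range(&mt, 15, 20, xa_mk_value(7), GFP_KERNEL);
 */
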
/*
 * mas_wr_bnode() - Slow path for a modification.
 * @wr_mas: The write maple state
 *
 * This is where split, rebalance end up.
 */
static void mas_wr_bnode(struct ma_wr_state *wr_mas)
{
	struct maple_big_node b_node;

	trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
	memset(&b_node, 0, sizeof(struct maple_big_node));
	mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
	mas_commit_b_node(wr_mas, &b_node, wr_mas->node_end);
}

static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;
	unsigned char new_end;

	/* Direct replacement */
	if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) {
		rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry);
		if (!!wr_mas->entry ^ !!wr_mas->content)
			mas_update_gap(mas);
		return;
	}

	/*
	 * new_end exceeds the size of the maple node and cannot enter the fast
	 * path.
	 */
	new_end = mas_wr_new_end(wr_mas);
	if (new_end >= mt_slots[wr_mas->type])
		goto slow_path;

	/* Attempt to append */
	if (new_end == wr_mas->node_end + 1 && mas_wr_append(wr_mas))
		return;

	if (new_end == wr_mas->node_end && mas_wr_slot_store(wr_mas))
		return;

	if (mas_wr_node_store(wr_mas, new_end))
		return;

	if (mas_is_err(mas))
		return;

slow_path:
	mas_wr_bnode(wr_mas);
}

/*
 * mas_wr_store_entry() - Internal call to store a value
 * @wr_mas: The maple write state
 *
 * Return: The contents that was stored at the index.
 */
static inline void *mas_wr_store_entry(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;

	wr_mas->content = mas_start(mas);
	if (mas_is_none(mas) || mas_is_ptr(mas)) {
		mas_store_root(mas, wr_mas->entry);
		return wr_mas->content;
	}

	if (unlikely(!mas_wr_walk(wr_mas))) {
		mas_wr_spanning_store(wr_mas);
		return wr_mas->content;
	}

	/* At this point, we are at the leaf node that needs to be altered. */
	mas_wr_end_piv(wr_mas);

	if (!wr_mas->entry)
		mas_wr_extend_null(wr_mas);

	/* New root for a single pointer */
	if (unlikely(!mas->index && mas->last == ULONG_MAX)) {
		mas_new_root(mas, wr_mas->entry);
		return wr_mas->content;
	}

	mas_wr_modify(wr_mas);
	return wr_mas->content;
}

/*
 * mas_insert() - Internal call to insert a value
 * @mas: The maple state
 * @entry: The entry to store
 *
 * Return: %NULL on success, or the contents that already exist at the
 * requested index otherwise.  The maple state needs to be checked for error
 * conditions.
 */
static inline void *mas_insert(struct ma_state *mas, void *entry)
{
	MA_WR_STATE(wr_mas, mas, entry);

	/*
	 * Inserting a new range inserts either 0, 1, or 2 pivots within the
	 * tree.  If the insert fits exactly into an existing gap with a value
	 * of NULL, then the slot only needs to be written with the new value.
	 * If the range being inserted is adjacent to another range, then only a
	 * single pivot needs to be inserted (as well as writing the entry).  If
	 * the new range is within a gap but does not touch any other ranges,
	 * then two pivots need to be inserted: the start - 1, and the end.  As
	 * usual, the entry must be written.  Most operations require a new node
	 * to be allocated and replace an existing node to ensure RCU safety,
	 * when in RCU mode.  The exception to requiring a newly allocated node
	 * is when inserting at the end of a node (appending).  When done
	 * carefully, appending can reuse the node in place.
	 */
	wr_mas.content = mas_start(mas);
	if (wr_mas.content)
		goto exists;

	if (mas_is_none(mas) || mas_is_ptr(mas)) {
		mas_store_root(mas, entry);
		return NULL;
	}

	/* spanning writes always overwrite something */
	if (!mas_wr_walk(&wr_mas))
		goto exists;

	/* At this point, we are at the leaf node that needs to be altered. */
	wr_mas.offset_end = mas->offset;
	wr_mas.end_piv = wr_mas.r_max;

	if (wr_mas.content || (mas->last > wr_mas.r_max))
		goto exists;

	mas_wr_modify(&wr_mas);
	return wr_mas.content;

exists:
	mas_set_err(mas, -EEXIST);
	return wr_mas.content;
}
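
/*
 * Editor's illustrative sketch (not part of the original source): the insert
 * semantics above are visible through the public wrapper mtree_insert_range(),
 * defined later in this file.  Inserting over an occupied range fails with
 * -EEXIST rather than overwriting:
 */
static __maybe_unused void example_insert_semantics(struct maple_tree *mt)
{
	/* Store a value for the range [10, 20]; the gap only needs pivots. */
	int ret = mtree_insert_range(mt, 10, 20, xa_mk_value(1), GFP_KERNEL);

	WARN_ON(ret);

	/* An overlapping insert is rejected; a store would overwrite. */
	ret = mtree_insert_range(mt, 15, 25, xa_mk_value(2), GFP_KERNEL);
	WARN_ON(ret != -EEXIST);
}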
static inline void mas_rewalk(struct ma_state *mas, unsigned long index)
{
retry:
	mas_set(mas, index);
	mas_state_walk(mas);
	if (mas_is_start(mas))
		goto retry;
}

static inline bool mas_rewalk_if_dead(struct ma_state *mas,
		struct maple_node *node, const unsigned long index)
{
	if (unlikely(ma_dead_node(node))) {
		mas_rewalk(mas, index);
		return true;
	}
	return false;
}
/*
 * mas_prev_node() - Find the prev non-null entry at the same level in the
 * tree.  The prev node value will be mas->node[mas->offset] or MAS_NONE.
 * @mas: The maple state
 * @min: The lower limit to search
 *
 * Return: 1 if the node is dead, 0 otherwise.
 */
static inline int mas_prev_node(struct ma_state *mas, unsigned long min)
{
	enum maple_type mt;
	int offset, level;
	void __rcu **slots;
	struct maple_node *node;
	unsigned long *pivots;
	unsigned long max;

	node = mas_mn(mas);
	if (!mas->min)
		goto no_entry;

	max = mas->min - 1;
	if (max < min)
		goto no_entry;

	level = 0;
	do {
		if (ma_is_root(node))
			goto no_entry;

		/* Walk up. */
		if (unlikely(mas_ascend(mas)))
			return 1;
		offset = mas->offset;
		level++;
		node = mas_mn(mas);
	} while (!offset);

	offset--;
	mt = mte_node_type(mas->node);
	while (level > 1) {
		level--;
		slots = ma_slots(node, mt);
		mas->node = mas_slot(mas, slots, offset);
		if (unlikely(ma_dead_node(node)))
			return 1;

		mt = mte_node_type(mas->node);
		node = mas_mn(mas);
		pivots = ma_pivots(node, mt);
		offset = ma_data_end(node, mt, pivots, max);
		if (unlikely(ma_dead_node(node)))
			return 1;
	}

	slots = ma_slots(node, mt);
	mas->node = mas_slot(mas, slots, offset);
	pivots = ma_pivots(node, mt);
	if (unlikely(ma_dead_node(node)))
		return 1;

	if (likely(offset))
		mas->min = pivots[offset - 1] + 1;
	mas->max = max;
	mas->offset = mas_data_end(mas);
	if (unlikely(mte_dead_node(mas->node)))
		return 1;

	return 0;

no_entry:
	if (unlikely(ma_dead_node(node)))
		return 1;

	mas->node = MAS_NONE;
	return 0;
}
/*
 * mas_prev_slot() - Get the entry in the previous slot
 *
 * @mas: The maple state
 * @min: The minimum starting range
 * @empty: Can be empty
 *
 * Return: The entry in the previous slot which is possibly NULL
 */
static void *mas_prev_slot(struct ma_state *mas, unsigned long min, bool empty)
{
	void *entry;
	void __rcu **slots;
	unsigned long pivot;
	enum maple_type type;
	unsigned long *pivots;
	struct maple_node *node;
	unsigned long save_point = mas->index;

retry:
	node = mas_mn(mas);
	type = mte_node_type(mas->node);
	pivots = ma_pivots(node, type);
	if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
		goto retry;

again:
	if (mas->min <= min) {
		pivot = mas_safe_min(mas, pivots, mas->offset);

		if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
			goto retry;

		if (pivot <= min)
			return NULL;
	}

	if (likely(mas->offset)) {
		mas->offset--;
		mas->last = mas->index - 1;
		mas->index = mas_safe_min(mas, pivots, mas->offset);
	} else {
		if (mas_prev_node(mas, min)) {
			mas_rewalk(mas, save_point);
			goto retry;
		}

		if (mas_is_none(mas))
			return NULL;

		mas->last = mas->max;
		node = mas_mn(mas);
		type = mte_node_type(mas->node);
		pivots = ma_pivots(node, type);
		mas->index = pivots[mas->offset - 1] + 1;
	}

	slots = ma_slots(node, type);
	entry = mas_slot(mas, slots, mas->offset);
	if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
		goto retry;

	if (likely(entry))
		return entry;

	if (!empty)
		goto again;

	return entry;
}
/*
 * mas_next_node() - Get the next node at the same level in the tree.
 * @mas: The maple state
 * @max: The maximum pivot value to check.
 *
 * The next value will be mas->node[mas->offset] or MAS_NONE.
 * Return: 1 on dead node, 0 otherwise.
 */
static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
		unsigned long max)
{
	unsigned long min;
	unsigned long *pivots;
	struct maple_enode *enode;
	int level = 0;
	unsigned char node_end;
	enum maple_type mt;
	void __rcu **slots;

	if (mas->max >= max)
		goto no_entry;

	min = mas->max + 1;
	level = 0;
	do {
		if (ma_is_root(node))
			goto no_entry;

		/* Walk up. */
		if (unlikely(mas_ascend(mas)))
			return 1;

		level++;
		node = mas_mn(mas);
		mt = mte_node_type(mas->node);
		pivots = ma_pivots(node, mt);
		node_end = ma_data_end(node, mt, pivots, mas->max);
		if (unlikely(ma_dead_node(node)))
			return 1;

	} while (unlikely(mas->offset == node_end));

	slots = ma_slots(node, mt);
	mas->offset++;
	enode = mas_slot(mas, slots, mas->offset);
	if (unlikely(ma_dead_node(node)))
		return 1;

	if (level > 1)
		mas->offset = 0;

	while (unlikely(level > 1)) {
		level--;
		mas->node = enode;
		node = mas_mn(mas);
		mt = mte_node_type(mas->node);
		slots = ma_slots(node, mt);
		enode = mas_slot(mas, slots, 0);
		if (unlikely(ma_dead_node(node)))
			return 1;
	}

	pivots = ma_pivots(node, mt);

	mas->max = mas_safe_pivot(mas, pivots, mas->offset, mt);
	if (unlikely(ma_dead_node(node)))
		return 1;

	mas->node = enode;
	mas->min = min;
	return 0;

no_entry:
	if (unlikely(ma_dead_node(node)))
		return 1;

	mas->node = MAS_NONE;
	return 0;
}
/*
 * mas_next_slot() - Get the entry in the next slot
 *
 * @mas: The maple state
 * @max: The maximum starting range
 * @empty: Can be empty
 *
 * Return: The entry in the next slot which is possibly NULL
 */
static void *mas_next_slot(struct ma_state *mas, unsigned long max, bool empty)
{
	void __rcu **slots;
	unsigned long *pivots;
	unsigned long pivot;
	enum maple_type type;
	struct maple_node *node;
	unsigned char data_end;
	unsigned long save_point = mas->last;
	void *entry;

retry:
	node = mas_mn(mas);
	type = mte_node_type(mas->node);
	pivots = ma_pivots(node, type);
	data_end = ma_data_end(node, type, pivots, mas->max);
	if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
		goto retry;

again:
	if (mas->max >= max) {
		if (likely(mas->offset < data_end))
			pivot = pivots[mas->offset];
		else
			return NULL; /* must be mas->max */

		if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
			goto retry;

		if (pivot >= max)
			return NULL;
	}

	if (likely(mas->offset < data_end)) {
		mas->index = pivots[mas->offset] + 1;
		mas->offset++;
		if (likely(mas->offset < data_end))
			mas->last = pivots[mas->offset];
		else
			mas->last = mas->max;
	} else {
		if (mas_next_node(mas, node, max)) {
			mas_rewalk(mas, save_point);
			goto retry;
		}

		if (mas_is_none(mas))
			return NULL;

		mas->offset = 0;
		mas->index = mas->min;
		node = mas_mn(mas);
		type = mte_node_type(mas->node);
		pivots = ma_pivots(node, type);
		mas->last = pivots[0];
	}

	slots = ma_slots(node, type);
	entry = mt_slot(mas->tree, slots, mas->offset);
	if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
		goto retry;

	if (entry)
		return entry;

	if (!empty) {
		if (!mas->offset)
			data_end = ma_data_end(node, type, pivots, mas->max);
		goto again;
	}

	return entry;
}
/*
 * mas_next_entry() - Internal function to get the next entry.
 * @mas: The maple state
 * @limit: The maximum range start.
 *
 * Set the @mas->node to the next entry and the range_start to
 * the beginning value for the entry.  Does not check beyond @limit.
 * Sets @mas->index and @mas->last to the limit if it is hit.
 * Restarts on dead nodes.
 *
 * Return: the next entry or %NULL.
 */
static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit)
{
	if (mas->last >= limit)
		return NULL;

	return mas_next_slot(mas, limit, false);
}
/*
 * mas_rev_awalk() - Internal function.  Reverse allocation walk.  Find the
 * highest gap address of a given size in a given node and descend.
 * @mas: The maple state
 * @size: The needed size.
 * @gap_min: Set to the minimum of the gap when one is found in a leaf.
 * @gap_max: Set to the maximum of the gap when one is found in a leaf.
 *
 * Return: True if found in a leaf, false otherwise.
 */
static bool mas_rev_awalk(struct ma_state *mas, unsigned long size,
		unsigned long *gap_min, unsigned long *gap_max)
{
	enum maple_type type = mte_node_type(mas->node);
	struct maple_node *node = mas_mn(mas);
	unsigned long *pivots, *gaps;
	void __rcu **slots;
	unsigned long gap = 0;
	unsigned long max, min;
	unsigned char offset;

	if (unlikely(mas_is_err(mas)))
		return true;

	if (ma_is_dense(type)) {
		/* dense nodes. */
		mas->offset = (unsigned char)(mas->index - mas->min);
		return true;
	}

	pivots = ma_pivots(node, type);
	slots = ma_slots(node, type);
	gaps = ma_gaps(node, type);
	offset = mas->offset;
	min = mas_safe_min(mas, pivots, offset);
	/* Skip out of bounds. */
	while (mas->last < min)
		min = mas_safe_min(mas, pivots, --offset);

	max = mas_safe_pivot(mas, pivots, offset, type);
	while (mas->index <= max) {
		gap = 0;
		if (gaps)
			gap = gaps[offset];
		else if (!mas_slot(mas, slots, offset))
			gap = max - min + 1;

		if (gap) {
			if ((size <= gap) && (size <= mas->last - min + 1))
				break;

			if (!gaps) {
				/* Skip the next slot, it cannot be a gap. */
				if (offset < 2)
					goto ascend;

				offset -= 2;
				max = pivots[offset];
				min = mas_safe_min(mas, pivots, offset);
				continue;
			}
		}

		if (!offset)
			goto ascend;

		offset--;
		max = min - 1;
		min = mas_safe_min(mas, pivots, offset);
	}

	if (unlikely((mas->index > max) || (size - 1 > max - mas->index)))
		goto no_space;

	if (unlikely(ma_is_leaf(type))) {
		mas->offset = offset;
		*gap_min = min;
		*gap_max = min + gap - 1;
		return true;
	}

	/* descend, only happens under lock. */
	mas->node = mas_slot(mas, slots, offset);
	mas->min = min;
	mas->max = max;
	mas->offset = mas_data_end(mas);
	return false;

ascend:
	if (!mte_is_root(mas->node))
		return false;

no_space:
	mas_set_err(mas, -EBUSY);
	return false;
}
static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
{
	enum maple_type type = mte_node_type(mas->node);
	unsigned long pivot, min, gap = 0;
	unsigned char offset, data_end;
	unsigned long *gaps, *pivots;
	void __rcu **slots;
	struct maple_node *node;
	bool found = false;

	if (ma_is_dense(type)) {
		mas->offset = (unsigned char)(mas->index - mas->min);
		return true;
	}

	node = mas_mn(mas);
	pivots = ma_pivots(node, type);
	slots = ma_slots(node, type);
	gaps = ma_gaps(node, type);
	offset = mas->offset;
	min = mas_safe_min(mas, pivots, offset);
	data_end = ma_data_end(node, type, pivots, mas->max);
	for (; offset <= data_end; offset++) {
		pivot = mas_logical_pivot(mas, pivots, offset, type);

		/* Not within lower bounds */
		if (mas->index > pivot)
			goto next_slot;

		if (gaps)
			gap = gaps[offset];
		else if (!mas_slot(mas, slots, offset))
			gap = min(pivot, mas->last) - max(mas->index, min) + 1;
		else
			goto next_slot;

		if (gap >= size) {
			if (ma_is_leaf(type)) {
				found = true;
				goto done;
			}
			if (mas->index <= pivot) {
				mas->node = mas_slot(mas, slots, offset);
				mas->min = min;
				mas->max = pivot;
				offset = 0;
				break;
			}
		}
next_slot:
		min = pivot + 1;
		if (mas->last <= pivot) {
			mas_set_err(mas, -EBUSY);
			return true;
		}
	}

	if (mte_is_root(mas->node))
		found = true;
done:
	mas->offset = offset;
	return found;
}
/*
 * mas_walk() - Search for @mas->index in the tree.
 * @mas: The maple state.
 *
 * mas->index and mas->last will be set to the range if there is a value.  If
 * mas->node is MAS_NONE, reset to MAS_START.
 *
 * Return: the entry at the location or %NULL.
 */
void *mas_walk(struct ma_state *mas)
{
	void *entry;

	if (mas_is_none(mas) || mas_is_paused(mas) || mas_is_ptr(mas))
		mas->node = MAS_START;
retry:
	entry = mas_state_walk(mas);
	if (mas_is_start(mas)) {
		goto retry;
	} else if (mas_is_none(mas)) {
		mas->index = 0;
		mas->last = ULONG_MAX;
	} else if (mas_is_ptr(mas)) {
		if (!mas->index) {
			mas->last = 0;
			return entry;
		}

		mas->index = 1;
		mas->last = ULONG_MAX;
		mas->node = MAS_NONE;
		return NULL;
	}

	return entry;
}
EXPORT_SYMBOL_GPL(mas_walk);
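
/*
 * Editor's illustrative sketch (not part of the original source): a minimal
 * read-side lookup with mas_walk().  The entry pointer must be treated
 * according to RCU rules once the read lock is dropped.
 */
static __maybe_unused void *example_mas_walk(struct maple_tree *mt,
		unsigned long index)
{
	MA_STATE(mas, mt, index, index);
	void *entry;

	rcu_read_lock();
	entry = mas_walk(&mas);	/* mas.index/mas.last now span the range */
	rcu_read_unlock();
	return entry;
}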
static inline bool mas_rewind_node(struct ma_state *mas)
{
	unsigned char slot;

	do {
		if (mte_is_root(mas->node)) {
			slot = mas->offset;
			if (!slot)
				return false;
		} else {
			mas_ascend(mas);
			slot = mas->offset;
		}
	} while (!slot);

	mas->offset = --slot;
	return true;
}

/*
 * mas_skip_node() - Internal function.  Skip over a node.
 * @mas: The maple state.
 *
 * Return: true if there is another node, false otherwise.
 */
static inline bool mas_skip_node(struct ma_state *mas)
{
	if (mas_is_err(mas))
		return false;

	do {
		if (mte_is_root(mas->node)) {
			if (mas->offset >= mas_data_end(mas)) {
				mas_set_err(mas, -EBUSY);
				return false;
			}
		} else {
			mas_ascend(mas);
		}
	} while (mas->offset >= mas_data_end(mas));

	mas->offset++;
	return true;
}
/*
 * mas_awalk() - Allocation walk.  Search from low address to high, for a gap
 * of @size.
 * @mas: The maple state
 * @size: The size of the gap required
 *
 * Search between @mas->index and @mas->last for a gap of @size.
 */
static inline void mas_awalk(struct ma_state *mas, unsigned long size)
{
	struct maple_enode *last = NULL;

	/*
	 * There are 4 options:
	 * go to child (descend)
	 * go back to parent (ascend)
	 * no gap found. (return, slot == MAPLE_NODE_SLOTS)
	 * found the gap. (return, slot != MAPLE_NODE_SLOTS)
	 */
	while (!mas_is_err(mas) && !mas_anode_descend(mas, size)) {
		if (last == mas->node)
			mas_skip_node(mas);
		else
			last = mas->node;
	}
}
/*
 * mas_sparse_area() - Internal function.  Return upper or lower limit when
 * searching for a gap in an empty tree.
 * @mas: The maple state
 * @min: the minimum range
 * @max: The maximum range
 * @size: The size of the gap
 * @fwd: Searching forward or back
 */
static inline int mas_sparse_area(struct ma_state *mas, unsigned long min,
		unsigned long max, unsigned long size, bool fwd)
{
	if (!unlikely(mas_is_none(mas)) && min == 0) {
		min++;
		/*
		 * At this time, min is increased, we need to recheck whether
		 * the size is satisfied.
		 */
		if (min > max || max - min + 1 < size)
			return -EBUSY;
	}

	if (fwd) {
		mas->index = min;
		mas->last = min + size - 1;
	} else {
		mas->last = max;
		mas->index = max - size + 1;
	}
	return 0;
}
/**
 * mas_empty_area() - Get the lowest address within the range that is
 * sufficient for the size requested.
 * @mas: The maple state
 * @min: The lowest value of the range
 * @max: The highest value of the range
 * @size: The size needed
 */
int mas_empty_area(struct ma_state *mas, unsigned long min,
		unsigned long max, unsigned long size)
{
	unsigned char offset;
	unsigned long *pivots;
	enum maple_type mt;

	if (min > max)
		return -EINVAL;

	if (size == 0 || max - min < size - 1)
		return -EINVAL;

	if (mas_is_start(mas))
		mas_start(mas);
	else if (mas->offset >= 2)
		mas->offset -= 2;
	else if (!mas_skip_node(mas))
		return -EBUSY;

	/* Empty set */
	if (mas_is_none(mas) || mas_is_ptr(mas))
		return mas_sparse_area(mas, min, max, size, true);

	/* The start of the window can only be within these values */
	mas->index = min;
	mas->last = max;
	mas_awalk(mas, size);

	if (unlikely(mas_is_err(mas)))
		return xa_err(mas->node);

	offset = mas->offset;
	if (unlikely(offset == MAPLE_NODE_SLOTS))
		return -EBUSY;

	mt = mte_node_type(mas->node);
	pivots = ma_pivots(mas_mn(mas), mt);
	min = mas_safe_min(mas, pivots, offset);
	if (mas->index < min)
		mas->index = min;
	mas->last = mas->index + size - 1;
	return 0;
}
EXPORT_SYMBOL_GPL(mas_empty_area);
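
/*
 * Editor's illustrative sketch (not part of the original source): finding
 * the lowest 16-slot gap in [0, 1000].  Gap tracking assumes the tree was
 * created with MT_FLAGS_ALLOC_RANGE.  On success, mas.index/mas.last
 * describe the proposed window.
 */
static __maybe_unused int example_empty_area(struct maple_tree *mt,
		unsigned long *startp)
{
	MA_STATE(mas, mt, 0, 0);
	int ret;

	mas_lock(&mas);
	ret = mas_empty_area(&mas, 0, 1000, 16);
	if (!ret)
		*startp = mas.index;
	mas_unlock(&mas);
	return ret;
}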
/**
 * mas_empty_area_rev() - Get the highest address within the range that is
 * sufficient for the size requested.
 * @mas: The maple state
 * @min: The lowest value of the range
 * @max: The highest value of the range
 * @size: The size needed
 */
int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
		unsigned long max, unsigned long size)
{
	struct maple_enode *last = mas->node;

	if (min > max)
		return -EINVAL;

	if (size == 0 || max - min < size - 1)
		return -EINVAL;

	if (mas_is_start(mas)) {
		mas_start(mas);
		mas->offset = mas_data_end(mas);
	} else if (mas->offset >= 2) {
		mas->offset -= 2;
	} else if (!mas_rewind_node(mas)) {
		return -EBUSY;
	}

	/* Empty set. */
	if (mas_is_none(mas) || mas_is_ptr(mas))
		return mas_sparse_area(mas, min, max, size, false);

	/* The start of the window can only be within these values. */
	mas->index = min;
	mas->last = max;

	while (!mas_rev_awalk(mas, size, &min, &max)) {
		if (last == mas->node) {
			if (!mas_rewind_node(mas))
				return -EBUSY;
		} else {
			last = mas->node;
		}
	}

	if (mas_is_err(mas))
		return xa_err(mas->node);

	if (unlikely(mas->offset == MAPLE_NODE_SLOTS))
		return -EBUSY;

	/* Trim the upper limit to the max. */
	if (max < mas->last)
		mas->last = max;

	mas->index = mas->last - size + 1;
	return 0;
}
EXPORT_SYMBOL_GPL(mas_empty_area_rev);
/*
 * mte_dead_leaves() - Mark all leaves of a node as dead.
 * @enode: The encoded maple node
 * @mt: The maple tree
 * @slots: Pointer to the slot array
 *
 * Must hold the write lock.
 *
 * Return: The number of leaves marked as dead.
 */
static inline
unsigned char mte_dead_leaves(struct maple_enode *enode, struct maple_tree *mt,
			      void __rcu **slots)
{
	struct maple_node *node;
	enum maple_type type;
	void *entry;
	int offset;

	for (offset = 0; offset < mt_slot_count(enode); offset++) {
		entry = mt_slot(mt, slots, offset);
		type = mte_node_type(entry);
		node = mte_to_node(entry);
		/* Use both node and type to catch LE & BE metadata */
		if (!node || !type)
			break;

		mte_set_node_dead(entry);
		node->type = type;
		rcu_assign_pointer(slots[offset], node);
	}

	return offset;
}
/*
 * mte_dead_walk() - Walk down a dead tree to just before the leaves
 * @enode: The maple encoded node
 * @offset: The starting offset
 *
 * Note: This can only be used from the RCU callback context.
 */
static void __rcu **mte_dead_walk(struct maple_enode **enode,
		unsigned char offset)
{
	struct maple_node *node, *next;
	void __rcu **slots = NULL;

	next = mte_to_node(*enode);
	do {
		*enode = ma_enode_ptr(next);
		node = mte_to_node(*enode);
		slots = ma_slots(node, node->type);
		next = rcu_dereference_protected(slots[offset],
					lock_is_held(&rcu_callback_map));
		offset = 0;
	} while (!ma_is_leaf(next->type));

	return slots;
}
/*
 * mt_free_walk() - Walk & free a tree in the RCU callback context
 * @head: The RCU head that's within the node.
 *
 * Note: This can only be used from the RCU callback context.
 */
static void mt_free_walk(struct rcu_head *head)
{
	void __rcu **slots;
	struct maple_node *node, *start;
	struct maple_enode *enode;
	unsigned char offset;
	enum maple_type type;

	node = container_of(head, struct maple_node, rcu);

	if (ma_is_leaf(node->type))
		goto free_leaf;

	start = node;
	enode = mt_mk_node(node, node->type);
	slots = mte_dead_walk(&enode, 0);
	node = mte_to_node(enode);
	do {
		mt_free_bulk(node->slot_len, slots);
		offset = node->parent_slot + 1;
		enode = node->piv_parent;
		if (mte_to_node(enode) == node)
			goto free_leaf;

		type = mte_node_type(enode);
		slots = ma_slots(mte_to_node(enode), type);
		if ((offset < mt_slots[type]) &&
		    rcu_dereference_protected(slots[offset],
					      lock_is_held(&rcu_callback_map)))
			slots = mte_dead_walk(&enode, offset);
		node = mte_to_node(enode);
	} while ((node != start) || (node->slot_len < offset));

	slots = ma_slots(node, node->type);
	mt_free_bulk(node->slot_len, slots);

free_leaf:
	mt_free_rcu(&node->rcu);
}
static inline void __rcu **mte_destroy_descend(struct maple_enode **enode,
	struct maple_tree *mt, struct maple_enode *prev, unsigned char offset)
{
	struct maple_node *node;
	struct maple_enode *next = *enode;
	void __rcu **slots = NULL;
	enum maple_type type;
	unsigned char next_offset = 0;

	do {
		*enode = next;
		node = mte_to_node(*enode);
		type = mte_node_type(*enode);
		slots = ma_slots(node, type);
		next = mt_slot_locked(mt, slots, next_offset);
		if ((mte_dead_node(next)))
			next = mt_slot_locked(mt, slots, ++next_offset);

		mte_set_node_dead(*enode);
		node->type = type;
		node->piv_parent = prev;
		node->parent_slot = offset;
		offset = next_offset;
		next_offset = 0;
		prev = *enode;
	} while (!mte_is_leaf(next));

	return slots;
}
static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
			    bool free)
{
	void __rcu **slots;
	struct maple_node *node = mte_to_node(enode);
	struct maple_enode *start;

	if (mte_is_leaf(enode)) {
		node->type = mte_node_type(enode);
		goto free_leaf;
	}

	start = enode;
	slots = mte_destroy_descend(&enode, mt, start, 0);
	node = mte_to_node(enode);	// Updated in the above call.
	do {
		enum maple_type type;
		unsigned char offset;
		struct maple_enode *parent, *tmp;

		node->slot_len = mte_dead_leaves(enode, mt, slots);
		if (free)
			mt_free_bulk(node->slot_len, slots);
		offset = node->parent_slot + 1;
		enode = node->piv_parent;
		if (mte_to_node(enode) == node)
			goto free_leaf;

		type = mte_node_type(enode);
		slots = ma_slots(mte_to_node(enode), type);
		if (offset >= mt_slots[type])
			goto next;

		tmp = mt_slot_locked(mt, slots, offset);
		if (mte_node_type(tmp) && mte_to_node(tmp)) {
			parent = enode;
			enode = tmp;
			slots = mte_destroy_descend(&enode, mt, parent, offset);
		}
next:
		node = mte_to_node(enode);
	} while (start != enode);

	node = mte_to_node(enode);
	node->slot_len = mte_dead_leaves(enode, mt, slots);
	if (free)
		mt_free_bulk(node->slot_len, slots);

free_leaf:
	if (free)
		mt_free_rcu(&node->rcu);
	else
		mt_clear_meta(mt, node, node->type);
}
/*
 * mte_destroy_walk() - Free a tree or sub-tree.
 * @enode: the encoded maple node (maple_enode) to start
 * @mt: the tree to free - needed for node types.
 *
 * Must hold the write lock.
 */
static inline void mte_destroy_walk(struct maple_enode *enode,
				    struct maple_tree *mt)
{
	struct maple_node *node = mte_to_node(enode);

	if (mt_in_rcu(mt)) {
		mt_destroy_walk(enode, mt, false);
		call_rcu(&node->rcu, mt_free_walk);
	} else {
		mt_destroy_walk(enode, mt, true);
	}
}
static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
{
	if (unlikely(mas_is_paused(wr_mas->mas)))
		mas_reset(wr_mas->mas);

	if (!mas_is_start(wr_mas->mas)) {
		if (mas_is_none(wr_mas->mas)) {
			mas_reset(wr_mas->mas);
		} else {
			wr_mas->r_max = wr_mas->mas->max;
			wr_mas->type = mte_node_type(wr_mas->mas->node);
			if (mas_is_span_wr(wr_mas))
				mas_reset(wr_mas->mas);
		}
	}
}
/**
 * mas_store() - Store an @entry.
 * @mas: The maple state.
 * @entry: The entry to store.
 *
 * The @mas->index and @mas->last are used to set the range for the @entry.
 * Note: The @mas should have pre-allocated entries to ensure there is memory
 * to store the entry.  Please see mas_expected_entries()/mas_destroy() for
 * more details.
 *
 * Return: the first entry between mas->index and mas->last or %NULL.
 */
void *mas_store(struct ma_state *mas, void *entry)
{
	MA_WR_STATE(wr_mas, mas, entry);

	trace_ma_write(__func__, mas, 0, entry);
#ifdef CONFIG_DEBUG_MAPLE_TREE
	if (MAS_WARN_ON(mas, mas->index > mas->last))
		pr_err("Error %lX > %lX %p\n", mas->index, mas->last, entry);
#endif

	if (mas->index > mas->last) {
		mas_set_err(mas, -EINVAL);
		return NULL;
	}

	/*
	 * Storing is the same operation as insert with the added caveat that
	 * it can overwrite entries.  Although this seems simple enough, one
	 * may want to examine what happens if a single store operation was to
	 * overwrite multiple entries within a self-balancing B-Tree.
	 */
	mas_wr_store_setup(&wr_mas);
	mas_wr_store_entry(&wr_mas);
	return wr_mas.content;
}
EXPORT_SYMBOL_GPL(mas_store);
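
/*
 * Editor's illustrative sketch (not part of the original source): using the
 * advanced API under the lock.  An allocation failure is reported through
 * the state and retried with mas_nomem(), mirroring mas_store_gfp() below.
 */
static __maybe_unused int example_mas_store(struct maple_tree *mt)
{
	MA_STATE(mas, mt, 5, 15);	/* store over the range [5, 15] */
	void *entry = xa_mk_value(42);

	mas_lock(&mas);
	do {
		mas_store(&mas, entry);
	} while (mas_nomem(&mas, GFP_KERNEL));
	mas_unlock(&mas);

	return mas_is_err(&mas) ? xa_err(mas.node) : 0;
}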
/**
 * mas_store_gfp() - Store a value into the tree.
 * @mas: The maple state
 * @entry: The entry to store
 * @gfp: The GFP_FLAGS to use for allocations if necessary.
 *
 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could
 * not be allocated.
 */
int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp)
{
	MA_WR_STATE(wr_mas, mas, entry);

	mas_wr_store_setup(&wr_mas);
	trace_ma_write(__func__, mas, 0, entry);
retry:
	mas_wr_store_entry(&wr_mas);
	if (unlikely(mas_nomem(mas, gfp)))
		goto retry;

	if (unlikely(mas_is_err(mas)))
		return xa_err(mas->node);

	return 0;
}
EXPORT_SYMBOL_GPL(mas_store_gfp);
/**
 * mas_store_prealloc() - Store a value into the tree using memory
 * preallocated in the maple state.
 * @mas: The maple state
 * @entry: The entry to store.
 */
void mas_store_prealloc(struct ma_state *mas, void *entry)
{
	MA_WR_STATE(wr_mas, mas, entry);

	mas_wr_store_setup(&wr_mas);
	trace_ma_write(__func__, mas, 0, entry);
	mas_wr_store_entry(&wr_mas);
	MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas));
	mas_destroy(mas);
}
EXPORT_SYMBOL_GPL(mas_store_prealloc);
/**
 * mas_preallocate() - Preallocate enough nodes for a store operation
 * @mas: The maple state
 * @gfp: The GFP_FLAGS to use for allocations.
 *
 * Return: 0 on success, -ENOMEM if memory could not be allocated.
 */
int mas_preallocate(struct ma_state *mas, gfp_t gfp)
{
	int ret;

	mas_node_count_gfp(mas, 1 + mas_mt_height(mas) * 3, gfp);
	mas->mas_flags |= MA_STATE_PREALLOC;
	if (likely(!mas_is_err(mas)))
		return 0;

	mas_set_alloc_req(mas, 0);
	ret = xa_err(mas->node);
	mas_reset(mas);
	mas_destroy(mas);
	mas_reset(mas);
	return ret;
}
EXPORT_SYMBOL_GPL(mas_preallocate);
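
/*
 * Editor's illustrative sketch (not part of the original source): the
 * preallocation pattern - reserve nodes before entering a critical section,
 * then store without any chance of an allocation failure:
 */
static __maybe_unused int example_prealloc_store(struct ma_state *mas,
		void *entry)
{
	if (mas_preallocate(mas, GFP_KERNEL))
		return -ENOMEM;

	/* Under the lock: cannot fail for lack of memory now. */
	mas_store_prealloc(mas, entry);	/* also frees any unused nodes */
	return 0;
}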
/*
 * mas_destroy() - destroy a maple state.
 * @mas: The maple state
 *
 * Upon completion, check the left-most node and rebalance against the node to
 * the right if necessary.  Frees any allocated nodes associated with this
 * maple state.
 */
void mas_destroy(struct ma_state *mas)
{
	struct maple_alloc *node;
	unsigned long total;

	/*
	 * When using mas_for_each() to insert an expected number of elements,
	 * it is possible that the number inserted is less than the expected
	 * number.  To fix an invalid final node, a check is performed here to
	 * rebalance the previous node with the final node.
	 */
	if (mas->mas_flags & MA_STATE_REBALANCE) {
		unsigned char end;

		mas_start(mas);
		mtree_range_walk(mas);
		end = mas_data_end(mas) + 1;
		if (end < mt_min_slot_count(mas->node) - 1)
			mas_destroy_rebalance(mas, end);

		mas->mas_flags &= ~MA_STATE_REBALANCE;
	}
	mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC);

	total = mas_allocated(mas);
	while (total) {
		node = mas->alloc;
		mas->alloc = node->slot[0];
		if (node->node_count > 1) {
			size_t count = node->node_count - 1;

			mt_free_bulk(count, (void __rcu **)&node->slot[1]);
			total -= count;
		}
		kmem_cache_free(maple_node_cache, node);
		total--;
	}

	mas->alloc = NULL;
}
EXPORT_SYMBOL_GPL(mas_destroy);
/*
 * mas_expected_entries() - Set the expected number of entries that will be
 * inserted.
 * @mas: The maple state
 * @nr_entries: The number of expected entries.
 *
 * This will attempt to pre-allocate enough nodes to store the expected number
 * of entries.  The allocations will occur using the bulk allocator interface
 * for speed.  Please call mas_destroy() on the @mas after inserting the
 * entries to ensure any unused nodes are freed.
 *
 * Return: 0 on success, -ENOMEM if memory could not be allocated.
 */
int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
{
	int nonleaf_cap = MAPLE_ARANGE64_SLOTS - 2;
	struct maple_enode *enode = mas->node;
	int nr_nodes;
	int ret;

	/*
	 * Sometimes it is necessary to duplicate a tree to a new tree, such as
	 * forking a process and duplicating the VMAs from one tree to a new
	 * tree.  When such a situation arises, it is known that the new tree
	 * is not going to be used until the entire tree is populated.  For
	 * performance reasons, it is best to use a bulk load with RCU
	 * disabled.  This allows for optimistic splitting that favours the
	 * left and reuse of nodes during the operation.
	 */

	/* Optimize splitting for bulk insert in-order */
	mas->mas_flags |= MA_STATE_BULK;

	/*
	 * Avoid overflow, assume a gap between each entry and a trailing null.
	 * If this is wrong, it just means allocation can happen during
	 * insertion of entries.
	 */
	nr_nodes = max(nr_entries, nr_entries * 2 + 1);
	if (!mt_is_alloc(mas->tree))
		nonleaf_cap = MAPLE_RANGE64_SLOTS - 2;

	/* Leaves; reduce slots to keep space for expansion */
	nr_nodes = DIV_ROUND_UP(nr_nodes, MAPLE_RANGE64_SLOTS - 2);
	/* Internal nodes */
	nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
	/* Add working room for split (2 nodes) + new parents */
	mas_node_count(mas, nr_nodes + 3);

	/* Detect if allocations run out */
	mas->mas_flags |= MA_STATE_PREALLOC;

	if (!mas_is_err(mas))
		return 0;

	ret = xa_err(mas->node);
	mas->node = enode;
	mas_destroy(mas);
	return ret;
}
EXPORT_SYMBOL_GPL(mas_expected_entries);
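
/*
 * Editor's illustrative sketch (not part of the original source): bulk
 * loading an empty tree, in the spirit of duplicating VMAs on fork.  The
 * final mas_destroy() frees unused nodes and, via MA_STATE_REBALANCE, fixes
 * up the last node if fewer entries than expected were inserted.  The
 * ranges used here are arbitrary.
 */
static __maybe_unused int example_bulk_fill(struct maple_tree *mt,
		void **entries, unsigned long nr)
{
	MA_STATE(mas, mt, 0, 0);
	unsigned long i;
	int ret;

	mas_lock(&mas);
	ret = mas_expected_entries(&mas, nr);
	if (ret)
		goto unlock;

	for (i = 0; i < nr; i++) {
		mas_set_range(&mas, i * 10, i * 10 + 9);
		mas_store(&mas, entries[i]);
	}
unlock:
	mas_destroy(&mas);
	mas_unlock(&mas);
	return ret;
}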
static inline bool mas_next_setup(struct ma_state *mas, unsigned long max,
		void **entry)
{
	bool was_none = mas_is_none(mas);

	if (mas_is_none(mas) || mas_is_paused(mas))
		mas->node = MAS_START;

	if (mas_is_start(mas))
		*entry = mas_walk(mas); /* Retries on dead nodes handled by mas_walk */

	if (mas_is_ptr(mas)) {
		*entry = NULL;
		if (was_none && mas->index == 0) {
			mas->index = mas->last = 0;
			return true;
		}
		mas->index = 1;
		mas->last = ULONG_MAX;
		mas->node = MAS_NONE;
		return true;
	}

	if (mas_is_none(mas))
		return true;

	return false;
}
/**
 * mas_next() - Get the next entry.
 * @mas: The maple state
 * @max: The maximum index to check.
 *
 * Returns the next entry after @mas->index.
 * Must hold rcu_read_lock or the write lock.
 * Can return the zero entry.
 *
 * Return: The next entry or %NULL
 */
void *mas_next(struct ma_state *mas, unsigned long max)
{
	void *entry = NULL;

	if (mas_next_setup(mas, max, &entry))
		return entry;

	/* Retries on dead nodes handled by mas_next_slot */
	return mas_next_slot(mas, max, false);
}
EXPORT_SYMBOL_GPL(mas_next);
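
/*
 * Editor's illustrative sketch (not part of the original source): stepping
 * through all entries with mas_next() under RCU:
 */
static __maybe_unused void example_mas_next(struct maple_tree *mt)
{
	MA_STATE(mas, mt, 0, 0);
	void *entry;

	rcu_read_lock();
	while ((entry = mas_next(&mas, ULONG_MAX)) != NULL)
		pr_debug("[%lx, %lx] -> %p\n", mas.index, mas.last, entry);
	rcu_read_unlock();
}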
/**
 * mas_next_range() - Advance the maple state to the next range
 * @mas: The maple state
 * @max: The maximum index to check.
 *
 * Sets @mas->index and @mas->last to the range.
 * Must hold rcu_read_lock or the write lock.
 * Can return the zero entry.
 *
 * Return: The next entry or %NULL
 */
void *mas_next_range(struct ma_state *mas, unsigned long max)
{
	void *entry = NULL;

	if (mas_next_setup(mas, max, &entry))
		return entry;

	/* Retries on dead nodes handled by mas_next_slot */
	return mas_next_slot(mas, max, true);
}
EXPORT_SYMBOL_GPL(mas_next_range);
/**
 * mt_next() - get the next value in the maple tree
 * @mt: The maple tree
 * @index: The start index
 * @max: The maximum index to check
 *
 * Return: The entry higher than @index, or %NULL if nothing is found.
 */
void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max)
{
	void *entry = NULL;
	MA_STATE(mas, mt, index, index);

	rcu_read_lock();
	entry = mas_next(&mas, max);
	rcu_read_unlock();
	return entry;
}
EXPORT_SYMBOL_GPL(mt_next);
static inline bool mas_prev_setup(struct ma_state *mas, unsigned long min,
		void **entry)
{
	if (mas->index <= min)
		goto none;

	if (mas_is_none(mas) || mas_is_paused(mas))
		mas->node = MAS_START;

	if (mas_is_start(mas)) {
		mas_walk(mas);
		if (!mas->index)
			goto none;
	}

	if (unlikely(mas_is_ptr(mas))) {
		if (!mas->index)
			goto none;
		mas->index = mas->last = 0;
		*entry = mas_root(mas);
		return true;
	}

	if (mas_is_none(mas)) {
		if (mas->index) {
			/* Walked to out-of-range pointer? */
			mas->index = mas->last = 0;
			mas->node = MAS_ROOT;
			*entry = mas_root(mas);
			return true;
		}
		return true;
	}

	return false;

none:
	mas->node = MAS_NONE;
	return true;
}
/**
 * mas_prev() - Get the previous entry
 * @mas: The maple state
 * @min: The minimum value to check.
 *
 * Must hold rcu_read_lock or the write lock.
 * Will reset mas to MAS_START if the node is MAS_NONE.  Will stop on not
 * searchable nodes.
 *
 * Return: the previous value or %NULL.
 */
void *mas_prev(struct ma_state *mas, unsigned long min)
{
	void *entry = NULL;

	if (mas_prev_setup(mas, min, &entry))
		return entry;

	return mas_prev_slot(mas, min, false);
}
EXPORT_SYMBOL_GPL(mas_prev);
/**
 * mas_prev_range() - Advance to the previous range
 * @mas: The maple state
 * @min: The minimum value to check.
 *
 * Sets @mas->index and @mas->last to the range.
 * Must hold rcu_read_lock or the write lock.
 * Will reset mas to MAS_START if the node is MAS_NONE.  Will stop on not
 * searchable nodes.
 *
 * Return: the previous value or %NULL.
 */
void *mas_prev_range(struct ma_state *mas, unsigned long min)
{
	void *entry = NULL;

	if (mas_prev_setup(mas, min, &entry))
		return entry;

	return mas_prev_slot(mas, min, true);
}
EXPORT_SYMBOL_GPL(mas_prev_range);
/**
 * mt_prev() - get the previous value in the maple tree
 * @mt: The maple tree
 * @index: The start index
 * @min: The minimum index to check
 *
 * Return: The entry before @index, or %NULL if nothing is found.
 */
void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min)
{
	void *entry = NULL;
	MA_STATE(mas, mt, index, index);

	rcu_read_lock();
	entry = mas_prev(&mas, min);
	rcu_read_unlock();
	return entry;
}
EXPORT_SYMBOL_GPL(mt_prev);
/**
 * mas_pause() - Pause a mas_find/mas_for_each to drop the lock.
 * @mas: The maple state to pause
 *
 * Some users need to pause a walk and drop the lock they're holding in
 * order to yield to a higher priority thread or carry out an operation
 * on an entry.  Those users should call this function before they drop
 * the lock.  It resets the @mas to be suitable for the next iteration
 * of the loop after the user has reacquired the lock.  If most entries
 * found during a walk require you to call mas_pause(), the mt_for_each()
 * iterator may be more appropriate.
 */
void mas_pause(struct ma_state *mas)
{
	mas->node = MAS_PAUSE;
}
EXPORT_SYMBOL_GPL(mas_pause);
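
/*
 * Editor's illustrative sketch (not part of the original source): the pause
 * pattern described above - drop the lock mid-walk, then resume.  Assumes
 * the need_resched()/cond_resched() declarations from <linux/sched.h> are
 * visible in this translation unit.
 */
static __maybe_unused void example_mas_pause(struct maple_tree *mt)
{
	MA_STATE(mas, mt, 0, 0);
	void *entry;

	rcu_read_lock();
	mas_for_each(&mas, entry, ULONG_MAX) {
		if (need_resched()) {
			mas_pause(&mas);	/* safe restart point */
			rcu_read_unlock();
			cond_resched();
			rcu_read_lock();
		}
	}
	rcu_read_unlock();
}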
/*
 * mas_find_setup() - Internal function to set up mas_find*().
 * @mas: The maple state
 * @max: The maximum index
 * @entry: Pointer to the entry
 *
 * Returns: True if entry is the answer, false otherwise.
 */
static inline bool mas_find_setup(struct ma_state *mas, unsigned long max,
		void **entry)
{
	*entry = NULL;

	if (unlikely(mas_is_none(mas))) {
		if (unlikely(mas->last >= max))
			return true;

		mas->index = mas->last;
		mas->node = MAS_START;
	} else if (unlikely(mas_is_paused(mas))) {
		if (unlikely(mas->last >= max))
			return true;

		mas->node = MAS_START;
		mas->index = ++mas->last;
	} else if (unlikely(mas_is_ptr(mas)))
		goto ptr_out_of_range;

	if (unlikely(mas_is_start(mas))) {
		/* First run or continue */
		if (mas->index > max)
			return true;

		*entry = mas_walk(mas);
		if (*entry)
			return true;
	}

	if (unlikely(!mas_searchable(mas))) {
		if (unlikely(mas_is_ptr(mas)))
			goto ptr_out_of_range;

		return true;
	}

	if (mas->index == max)
		return true;

	return false;

ptr_out_of_range:
	mas->node = MAS_NONE;
	mas->index = 1;
	mas->last = ULONG_MAX;
	return true;
}
/**
 * mas_find() - On the first call, find the entry at or after mas->index up to
 * %max.  Otherwise, find the entry after mas->index.
 * @mas: The maple state
 * @max: The maximum value to check.
 *
 * Must hold rcu_read_lock or the write lock.
 * If an entry exists, last and index are updated accordingly.
 * May set @mas->node to MAS_NONE.
 *
 * Return: The entry or %NULL.
 */
void *mas_find(struct ma_state *mas, unsigned long max)
{
	void *entry = NULL;

	if (mas_find_setup(mas, max, &entry))
		return entry;

	/* Retries on dead nodes handled by mas_next_slot */
	return mas_next_slot(mas, max, false);
}
EXPORT_SYMBOL_GPL(mas_find);
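
/*
 * Editor's illustrative sketch (not part of the original source): mas_find()
 * is the engine behind the mas_for_each() iterator from <linux/maple_tree.h>:
 */
static __maybe_unused unsigned long example_count_entries(struct maple_tree *mt,
		unsigned long min, unsigned long max)
{
	MA_STATE(mas, mt, min, min);
	unsigned long count = 0;
	void *entry;

	rcu_read_lock();
	mas_for_each(&mas, entry, max)
		count++;
	rcu_read_unlock();
	return count;
}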
/**
 * mas_find_range() - On the first call, find the entry at or after
 * mas->index up to %max.  Otherwise, advance to the next slot after
 * mas->index.
 * @mas: The maple state
 * @max: The maximum value to check.
 *
 * Must hold rcu_read_lock or the write lock.
 * If an entry exists, last and index are updated accordingly.
 * May set @mas->node to MAS_NONE.
 *
 * Return: The entry or %NULL.
 */
void *mas_find_range(struct ma_state *mas, unsigned long max)
{
	void *entry = NULL;

	if (mas_find_setup(mas, max, &entry))
		return entry;

	/* Retries on dead nodes handled by mas_next_slot */
	return mas_next_slot(mas, max, true);
}
EXPORT_SYMBOL_GPL(mas_find_range);
/*
 * mas_find_rev_setup() - Internal function to set up mas_find_*_rev()
 * @mas: The maple state
 * @min: The minimum index
 * @entry: Pointer to the entry
 *
 * Returns: True if entry is the answer, false otherwise.
 */
static inline bool mas_find_rev_setup(struct ma_state *mas, unsigned long min,
		void **entry)
{
	*entry = NULL;

	if (unlikely(mas_is_none(mas))) {
		if (mas->index <= min)
			goto none;

		mas->last = mas->index;
		mas->node = MAS_START;
	}

	if (unlikely(mas_is_paused(mas))) {
		if (unlikely(mas->index <= min)) {
			mas->node = MAS_NONE;
			return true;
		}
		mas->node = MAS_START;
		mas->last = --mas->index;
	}

	if (unlikely(mas_is_start(mas))) {
		/* First run or continue */
		if (mas->index < min)
			return true;

		*entry = mas_walk(mas);
		if (*entry)
			return true;
	}

	if (unlikely(!mas_searchable(mas))) {
		if (mas_is_ptr(mas))
			goto none;

		if (mas_is_none(mas)) {
			/*
			 * Walked to the location, and there was nothing so the
			 * previous location is 0.
			 */
			mas->last = mas->index = 0;
			mas->node = MAS_ROOT;
			*entry = mas_root(mas);
			return true;
		}
	}

	if (mas->index < min)
		return true;

	return false;

none:
	mas->node = MAS_NONE;
	return true;
}
/**
 * mas_find_rev: On the first call, find the first non-null entry at or below
 * mas->index down to %min.  Otherwise find the first non-null entry below
 * mas->index down to %min.
 * @mas: The maple state
 * @min: The minimum value to check.
 *
 * Must hold rcu_read_lock or the write lock.
 * If an entry exists, last and index are updated accordingly.
 * May set @mas->node to MAS_NONE.
 *
 * Return: The entry or %NULL.
 */
void *mas_find_rev(struct ma_state *mas, unsigned long min)
{
	void *entry = NULL;

	if (mas_find_rev_setup(mas, min, &entry))
		return entry;

	/* Retries on dead nodes handled by mas_prev_slot */
	return mas_prev_slot(mas, min, false);
}
EXPORT_SYMBOL_GPL(mas_find_rev);
/**
 * mas_find_range_rev: On the first call, find the first non-null entry at or
 * below mas->index down to %min.  Otherwise advance to the previous slot
 * after mas->index down to %min.
 * @mas: The maple state
 * @min: The minimum value to check.
 *
 * Must hold rcu_read_lock or the write lock.
 * If an entry exists, last and index are updated accordingly.
 * May set @mas->node to MAS_NONE.
 *
 * Return: The entry or %NULL.
 */
void *mas_find_range_rev(struct ma_state *mas, unsigned long min)
{
	void *entry = NULL;

	if (mas_find_rev_setup(mas, min, &entry))
		return entry;

	/* Retries on dead nodes handled by mas_prev_slot */
	return mas_prev_slot(mas, min, true);
}
EXPORT_SYMBOL_GPL(mas_find_range_rev);
/**
 * mas_erase() - Find the range in which index resides and erase the entire
 * range.
 * @mas: The maple state
 *
 * Must hold the write lock.
 * Searches for @mas->index, sets @mas->index and @mas->last to the range and
 * erases that range.
 *
 * Return: the entry that was erased or %NULL; @mas->index and @mas->last are
 * updated.
 */
void *mas_erase(struct ma_state *mas)
{
	void *entry;
	MA_WR_STATE(wr_mas, mas, NULL);

	if (mas_is_none(mas) || mas_is_paused(mas))
		mas->node = MAS_START;

	/* Retry unnecessary when holding the write lock. */
	entry = mas_state_walk(mas);
	if (!entry)
		return NULL;

write_retry:
	/* Must reset to ensure spanning writes of last slot are detected */
	mas_reset(mas);
	mas_wr_store_setup(&wr_mas);
	mas_wr_store_entry(&wr_mas);
	if (mas_nomem(mas, GFP_KERNEL))
		goto write_retry;

	return entry;
}
EXPORT_SYMBOL_GPL(mas_erase);
/**
 * mas_nomem() - Check if there was an error allocating and do the allocation
 * if necessary.  If there are allocations, then free them.
 * @mas: The maple state
 * @gfp: The GFP_FLAGS to use for allocations
 * Return: true on allocation, false otherwise.
 */
bool mas_nomem(struct ma_state *mas, gfp_t gfp)
	__must_hold(mas->tree->ma_lock)
{
	if (likely(mas->node != MA_ERROR(-ENOMEM))) {
		mas_destroy(mas);
		return false;
	}

	if (gfpflags_allow_blocking(gfp) && !mt_external_lock(mas->tree)) {
		mtree_unlock(mas->tree);
		mas_alloc_nodes(mas, gfp);
		mtree_lock(mas->tree);
	} else {
		mas_alloc_nodes(mas, gfp);
	}

	if (!mas_allocated(mas))
		return false;

	mas->node = MAS_START;
	return true;
}
void __init maple_tree_init(void)
{
	maple_node_cache = kmem_cache_create("maple_node",
			sizeof(struct maple_node), sizeof(struct maple_node),
			SLAB_PANIC, NULL);
}
/**
 * mtree_load() - Load a value stored in a maple tree
 * @mt: The maple tree
 * @index: The index to load
 *
 * Return: the entry or %NULL
 */
void *mtree_load(struct maple_tree *mt, unsigned long index)
{
	MA_STATE(mas, mt, index, index);
	void *entry;

	trace_ma_read(__func__, &mas);
	rcu_read_lock();
retry:
	entry = mas_start(&mas);
	if (unlikely(mas_is_none(&mas)))
		goto unlock;

	if (unlikely(mas_is_ptr(&mas))) {
		if (index)
			entry = NULL;

		goto unlock;
	}

	entry = mtree_lookup_walk(&mas);
	if (!entry && unlikely(mas_is_start(&mas)))
		goto retry;
unlock:
	rcu_read_unlock();
	if (xa_is_zero(entry))
		return NULL;

	return entry;
}
EXPORT_SYMBOL(mtree_load);
/**
 * mtree_store_range() - Store an entry at a given range.
 * @mt: The maple tree
 * @index: The start of the range
 * @last: The end of the range
 * @entry: The entry to store
 * @gfp: The GFP_FLAGS to use for allocations
 *
 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could
 * not be allocated.
 */
int mtree_store_range(struct maple_tree *mt, unsigned long index,
		unsigned long last, void *entry, gfp_t gfp)
{
	MA_STATE(mas, mt, index, last);
	MA_WR_STATE(wr_mas, &mas, entry);

	trace_ma_write(__func__, &mas, 0, entry);
	if (WARN_ON_ONCE(xa_is_advanced(entry)))
		return -EINVAL;

	if (index > last)
		return -EINVAL;

	mtree_lock(mt);
retry:
	mas_wr_store_entry(&wr_mas);
	if (mas_nomem(&mas, gfp))
		goto retry;

	mtree_unlock(mt);
	if (mas_is_err(&mas))
		return xa_err(mas.node);

	return 0;
}
EXPORT_SYMBOL(mtree_store_range);
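
/*
 * Editor's illustrative sketch (not part of the original source): the basic
 * store/load round trip through the simple locking API.  One store covers a
 * whole range, so any index inside it returns the same entry:
 */
static __maybe_unused void example_store_load(struct maple_tree *mt)
{
	void *entry;

	mtree_store_range(mt, 100, 199, xa_mk_value(7), GFP_KERNEL);
	entry = mtree_load(mt, 150);
	WARN_ON(entry != xa_mk_value(7));
}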
/**
 * mtree_store() - Store an entry at a given index.
 * @mt: The maple tree
 * @index: The index to store the value
 * @entry: The entry to store
 * @gfp: The GFP_FLAGS to use for allocations
 *
 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could
 * not be allocated.
 */
int mtree_store(struct maple_tree *mt, unsigned long index, void *entry,
		gfp_t gfp)
{
	return mtree_store_range(mt, index, index, entry, gfp);
}
EXPORT_SYMBOL(mtree_store);
/**
 * mtree_insert_range() - Insert an entry at a given range if there is no
 * value.
 * @mt: The maple tree
 * @first: The start of the range
 * @last: The end of the range
 * @entry: The entry to store
 * @gfp: The GFP_FLAGS to use for allocations.
 *
 * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
 * request, -ENOMEM if memory could not be allocated.
 */
int mtree_insert_range(struct maple_tree *mt, unsigned long first,
		unsigned long last, void *entry, gfp_t gfp)
{
	MA_STATE(ms, mt, first, last);

	if (WARN_ON_ONCE(xa_is_advanced(entry)))
		return -EINVAL;

	if (first > last)
		return -EINVAL;

	mtree_lock(mt);
retry:
	mas_insert(&ms, entry);
	if (mas_nomem(&ms, gfp))
		goto retry;

	mtree_unlock(mt);
	if (mas_is_err(&ms))
		return xa_err(ms.node);

	return 0;
}
EXPORT_SYMBOL(mtree_insert_range);
/**
 * mtree_insert() - Insert an entry at a given index if there is no value.
 * @mt: The maple tree
 * @index : The index to store the value
 * @entry: The entry to store
 * @gfp: The GFP_FLAGS to use for allocations.
 *
 * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
 * request, -ENOMEM if memory could not be allocated.
 */
int mtree_insert(struct maple_tree *mt, unsigned long index, void *entry,
		gfp_t gfp)
{
	return mtree_insert_range(mt, index, index, entry, gfp);
}
EXPORT_SYMBOL(mtree_insert);
int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
		void *entry, unsigned long size, unsigned long min,
		unsigned long max, gfp_t gfp)
{
	int ret = 0;

	MA_STATE(mas, mt, 0, 0);
	if (!mt_is_alloc(mt))
		return -EINVAL;

	if (WARN_ON_ONCE(mt_is_reserved(entry)))
		return -EINVAL;

	mtree_lock(mt);
retry:
	ret = mas_empty_area(&mas, min, max, size);
	if (ret)
		goto unlock;

	mas_insert(&mas, entry);
	/*
	 * mas_nomem() may release the lock, causing the allocated area
	 * to be unavailable, so try to allocate a free area again.
	 */
	if (mas_nomem(&mas, gfp))
		goto retry;

	if (mas_is_err(&mas))
		ret = xa_err(mas.node);
	else
		*startp = mas.index;

unlock:
	mtree_unlock(mt);
	return ret;
}
EXPORT_SYMBOL(mtree_alloc_range);
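
/*
 * Editor's illustrative sketch (not part of the original source): reserving
 * a 16-slot window in [0, 1023].  The tree must have been created with
 * MT_FLAGS_ALLOC_RANGE for gap tracking to work:
 */
static __maybe_unused int example_alloc_range(struct maple_tree *mt)
{
	unsigned long start;
	int ret;

	ret = mtree_alloc_range(mt, &start, xa_mk_value(1), 16, 0, 1023,
				GFP_KERNEL);
	if (!ret)
		pr_debug("allocated [%lx, %lx]\n", start, start + 15);
	return ret;
}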
int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
		void *entry, unsigned long size, unsigned long min,
		unsigned long max, gfp_t gfp)
{
	int ret = 0;

	MA_STATE(mas, mt, 0, 0);
	if (!mt_is_alloc(mt))
		return -EINVAL;

	if (WARN_ON_ONCE(mt_is_reserved(entry)))
		return -EINVAL;

	mtree_lock(mt);
retry:
	ret = mas_empty_area_rev(&mas, min, max, size);
	if (ret)
		goto unlock;

	mas_insert(&mas, entry);
	/*
	 * mas_nomem() may release the lock, causing the allocated area
	 * to be unavailable, so try to allocate a free area again.
	 */
	if (mas_nomem(&mas, gfp))
		goto retry;

	if (mas_is_err(&mas))
		ret = xa_err(mas.node);
	else
		*startp = mas.index;

unlock:
	mtree_unlock(mt);
	return ret;
}
EXPORT_SYMBOL(mtree_alloc_rrange);
/**
 * mtree_erase() - Find an index and erase the entire range.
 * @mt: The maple tree
 * @index: The index to erase
 *
 * Erasing is the same as a walk to an entry then a store of a NULL to that
 * ENTIRE range.  In fact, it is implemented as such using the advanced API.
 *
 * Return: The entry stored at the @index or %NULL
 */
void *mtree_erase(struct maple_tree *mt, unsigned long index)
{
	void *entry = NULL;

	MA_STATE(mas, mt, index, index);
	trace_ma_op(__func__, &mas);

	mtree_lock(mt);
	entry = mas_erase(&mas);
	mtree_unlock(mt);

	return entry;
}
EXPORT_SYMBOL(mtree_erase);
/**
 * __mt_destroy() - Walk and free all nodes of a locked maple tree.
 * @mt: The maple tree
 *
 * Note: Does not handle locking.
 */
void __mt_destroy(struct maple_tree *mt)
{
	void *root = mt_root_locked(mt);

	rcu_assign_pointer(mt->ma_root, NULL);
	if (xa_is_node(root))
		mte_destroy_walk(root, mt);

	mt->ma_flags = 0;
}
EXPORT_SYMBOL_GPL(__mt_destroy);

/**
 * mtree_destroy() - Destroy a maple tree
 * @mt: The maple tree
 *
 * Frees all resources used by the tree.  Handles locking.
 */
void mtree_destroy(struct maple_tree *mt)
{
	mtree_lock(mt);
	__mt_destroy(mt);
	mtree_unlock(mt);
}
EXPORT_SYMBOL(mtree_destroy);
/**
 * mt_find() - Search from the start up until an entry is found.
 * @mt: The maple tree
 * @index: Pointer which contains the start location of the search
 * @max: The maximum value to check
 *
 * Handles locking.  @index will be incremented to one beyond the range.
 *
 * Return: The entry at or after the @index or %NULL
 */
void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max)
{
	MA_STATE(mas, mt, *index, *index);
	void *entry;
#ifdef CONFIG_DEBUG_MAPLE_TREE
	unsigned long copy = *index;
#endif

	trace_ma_read(__func__, &mas);

	if ((*index) > max)
		return NULL;

	rcu_read_lock();
retry:
	entry = mas_state_walk(&mas);
	if (mas_is_start(&mas))
		goto retry;

	if (unlikely(xa_is_zero(entry)))
		entry = NULL;

	if (entry)
		goto unlock;

	while (mas_searchable(&mas) && (mas.last < max)) {
		entry = mas_next_entry(&mas, max);
		if (likely(entry && !xa_is_zero(entry)))
			break;
	}

	if (unlikely(xa_is_zero(entry)))
		entry = NULL;
unlock:
	rcu_read_unlock();
	if (likely(entry)) {
		*index = mas.last + 1;
#ifdef CONFIG_DEBUG_MAPLE_TREE
		if (MT_WARN_ON(mt, (*index) && ((*index) <= copy)))
			pr_err("index not increased! %lx <= %lx\n",
			       *index, copy);
#endif
	}

	return entry;
}
EXPORT_SYMBOL(mt_find);
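
/*
 * Editor's illustrative sketch (not part of the original source): the
 * typical mt_find() loop.  @index advances past each range returned, so this
 * mirrors what the mt_for_each() helper macro does:
 */
static __maybe_unused void example_mt_find(struct maple_tree *mt)
{
	unsigned long index = 0;
	void *entry;

	while ((entry = mt_find(mt, &index, ULONG_MAX)) != NULL)
		pr_debug("found %p, next search at %lx\n", entry, index);
}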
/**
 * mt_find_after() - Search from the start up until an entry is found.
 * @mt: The maple tree
 * @index: Pointer which contains the start location of the search
 * @max: The maximum value to check
 *
 * Handles locking, detects wrapping on index == 0
 *
 * Return: The entry at or after the @index or %NULL
 */
void *mt_find_after(struct maple_tree *mt, unsigned long *index,
		    unsigned long max)
{
	if (!(*index))
		return NULL;

	return mt_find(mt, index, max);
}
EXPORT_SYMBOL(mt_find_after);
#ifdef CONFIG_DEBUG_MAPLE_TREE
atomic_t maple_tree_tests_run;
EXPORT_SYMBOL_GPL(maple_tree_tests_run);
atomic_t maple_tree_tests_passed;
EXPORT_SYMBOL_GPL(maple_tree_tests_passed);

#ifndef __KERNEL__
extern void kmem_cache_set_non_kernel(struct kmem_cache *, unsigned int);
void mt_set_non_kernel(unsigned int val)
{
	kmem_cache_set_non_kernel(maple_node_cache, val);
}

extern unsigned long kmem_cache_get_alloc(struct kmem_cache *);
unsigned long mt_get_alloc_size(void)
{
	return kmem_cache_get_alloc(maple_node_cache);
}

extern void kmem_cache_zero_nr_tallocated(struct kmem_cache *);
void mt_zero_nr_tallocated(void)
{
	kmem_cache_zero_nr_tallocated(maple_node_cache);
}

extern unsigned int kmem_cache_nr_tallocated(struct kmem_cache *);
unsigned int mt_nr_tallocated(void)
{
	return kmem_cache_nr_tallocated(maple_node_cache);
}

extern unsigned int kmem_cache_nr_allocated(struct kmem_cache *);
unsigned int mt_nr_allocated(void)
{
	return kmem_cache_nr_allocated(maple_node_cache);
}
/*
 * mas_dead_node() - Check if the maple state is pointing to a dead node.
 * @mas: The maple state
 * @index: The index to restore in @mas.
 *
 * Used in test code.
 * Return: 1 if @mas has been reset to MAS_START, 0 otherwise.
 */
static inline int mas_dead_node(struct ma_state *mas, unsigned long index)
{
	if (unlikely(!mas_searchable(mas) || mas_is_start(mas)))
		return 0;

	if (likely(!mte_dead_node(mas->node)))
		return 0;

	mas_rewalk(mas, index);
	return 1;
}

void mt_cache_shrink(void)
{
}
#else
/*
 * mt_cache_shrink() - For testing, don't use this.
 *
 * Certain testcases can trigger an OOM when combined with other memory
 * debugging configuration options.  This function is used to reduce the
 * possibility of an out of memory event due to kmem_cache objects remaining
 * around for longer than usual.
 */
void mt_cache_shrink(void)
{
	kmem_cache_shrink(maple_node_cache);
}
EXPORT_SYMBOL_GPL(mt_cache_shrink);
#endif /* not defined __KERNEL__ */
/*
 * mas_get_slot() - Get the entry in the maple state node stored at @offset.
 * @mas: The maple state
 * @offset: The offset into the slot array to fetch.
 *
 * Return: The entry stored at @offset.
 */
static inline struct maple_enode *mas_get_slot(struct ma_state *mas,
		unsigned char offset)
{
	return mas_slot(mas, ma_slots(mas_mn(mas), mte_node_type(mas->node)),
			offset);
}
/*
 * mas_first_entry() - Go to the first leaf and find the first entry.
 * @mas: the maple state.
 * @mn: the node to start from.
 * @limit: the maximum index to check.
 * @mt: the node type of @mn.
 *
 * Sets mas->offset to the offset of the entry and mas->index to the range
 * minimum.
 *
 * Return: The first entry or MAS_NONE.
 */
static inline void *mas_first_entry(struct ma_state *mas, struct maple_node *mn,
		unsigned long limit, enum maple_type mt)
{
	unsigned long max;
	unsigned long *pivots;
	void __rcu **slots;
	void *entry = NULL;

	mas->index = mas->min;
	if (mas->index > limit)
		goto none;

	max = mas->max;
	mas->offset = 0;
	while (likely(!ma_is_leaf(mt))) {
		MAS_WARN_ON(mas, mte_dead_node(mas->node));
		slots = ma_slots(mn, mt);
		entry = mas_slot(mas, slots, 0);
		pivots = ma_pivots(mn, mt);
		if (unlikely(ma_dead_node(mn)))
			return NULL;
		max = pivots[0];
		mas->node = entry;
		mn = mas_mn(mas);
		mt = mte_node_type(mas->node);
	}
	MAS_WARN_ON(mas, mte_dead_node(mas->node));

	mas->max = max;
	slots = ma_slots(mn, mt);
	entry = mas_slot(mas, slots, 0);
	if (unlikely(ma_dead_node(mn)))
		return NULL;

	/* Slot 0 or 1 must be set */
	if (mas->index > limit)
		goto none;

	if (likely(entry))
		return entry;

	mas->offset = 1;
	entry = mas_slot(mas, slots, 1);
	pivots = ma_pivots(mn, mt);
	if (unlikely(ma_dead_node(mn)))
		return NULL;

	mas->index = pivots[0] + 1;
	if (mas->index > limit)
		goto none;

	if (likely(entry))
		return entry;

none:
	if (likely(!ma_dead_node(mn)))
		mas->node = MAS_NONE;
	return NULL;
}
/* Depth first search, post-order */
static void mas_dfs_postorder(struct ma_state *mas, unsigned long max)
{
	struct maple_enode *p = MAS_NONE, *mn = mas->node;
	unsigned long p_min, p_max;

	mas_next_node(mas, mas_mn(mas), max);
	if (!mas_is_none(mas))
		return;

	if (mte_is_root(mn))
		return;

	mas->node = mn;
	mas_first_entry(mas, mas_mn(mas), max, mte_node_type(mas->node));
	do {
		p = mas->node;
		p_min = mas->min;
		p_max = mas->max;
		mas_prev_node(mas, 0);
	} while (!mas_is_none(mas));

	mas->node = p;
	mas->max = p_max;
	mas->min = p_min;
}
/* Tree validations */
static void mt_dump_node(const struct maple_tree *mt, void *entry,
		unsigned long min, unsigned long max, unsigned int depth,
		enum mt_dump_format format);
static void mt_dump_range(unsigned long min, unsigned long max,
			  unsigned int depth, enum mt_dump_format format)
{
	static const char spaces[] = "                                ";

	switch(format) {
	case mt_dump_hex:
		if (min == max)
			pr_info("%.*s%lx: ", depth * 2, spaces, min);
		else
			pr_info("%.*s%lx-%lx: ", depth * 2, spaces, min, max);
		break;
	default:
	case mt_dump_dec:
		if (min == max)
			pr_info("%.*s%lu: ", depth * 2, spaces, min);
		else
			pr_info("%.*s%lu-%lu: ", depth * 2, spaces, min, max);
	}
}

static void mt_dump_entry(void *entry, unsigned long min, unsigned long max,
			  unsigned int depth, enum mt_dump_format format)
{
	mt_dump_range(min, max, depth, format);

	if (xa_is_value(entry))
		pr_cont("value %ld (0x%lx) [%p]\n", xa_to_value(entry),
			xa_to_value(entry), entry);
	else if (xa_is_zero(entry))
		pr_cont("zero (%ld)\n", xa_to_internal(entry));
	else if (mt_is_reserved(entry))
		pr_cont("UNKNOWN ENTRY (%p)\n", entry);
	else
		pr_cont("%p\n", entry);
}
*mt
, void *entry
,
6777 unsigned long min
, unsigned long max
, unsigned int depth
,
6778 enum mt_dump_format format
)
6780 struct maple_range_64
*node
= &mte_to_node(entry
)->mr64
;
6781 bool leaf
= mte_is_leaf(entry
);
6782 unsigned long first
= min
;
6785 pr_cont(" contents: ");
6786 for (i
= 0; i
< MAPLE_RANGE64_SLOTS
- 1; i
++) {
6789 pr_cont("%p %lX ", node
->slot
[i
], node
->pivot
[i
]);
6793 pr_cont("%p %lu ", node
->slot
[i
], node
->pivot
[i
]);
6796 pr_cont("%p\n", node
->slot
[i
]);
6797 for (i
= 0; i
< MAPLE_RANGE64_SLOTS
; i
++) {
6798 unsigned long last
= max
;
6800 if (i
< (MAPLE_RANGE64_SLOTS
- 1))
6801 last
= node
->pivot
[i
];
6802 else if (!node
->slot
[i
] && max
!= mt_node_max(entry
))
6804 if (last
== 0 && i
> 0)
6807 mt_dump_entry(mt_slot(mt
, node
->slot
, i
),
6808 first
, last
, depth
+ 1, format
);
6809 else if (node
->slot
[i
])
6810 mt_dump_node(mt
, mt_slot(mt
, node
->slot
, i
),
6811 first
, last
, depth
+ 1, format
);
6818 pr_err("node %p last (%lx) > max (%lx) at pivot %d!\n",
6819 node
, last
, max
, i
);
6823 pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
6824 node
, last
, max
, i
);
static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
		unsigned long min, unsigned long max, unsigned int depth,
		enum mt_dump_format format)
{
	struct maple_arange_64 *node = &mte_to_node(entry)->ma64;
	bool leaf = mte_is_leaf(entry);
	unsigned long first = min;
	int i;

	pr_cont(" contents: ");
	for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++)
		pr_cont("%lu ", node->gap[i]);
	pr_cont("| %02X %02X| ", node->meta.end, node->meta.gap);
	for (i = 0; i < MAPLE_ARANGE64_SLOTS - 1; i++)
		pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
	pr_cont("%p\n", node->slot[i]);
	for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) {
		unsigned long last = max;

		if (i < (MAPLE_ARANGE64_SLOTS - 1))
			last = node->pivot[i];
		else if (!node->slot[i])
			break;
		if (last == 0 && i > 0)
			break;
		if (leaf)
			mt_dump_entry(mt_slot(mt, node->slot, i),
					first, last, depth + 1, format);
		else if (node->slot[i])
			mt_dump_node(mt, mt_slot(mt, node->slot, i),
					first, last, depth + 1, format);

		if (last == max)
			break;
		if (last > max) {
			pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
					node, last, max, i);
			break;
		}
		first = last + 1;
	}
}
static void mt_dump_node(const struct maple_tree *mt, void *entry,
		unsigned long min, unsigned long max, unsigned int depth,
		enum mt_dump_format format)
{
	struct maple_node *node = mte_to_node(entry);
	unsigned int type = mte_node_type(entry);
	unsigned int i;

	mt_dump_range(min, max, depth, format);

	pr_cont("node %p depth %d type %d parent %p", node, depth, type,
			node ? node->parent : NULL);
	switch (type) {
	case maple_dense:
		pr_cont("\n");
		for (i = 0; i < MAPLE_NODE_SLOTS; i++) {
			if (min + i > max)
				pr_cont("OUT OF RANGE: ");
			mt_dump_entry(mt_slot(mt, node->slot, i),
					min + i, min + i, depth, format);
		}
		break;
	case maple_leaf_64:
	case maple_range_64:
		mt_dump_range64(mt, entry, min, max, depth, format);
		break;
	case maple_arange_64:
		mt_dump_arange64(mt, entry, min, max, depth, format);
		break;

	default:
		pr_cont(" UNKNOWN TYPE\n");
	}
}

void mt_dump(const struct maple_tree *mt, enum mt_dump_format format)
{
	void *entry = rcu_dereference_check(mt->ma_root, mt_locked(mt));

	pr_info("maple_tree(%p) flags %X, height %u root %p\n",
		 mt, mt->ma_flags, mt_height(mt), entry);
	if (!xa_is_node(entry))
		mt_dump_entry(entry, 0, 0, 0, format);
	else if (entry)
		mt_dump_node(mt, entry, 0, mt_node_max(entry), 0, format);
}
EXPORT_SYMBOL_GPL(mt_dump);
/*
 * Calculate the maximum gap in a node and check if that's what is reported in
 * the parent (unless root).
 */
static void mas_validate_gaps(struct ma_state *mas)
{
	struct maple_enode *mte = mas->node;
	struct maple_node *p_mn;
	unsigned long gap = 0, max_gap = 0;
	unsigned long p_end, p_start = mas->min;
	unsigned char p_slot;
	unsigned long *gaps = NULL;
	unsigned long *pivots = ma_pivots(mte_to_node(mte), mte_node_type(mte));
	int i;

	/* Dense nodes have no gap array; count runs of empty slots directly. */
	if (ma_is_dense(mte_node_type(mte))) {
		for (i = 0; i < mt_slot_count(mte); i++) {
			if (mas_get_slot(mas, i)) {
				if (gap > max_gap)
					max_gap = gap;
				gap = 0;
				continue;
			}
			gap++;
		}
		goto counted;
	}

	gaps = ma_gaps(mte_to_node(mte), mte_node_type(mte));
	for (i = 0; i < mt_slot_count(mte); i++) {
		p_end = mas_logical_pivot(mas, pivots, i, mte_node_type(mte));

		if (!gaps) {
			/* No gap array: an empty slot contributes its whole range. */
			if (mas_get_slot(mas, i)) {
				gap = 0;
				goto not_empty;
			}

			gap += p_end - p_start + 1;
		} else {
			void *entry = mas_get_slot(mas, i);

			gap = gaps[i];
			if (!entry) {
				/* An empty slot's gap must equal its range. */
				if (gap != p_end - p_start + 1) {
					pr_err("%p[%u] -> %p %lu != %lu - %lu + 1\n",
						mas_mn(mas), i,
						mas_get_slot(mas, i), gap,
						p_end, p_start);
					mt_dump(mas->tree, mt_dump_hex);

					MT_BUG_ON(mas->tree,
						gap != p_end - p_start + 1);
				}
			} else {
				/* An occupied slot's gap cannot exceed its range. */
				if (gap > p_end - p_start + 1) {
					pr_err("%p[%u] %lu >= %lu - %lu + 1 (%lu)\n",
						mas_mn(mas), i, gap, p_end, p_start,
						p_end - p_start + 1);
					MT_BUG_ON(mas->tree,
						gap > p_end - p_start + 1);
				}
			}
		}

		if (gap > max_gap)
			max_gap = gap;
not_empty:
		p_start = p_end + 1;
		if (p_end >= mas->max)
			break;
	}

counted:
	if (mte_is_root(mte))
		return;

	p_slot = mte_parent_slot(mas->node);
	p_mn = mte_parent(mte);
	MT_BUG_ON(mas->tree, max_gap > mas->max);
	if (ma_gaps(p_mn, mas_parent_type(mas, mte))[p_slot] != max_gap) {
		pr_err("gap %p[%u] != %lu\n", p_mn, p_slot, max_gap);
		mt_dump(mas->tree, mt_dump_hex);
	}

	MT_BUG_ON(mas->tree,
		  ma_gaps(p_mn, mas_parent_type(mas, mte))[p_slot] != max_gap);
}
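/*
 * Worked example of the arithmetic above: an empty slot spanning
 * [p_start, p_end] = [10, 19] must record a gap of 19 - 10 + 1 = 10.
 * An occupied slot records the largest gap of its subtree, which may be
 * anything up to, but never more than, the range it covers.
 */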
static void mas_validate_parent_slot(struct ma_state *mas)
{
	struct maple_node *parent;
	struct maple_enode *node;
	enum maple_type p_type;
	unsigned char p_slot;
	void __rcu **slots;
	int i;

	if (mte_is_root(mas->node))
		return;

	p_slot = mte_parent_slot(mas->node);
	p_type = mas_parent_type(mas, mas->node);
	parent = mte_parent(mas->node);
	slots = ma_slots(parent, p_type);
	MT_BUG_ON(mas->tree, mas_mn(mas) == parent);

	/* Check prev/next parent slot for duplicate node entry */

	for (i = 0; i < mt_slots[p_type]; i++) {
		node = mas_slot(mas, slots, i);
		if (i == p_slot) {
			if (node != mas->node)
				pr_err("parent %p[%u] does not have %p\n",
					parent, i, mas_mn(mas));
			MT_BUG_ON(mas->tree, node != mas->node);
		} else if (node == mas->node) {
			pr_err("Invalid child %p at parent %p[%u] p_slot %u\n",
			       mas_mn(mas), parent, i, p_slot);
			MT_BUG_ON(mas->tree, node == mas->node);
		}
	}
}
static void mas_validate_child_slot(struct ma_state *mas)
{
	enum maple_type type = mte_node_type(mas->node);
	void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
	unsigned long *pivots = ma_pivots(mte_to_node(mas->node), type);
	struct maple_enode *child;
	unsigned char i;

	if (mte_is_leaf(mas->node))
		return;

	for (i = 0; i < mt_slots[type]; i++) {
		child = mas_slot(mas, slots, i);
		if (!pivots[i] || pivots[i] == mas->max)
			break;

		if (!child)
			break;

		if (mte_parent_slot(child) != i) {
			pr_err("Slot error at %p[%u]: child %p has pslot %u\n",
			       mas_mn(mas), i, mte_to_node(child),
			       mte_parent_slot(child));
			MT_BUG_ON(mas->tree, 1);
		}

		if (mte_parent(child) != mte_to_node(mas->node)) {
			pr_err("child %p has parent %p not %p\n",
			       mte_to_node(child), mte_parent(child),
			       mte_to_node(mas->node));
			MT_BUG_ON(mas->tree, 1);
		}
	}
}
/*
 * Validate all pivots are within mas->min and mas->max, and check that no
 * slot beyond the last pivot holds data.
 */
static void mas_validate_limits(struct ma_state *mas)
{
	int i;
	unsigned long prev_piv = 0;
	enum maple_type type = mte_node_type(mas->node);
	void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
	unsigned long *pivots = ma_pivots(mas_mn(mas), type);

	/* all limits are fine here. */
	if (mte_is_root(mas->node))
		return;

	for (i = 0; i < mt_slots[type]; i++) {
		unsigned long piv;

		piv = mas_safe_pivot(mas, pivots, i, type);

		if (!piv && (i != 0))
			break;

		if (!mte_is_leaf(mas->node)) {
			void *entry = mas_slot(mas, slots, i);

			if (!entry)
				pr_err("%p[%u] cannot be null\n",
				       mas_mn(mas), i);

			MT_BUG_ON(mas->tree, !entry);
		}

		if (prev_piv > piv) {
			pr_err("%p[%u] piv %lu < prev_piv %lu\n",
				mas_mn(mas), i, piv, prev_piv);
			MAS_WARN_ON(mas, piv < prev_piv);
		}

		if (piv < mas->min) {
			pr_err("%p[%u] %lu < %lu\n", mas_mn(mas), i,
				piv, mas->min);
			MAS_WARN_ON(mas, piv < mas->min);
		}
		if (piv > mas->max) {
			pr_err("%p[%u] %lu > %lu\n", mas_mn(mas), i,
				piv, mas->max);
			MAS_WARN_ON(mas, piv > mas->max);
		}
		prev_piv = piv;
		if (piv == mas->max)
			break;
	}

	/* Nothing may live past the last used slot or pivot. */
	for (i += 1; i < mt_slots[type]; i++) {
		void *entry = mas_slot(mas, slots, i);

		if (entry && (i != mt_slots[type] - 1)) {
			pr_err("%p[%u] should not have entry %p\n",
			       mas_mn(mas), i, entry);
			MT_BUG_ON(mas->tree, entry != NULL);
		}

		if (i < mt_pivots[type]) {
			unsigned long piv = pivots[i];

			if (!piv)
				continue;

			pr_err("%p[%u] should not have piv %lu\n",
			       mas_mn(mas), i, piv);
			MAS_WARN_ON(mas, i < mt_pivots[type] - 1);
		}
	}
}
/*
 * Walk the leaf slots in order and check that no two consecutive slots hold
 * NULL: adjacent NULL ranges must have been coalesced into a single slot.
 */
static void mt_validate_nulls(struct maple_tree *mt)
{
	void *entry, *last = (void *)1;
	unsigned char offset = 0;
	void __rcu **slots;
	MA_STATE(mas, mt, 0, 0);

	mas_start(&mas);
	if (mas_is_none(&mas) || (mas.node == MAS_ROOT))
		return;

	while (!mte_is_leaf(mas.node))
		mas_descend(&mas);

	slots = ma_slots(mte_to_node(mas.node), mte_node_type(mas.node));
	do {
		entry = mas_slot(&mas, slots, offset);
		if (!last && !entry) {
			pr_err("Sequential nulls end at %p[%u]\n",
				mas_mn(&mas), offset);
		}
		MT_BUG_ON(mt, !last && !entry);
		last = entry;
		if (offset == mas_data_end(&mas)) {
			mas_next_node(&mas, mas_mn(&mas), ULONG_MAX);
			if (mas_is_none(&mas))
				return;
			offset = 0;
			slots = ma_slots(mte_to_node(mas.node),
					mte_node_type(mas.node));
		} else {
			offset++;
		}

	} while (!mas_is_none(&mas));
}
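/*
 * Example of the invariant checked above (illustrative values): if [5, 10]
 * and [11, 15] are both NULL, they must occupy a single slot spanning
 * [5, 15]; finding the two NULLs in adjacent leaf slots is an error.
 */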
/*
 * validate a maple tree by checking:
 * 1. The limits (pivots are within mas->min to mas->max)
 * 2. The gap is correctly set in the parents
 */
void mt_validate(struct maple_tree *mt)
{
	unsigned char end;

	MA_STATE(mas, mt, 0, 0);
	rcu_read_lock();
	mas_start(&mas);
	if (!mas_searchable(&mas))
		goto done;

	mas_first_entry(&mas, mas_mn(&mas), ULONG_MAX, mte_node_type(mas.node));
	while (!mas_is_none(&mas)) {
		MAS_WARN_ON(&mas, mte_dead_node(mas.node));
		if (!mte_is_root(mas.node)) {
			end = mas_data_end(&mas);
			if (MAS_WARN_ON(&mas,
					(end < mt_min_slot_count(mas.node)) &&
					(mas.max != ULONG_MAX))) {
				pr_err("Invalid size %u of %p\n", end,
				       mas_mn(&mas));
			}
		}
		mas_validate_parent_slot(&mas);
		mas_validate_child_slot(&mas);
		mas_validate_limits(&mas);
		if (mt_is_alloc(mt))
			mas_validate_gaps(&mas);
		mas_dfs_postorder(&mas, ULONG_MAX);
	}
	mt_validate_nulls(mt);
done:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt_validate);
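/*
 * Usage sketch (illustrative; "my_mt" is a hypothetical tree under test).
 * mt_validate() takes the RCU read lock itself:
 *
 *	mtree_store_range(&my_mt, 0, 100, xa_mk_value(7), GFP_KERNEL);
 *	mt_validate(&my_mt);
 *
 * A violated invariant produces pr_err() output plus an MT_BUG_ON() or
 * MAS_WARN_ON(); a silent return means every check passed.
 */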
void mas_dump(const struct ma_state *mas)
{
	pr_err("MAS: tree=%p enode=%p ", mas->tree, mas->node);
	if (mas_is_none(mas))
		pr_err("(MAS_NONE) ");
	else if (mas_is_ptr(mas))
		pr_err("(MAS_ROOT) ");
	else if (mas_is_start(mas))
		pr_err("(MAS_START) ");
	else if (mas_is_paused(mas))
		pr_err("(MAS_PAUSED) ");

	pr_err("[%u] index=%lx last=%lx\n", mas->offset, mas->index, mas->last);
	pr_err("     min=%lx max=%lx alloc=%p, depth=%u, flags=%x\n",
	       mas->min, mas->max, mas->alloc, mas->depth, mas->mas_flags);
	if (mas->index > mas->last)
		pr_err("Check index & last\n");
}
EXPORT_SYMBOL_GPL(mas_dump);
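/*
 * mas_dump() is intended for debug paths such as the MAS_WARN_ON() failure
 * case (an assumption about the debug macros in maple_tree.h); it can also
 * be called directly, e.g.:
 *
 *	if (mas.index > mas.last)
 *		mas_dump(&mas);
 */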
void mas_wr_dump(const struct ma_wr_state *wr_mas)
{
	pr_err("WR_MAS: node=%p r_min=%lx r_max=%lx\n",
	       wr_mas->node, wr_mas->r_min, wr_mas->r_max);
	pr_err("        type=%u off_end=%u, node_end=%u, end_piv=%lx\n",
	       wr_mas->type, wr_mas->offset_end, wr_mas->node_end,
	       wr_mas->end_piv);
}
EXPORT_SYMBOL_GPL(mas_wr_dump);
7264 #endif /* CONFIG_DEBUG_MAPLE_TREE */