// SPDX-License-Identifier: GPL-2.0+
/*
 * Maple Tree implementation
 * Copyright (c) 2018-2022 Oracle Corporation
 * Authors: Liam R. Howlett <Liam.Howlett@oracle.com>
 *	    Matthew Wilcox <willy@infradead.org>
 */

/*
 * DOC: Interesting implementation details of the Maple Tree
 *
 * Each node type has a number of slots for entries and a number of slots for
 * pivots.  In the case of dense nodes, the pivots are implied by the position
 * and are simply the slot index + the minimum of the node.
 *
 * In regular B-Tree terms, pivots are called keys.  The term pivot is used to
 * indicate that the tree is specifying ranges.  Pivots may appear in the
 * subtree with an entry attached to the value, whereas keys are unique to a
 * specific position of a B-tree.  Pivot values are inclusive of the slot with
 * the same index.
 *
 * The following illustrates the layout of a range64 node's slots and pivots.
 *
 *  Slots -> | 0 | 1 | 2 | ... | 12 | 13 | 14 | 15 |
 *           │   │   │         │    │    │    │    └─ Implied maximum
 *           │   │   │         │    │    │    └─ Pivot 14
 *           │   │   │         │    │    └─ Pivot 13
 *           │   │   │         │    └─ Pivot 12
 *           │   │   │         └─ ...
 *           └───┴───┴─ Pivots 0, 1, 2, ...
 *
 * Internal (non-leaf) nodes contain pointers to other nodes.
 * Leaf nodes contain entries.
 *
 * The location of interest is often referred to as an offset.  All offsets
 * have a slot, but the last offset has an implied pivot from the node above
 * (or ULONG_MAX for the root node).
 *
 * Ranges complicate certain write activities.  When modifying any of the
 * B-tree variants, it is known that one entry will either be added or
 * deleted.  When modifying the Maple Tree, one store operation may overwrite
 * the entire data set, or one half of the tree, or the middle half of the
 * tree.
 */
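
/*
 * Editor's illustration (not from the upstream file): storing the range
 * [10, 19] -> ptr in an otherwise empty leaf with min = 0 and max =
 * ULONG_MAX lays out as:
 *
 *  Slots:  | NULL | ptr | NULL | ...
 *  Pivots: |  9   | 19  | 0 (unused) ...
 *
 * A walk for index 15 scans the pivots until index <= pivot (pivot 19 at
 * offset 1) and returns the entry in slot 1.
 */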
#include <linux/maple_tree.h>
#include <linux/xarray.h>
#include <linux/types.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/limits.h>
#include <asm/barrier.h>

#define CREATE_TRACE_POINTS
#include <trace/events/maple_tree.h>
#define MA_ROOT_PARENT 1

/*
 * Maple state flags
 * * MA_STATE_BULK		- Bulk insert mode
 * * MA_STATE_REBALANCE		- Indicate a rebalance during bulk insert
 * * MA_STATE_PREALLOC		- Preallocated nodes, WARN_ON allocation
 */
#define MA_STATE_BULK		1
#define MA_STATE_REBALANCE	2
#define MA_STATE_PREALLOC	4
#define ma_parent_ptr(x) ((struct maple_pnode *)(x))
#define ma_mnode_ptr(x) ((struct maple_node *)(x))
#define ma_enode_ptr(x) ((struct maple_enode *)(x))
static struct kmem_cache *maple_node_cache;
#ifdef CONFIG_DEBUG_MAPLE_TREE
static const unsigned long mt_max[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS,
	[maple_leaf_64]		= ULONG_MAX,
	[maple_range_64]	= ULONG_MAX,
	[maple_arange_64]	= ULONG_MAX,
};
#define mt_node_max(x) mt_max[mte_node_type(x)]
#endif
static const unsigned char mt_slots[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS,
	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS,
	[maple_range_64]	= MAPLE_RANGE64_SLOTS,
	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS,
};
#define mt_slot_count(x) mt_slots[mte_node_type(x)]
static const unsigned char mt_pivots[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS - 1,
	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS - 1,
	[maple_range_64]	= MAPLE_RANGE64_SLOTS - 1,
	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS - 1,
};
#define mt_pivot_count(x) mt_pivots[mte_node_type(x)]
static const unsigned char mt_min_slots[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS / 2,
	[maple_leaf_64]		= (MAPLE_RANGE64_SLOTS / 2) - 2,
	[maple_range_64]	= (MAPLE_RANGE64_SLOTS / 2) - 2,
	[maple_arange_64]	= (MAPLE_ARANGE64_SLOTS / 2) - 1,
};
#define mt_min_slot_count(x) mt_min_slots[mte_node_type(x)]
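
/*
 * Editor's note (illustrative, assuming a 64-bit kernel where
 * MAPLE_RANGE64_SLOTS == 16 and MAPLE_ARANGE64_SLOTS == 10): a range64 node
 * then has 16 slots and 15 pivots and must keep (16 / 2) - 2 == 6 slots in
 * use to stay sufficient, while an allocation-range node keeps
 * (10 / 2) - 1 == 4.
 */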
#define MAPLE_BIG_NODE_SLOTS	(MAPLE_RANGE64_SLOTS * 2 + 2)
#define MAPLE_BIG_NODE_GAPS	(MAPLE_ARANGE64_SLOTS * 2 + 1)

struct maple_big_node {
	struct maple_pnode *parent;
	unsigned long pivot[MAPLE_BIG_NODE_SLOTS - 1];
	union {
		struct maple_enode *slot[MAPLE_BIG_NODE_SLOTS];
		struct {
			unsigned long padding[MAPLE_BIG_NODE_GAPS];
			unsigned long gap[MAPLE_BIG_NODE_GAPS];
		};
	};
	unsigned char b_end;
	enum maple_type type;
};
/*
 * The maple_subtree_state is used to build a tree to replace a segment of an
 * existing tree in a more atomic way.  Any walkers of the older tree will hit
 * a dead node and restart on updates.
 */
struct maple_subtree_state {
	struct ma_state *orig_l;	/* Original left side of subtree */
	struct ma_state *orig_r;	/* Original right side of subtree */
	struct ma_state *l;		/* New left side of subtree */
	struct ma_state *m;		/* New middle of subtree (rare) */
	struct ma_state *r;		/* New right side of subtree */
	struct ma_topiary *free;	/* nodes to be freed */
	struct ma_topiary *destroy;	/* Nodes to be destroyed (walked and freed) */
	struct maple_big_node *bn;
};
#ifdef CONFIG_KASAN_STACK
/* Prevent mas_wr_bnode() from exceeding the stack frame limit */
#define noinline_for_kasan noinline_for_stack
#else
#define noinline_for_kasan inline
#endif
static inline struct maple_node *mt_alloc_one(gfp_t gfp)
{
	return kmem_cache_alloc(maple_node_cache, gfp);
}

static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes)
{
	return kmem_cache_alloc_bulk(maple_node_cache, gfp, size, nodes);
}

static inline void mt_free_bulk(size_t size, void __rcu **nodes)
{
	kmem_cache_free_bulk(maple_node_cache, size, (void **)nodes);
}

static void mt_free_rcu(struct rcu_head *head)
{
	struct maple_node *node = container_of(head, struct maple_node, rcu);

	kmem_cache_free(maple_node_cache, node);
}
/*
 * ma_free_rcu() - Use rcu callback to free a maple node
 * @node: The node to free
 *
 * The maple tree uses the parent pointer to indicate this node is no longer
 * in use and will be freed.
 */
static void ma_free_rcu(struct maple_node *node)
{
	WARN_ON(node->parent != ma_parent_ptr(node));
	call_rcu(&node->rcu, mt_free_rcu);
}
static void mas_set_height(struct ma_state *mas)
{
	unsigned int new_flags = mas->tree->ma_flags;

	new_flags &= ~MT_FLAGS_HEIGHT_MASK;
	BUG_ON(mas->depth > MAPLE_HEIGHT_MAX);
	new_flags |= mas->depth << MT_FLAGS_HEIGHT_OFFSET;
	mas->tree->ma_flags = new_flags;
}

static unsigned int mas_mt_height(struct ma_state *mas)
{
	return mt_height(mas->tree);
}
static inline enum maple_type mte_node_type(const struct maple_enode *entry)
{
	return ((unsigned long)entry >> MAPLE_NODE_TYPE_SHIFT) &
		MAPLE_NODE_TYPE_MASK;
}

static inline bool ma_is_dense(const enum maple_type type)
{
	return type < maple_leaf_64;
}

static inline bool ma_is_leaf(const enum maple_type type)
{
	return type < maple_range_64;
}

static inline bool mte_is_leaf(const struct maple_enode *entry)
{
	return ma_is_leaf(mte_node_type(entry));
}
/*
 * We also reserve values with the bottom two bits set to '10' which are
 * below 4096
 */
static inline bool mt_is_reserved(const void *entry)
{
	return ((unsigned long)entry < MAPLE_RESERVED_RANGE) &&
		xa_is_internal(entry);
}
static inline void mas_set_err(struct ma_state *mas, long err)
{
	mas->node = MA_ERROR(err);
}

static inline bool mas_is_ptr(struct ma_state *mas)
{
	return mas->node == MAS_ROOT;
}

static inline bool mas_is_start(struct ma_state *mas)
{
	return mas->node == MAS_START;
}

bool mas_is_err(struct ma_state *mas)
{
	return xa_is_err(mas->node);
}

static inline bool mas_searchable(struct ma_state *mas)
{
	if (mas_is_none(mas))
		return false;

	if (mas_is_ptr(mas))
		return false;

	return true;
}
static inline struct maple_node *mte_to_node(const struct maple_enode *entry)
{
	return (struct maple_node *)((unsigned long)entry & ~MAPLE_NODE_MASK);
}

/*
 * mte_to_mat() - Convert a maple encoded node to a maple topiary node.
 * @entry: The maple encoded node
 *
 * Return: a maple topiary pointer
 */
static inline struct maple_topiary *mte_to_mat(const struct maple_enode *entry)
{
	return (struct maple_topiary *)
		((unsigned long)entry & ~MAPLE_NODE_MASK);
}
/*
 * mas_mn() - Get the maple state node.
 * @mas: The maple state
 *
 * Return: the maple node (not encoded - bare pointer).
 */
static inline struct maple_node *mas_mn(const struct ma_state *mas)
{
	return mte_to_node(mas->node);
}

/*
 * mte_set_node_dead() - Set a maple encoded node as dead.
 * @mn: The maple encoded node.
 */
static inline void mte_set_node_dead(struct maple_enode *mn)
{
	mte_to_node(mn)->parent = ma_parent_ptr(mte_to_node(mn));
	smp_wmb(); /* Needed for RCU */
}
/* Bit 1 indicates the root is a node */
#define MAPLE_ROOT_NODE			0x02
/* maple_type stored bit 3-6 */
#define MAPLE_ENODE_TYPE_SHIFT		0x03
/* Bit 2 means a NULL somewhere below */
#define MAPLE_ENODE_NULL		0x04

static inline struct maple_enode *mt_mk_node(const struct maple_node *node,
					     enum maple_type type)
{
	return (void *)((unsigned long)node |
			(type << MAPLE_ENODE_TYPE_SHIFT) | MAPLE_ENODE_NULL);
}

static inline void *mte_mk_root(const struct maple_enode *node)
{
	return (void *)((unsigned long)node | MAPLE_ROOT_NODE);
}

static inline void *mte_safe_root(const struct maple_enode *node)
{
	return (void *)((unsigned long)node & ~MAPLE_ROOT_NODE);
}

static inline void *mte_set_full(const struct maple_enode *node)
{
	return (void *)((unsigned long)node & ~MAPLE_ENODE_NULL);
}

static inline void *mte_clear_full(const struct maple_enode *node)
{
	return (void *)((unsigned long)node | MAPLE_ENODE_NULL);
}

static inline bool mte_has_null(const struct maple_enode *node)
{
	return (unsigned long)node & MAPLE_ENODE_NULL;
}
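
/*
 * Editor's illustration (not from the upstream file): encoded nodes pack
 * metadata into the low bits of the 256B-aligned node pointer.  A node at
 * 0x...1f00 encoded as maple_range_64 (enum value 2) becomes:
 *
 *	enode = 0x...1f00 | (2 << MAPLE_ENODE_TYPE_SHIFT) | MAPLE_ENODE_NULL
 *	      = 0x...1f14
 *
 * mte_node_type() recovers the 2 from the type bits and mte_to_node() masks
 * the low bits off again with ~MAPLE_NODE_MASK.
 */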
static inline bool ma_is_root(struct maple_node *node)
{
	return ((unsigned long)node->parent & MA_ROOT_PARENT);
}

static inline bool mte_is_root(const struct maple_enode *node)
{
	return ma_is_root(mte_to_node(node));
}

static inline bool mas_is_root_limits(const struct ma_state *mas)
{
	return !mas->min && mas->max == ULONG_MAX;
}

static inline bool mt_is_alloc(struct maple_tree *mt)
{
	return (mt->ma_flags & MT_FLAGS_ALLOC_RANGE);
}
/*
 * The Parent Pointer
 * Excluding root, the parent pointer is 256B aligned like all other tree
 * nodes.  When storing 32 or 64 bit values, the offset can fit into 5 bits.
 * The 16 bit values need an extra bit to store the offset.  This extra bit
 * comes from a reuse of the last bit in the node type.  This is possible by
 * using bit 1 to indicate if bit 2 is part of the type or the slot.
 *
 * Note types:
 *  0x??1 = Root
 *  0x?00 = 16 bit nodes
 *  0x010 = 32 bit nodes
 *  0x110 = 64 bit nodes
 *
 * Slot size and alignment
 *  0x??1 : Root
 *  0b?00 : 16 bit values, type in 0-1, slot in 2-7
 *  0b010 : 32 bit values, type in 0-2, slot in 3-7
 *  0b110 : 64 bit values, type in 0-2, slot in 3-7
 */

#define MAPLE_PARENT_ROOT		0x01

#define MAPLE_PARENT_SLOT_SHIFT		0x03
#define MAPLE_PARENT_SLOT_MASK		0xF8

#define MAPLE_PARENT_16B_SLOT_SHIFT	0x02
#define MAPLE_PARENT_16B_SLOT_MASK	0xFC

#define MAPLE_PARENT_RANGE64		0x06
#define MAPLE_PARENT_RANGE32		0x04
#define MAPLE_PARENT_NOT_RANGE16	0x02
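
/*
 * Editor's illustration (not from the upstream file): a 64-bit range node
 * stored in slot 5 of its parent encodes its parent pointer as
 *
 *	val = (unsigned long)parent_node |
 *	      (5 << MAPLE_PARENT_SLOT_SHIFT) | MAPLE_PARENT_RANGE64
 *
 * so the low byte is 0b00101110: slot 5 in bits 3-7 and type 0x06 in
 * bits 0-2, with bit 1 set to signal that bit 2 belongs to the type.
 */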
/*
 * mte_parent_shift() - Get the parent shift for the slot storage.
 * @parent: The parent pointer cast as an unsigned long
 * Return: The shift into that pointer to the start of the slot
 */
static inline unsigned long mte_parent_shift(unsigned long parent)
{
	/* Note bit 1 == 0 means 16B */
	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
		return MAPLE_PARENT_SLOT_SHIFT;

	return MAPLE_PARENT_16B_SLOT_SHIFT;
}

/*
 * mte_parent_slot_mask() - Get the slot mask for the parent.
 * @parent: The parent pointer cast as an unsigned long.
 * Return: The slot mask for that parent.
 */
static inline unsigned long mte_parent_slot_mask(unsigned long parent)
{
	/* Note bit 1 == 0 means 16B */
	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
		return MAPLE_PARENT_SLOT_MASK;

	return MAPLE_PARENT_16B_SLOT_MASK;
}
/*
 * mte_parent_enum() - Return the maple_type of the parent from the stored
 * parent type.
 * @p_enode: The parent pointer cast as a maple_enode
 * @mt: The maple tree
 * Return: The parent's maple_type
 */
static inline
enum maple_type mte_parent_enum(struct maple_enode *p_enode,
				struct maple_tree *mt)
{
	unsigned long p_type;

	p_type = (unsigned long)p_enode;
	if (p_type & MAPLE_PARENT_ROOT)
		return 0; /* Validated in the caller. */

	p_type &= MAPLE_NODE_MASK;
	p_type = p_type & ~(MAPLE_PARENT_ROOT | mte_parent_slot_mask(p_type));

	switch (p_type) {
	case MAPLE_PARENT_RANGE64: /* or MAPLE_PARENT_ARANGE64 */
		if (mt_is_alloc(mt))
			return maple_arange_64;
		return maple_range_64;
	}

	return 0;
}

static inline
enum maple_type mas_parent_enum(struct ma_state *mas, struct maple_enode *enode)
{
	return mte_parent_enum(ma_enode_ptr(mte_to_node(enode)->parent), mas->tree);
}
/*
 * mte_set_parent() - Set the parent node and encode the slot
 * @enode: The encoded maple node.
 * @parent: The encoded maple node that is the parent of @enode.
 * @slot: The slot that @enode resides in @parent.
 *
 * Slot number is encoded in the enode->parent bit 3-6 or 2-6, depending on
 * the parent type.
 */
static inline
void mte_set_parent(struct maple_enode *enode, const struct maple_enode *parent,
		    unsigned char slot)
{
	unsigned long val = (unsigned long)parent;
	unsigned long shift;
	unsigned long type;
	enum maple_type p_type = mte_node_type(parent);

	BUG_ON(p_type == maple_dense);
	BUG_ON(p_type == maple_leaf_64);

	switch (p_type) {
	case maple_range_64:
	case maple_arange_64:
		shift = MAPLE_PARENT_SLOT_SHIFT;
		type = MAPLE_PARENT_RANGE64;
		break;
	default:
		break;
	}

	val &= ~MAPLE_NODE_MASK; /* Clear all node metadata in parent */
	val |= (slot << shift) | type;
	mte_to_node(enode)->parent = ma_parent_ptr(val);
}
/*
 * mte_parent_slot() - get the parent slot of @enode.
 * @enode: The encoded maple node.
 *
 * Return: The slot in the parent node where @enode resides.
 */
static inline unsigned int mte_parent_slot(const struct maple_enode *enode)
{
	unsigned long val = (unsigned long)mte_to_node(enode)->parent;

	if (val & MA_ROOT_PARENT)
		return 0;

	/*
	 * Okay to use MAPLE_PARENT_16B_SLOT_MASK as the last bit will be lost
	 * by shift if the parent shift is MAPLE_PARENT_SLOT_SHIFT
	 */
	return (val & MAPLE_PARENT_16B_SLOT_MASK) >> mte_parent_shift(val);
}
/*
 * mte_parent() - Get the parent of @enode.
 * @enode: The encoded maple node.
 *
 * Return: The parent maple node.
 */
static inline struct maple_node *mte_parent(const struct maple_enode *enode)
{
	return (void *)((unsigned long)
			(mte_to_node(enode)->parent) & ~MAPLE_NODE_MASK);
}
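
/*
 * Editor's sketch (not part of the upstream file): the encode/decode helpers
 * above are expected to round-trip; debug code could assert it along these
 * lines (mas_parent_check() is a hypothetical name).
 */
static inline void mas_parent_check(struct maple_enode *enode,
				    const struct maple_enode *parent,
				    unsigned char slot)
{
	mte_set_parent(enode, parent, slot);
	WARN_ON(mte_parent_slot(enode) != slot);	   /* slot survives */
	WARN_ON(mte_parent(enode) != mte_to_node(parent)); /* node survives */
}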
/*
 * ma_dead_node() - check if the @node is dead.
 * @node: The maple node
 *
 * Return: true if dead, false otherwise.
 */
static inline bool ma_dead_node(const struct maple_node *node)
{
	struct maple_node *parent;

	/* Do not reorder reads from the node prior to the parent check */
	smp_rmb();
	parent = (void *)((unsigned long) node->parent & ~MAPLE_NODE_MASK);
	return (parent == node);
}

/*
 * mte_dead_node() - check if the @enode is dead.
 * @enode: The encoded maple node
 *
 * Return: true if dead, false otherwise.
 */
static inline bool mte_dead_node(const struct maple_enode *enode)
{
	struct maple_node *parent, *node;

	node = mte_to_node(enode);
	/* Do not reorder reads from the node prior to the parent check */
	smp_rmb();
	parent = mte_parent(enode);
	return (parent == node);
}
/*
 * mas_allocated() - Get the number of nodes allocated in a maple state.
 * @mas: The maple state
 *
 * The ma_state alloc member is overloaded to hold a pointer to the first
 * allocated node or to the number of requested nodes to allocate.  If bit 0
 * is set, then the alloc contains the number of requested nodes.  If there is
 * an allocated node, then the total allocated nodes is in that node.
 *
 * Return: The total number of nodes allocated
 */
static inline unsigned long mas_allocated(const struct ma_state *mas)
{
	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1))
		return 0;

	return mas->alloc->total;
}

/*
 * mas_set_alloc_req() - Set the requested number of allocations.
 * @mas: the maple state
 * @count: the number of allocations.
 *
 * The requested number of allocations is either in the first allocated node,
 * located in @mas->alloc->request_count, or directly in @mas->alloc if there
 * is no allocated node.  Set the request either in the node or do the
 * necessary encoding to store in @mas->alloc directly.
 */
static inline void mas_set_alloc_req(struct ma_state *mas, unsigned long count)
{
	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) {
		if (!count)
			mas->alloc = NULL;
		else
			mas->alloc = (struct maple_alloc *)(((count) << 1U) | 1U);
		return;
	}

	mas->alloc->request_count = count;
}

/*
 * mas_alloc_req() - get the requested number of allocations.
 * @mas: The maple state
 *
 * The alloc count is either stored directly in @mas, or in
 * @mas->alloc->request_count if there is at least one node allocated.  Decode
 * the request count if it's stored directly in @mas->alloc.
 *
 * Return: The allocation request count.
 */
static inline unsigned int mas_alloc_req(const struct ma_state *mas)
{
	if ((unsigned long)mas->alloc & 0x1)
		return (unsigned long)(mas->alloc) >> 1;
	else if (mas->alloc)
		return mas->alloc->request_count;
	return 0;
}
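
/*
 * Editor's illustration (not from the upstream file): with no nodes
 * allocated, a request for 3 nodes is stored directly in mas->alloc as
 * ((3 << 1) | 1) == 0x7.  The tag in bit 0 distinguishes the encoded count
 * from a node pointer, which is 256B aligned and always has bit 0 clear;
 * mas_alloc_req() then decodes the request as 0x7 >> 1 == 3.
 */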
/*
 * ma_pivots() - Get a pointer to the maple node pivots.
 * @node - the maple node
 * @type - the node type
 *
 * In the event of a dead node, this array may be %NULL
 *
 * Return: A pointer to the maple node pivots
 */
static inline unsigned long *ma_pivots(struct maple_node *node,
				       enum maple_type type)
{
	switch (type) {
	case maple_arange_64:
		return node->ma64.pivot;
	case maple_range_64:
	case maple_leaf_64:
		return node->mr64.pivot;
	case maple_dense:
		return NULL;
	}
	return NULL;
}

/*
 * ma_gaps() - Get a pointer to the maple node gaps.
 * @node - the maple node
 * @type - the node type
 *
 * Return: A pointer to the maple node gaps
 */
static inline unsigned long *ma_gaps(struct maple_node *node,
				     enum maple_type type)
{
	switch (type) {
	case maple_arange_64:
		return node->ma64.gap;
	default:
		return NULL;
	}
}
/*
 * mte_pivot() - Get the pivot at @piv of the maple encoded node.
 * @mn: The maple encoded node.
 * @piv: The pivot.
 *
 * Return: the pivot at @piv of @mn.
 */
static inline unsigned long mte_pivot(const struct maple_enode *mn,
				      unsigned char piv)
{
	struct maple_node *node = mte_to_node(mn);
	enum maple_type type = mte_node_type(mn);

	if (piv >= mt_pivots[type]) {
		WARN_ON(1);
		return 0;
	}

	switch (type) {
	case maple_arange_64:
		return node->ma64.pivot[piv];
	case maple_range_64:
	case maple_leaf_64:
		return node->mr64.pivot[piv];
	case maple_dense:
		return 0;
	}
	return 0;
}
/*
 * mas_safe_pivot() - get the pivot at @piv or mas->max.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @piv: The pivot to fetch
 * @type: The maple node type
 *
 * Return: The pivot at @piv within the limit of the @pivots array, @mas->max
 * otherwise.
 */
static inline unsigned long
mas_safe_pivot(const struct ma_state *mas, unsigned long *pivots,
	       unsigned char piv, enum maple_type type)
{
	if (piv >= mt_pivots[type])
		return mas->max;

	return pivots[piv];
}

/*
 * mas_safe_min() - Return the minimum for a given offset.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @offset: The offset into the pivot array
 *
 * Return: The minimum range value that is contained in @offset.
 */
static inline unsigned long
mas_safe_min(struct ma_state *mas, unsigned long *pivots, unsigned char offset)
{
	if (likely(offset))
		return pivots[offset - 1] + 1;

	return mas->min;
}

/*
 * mas_logical_pivot() - Get the logical pivot of a given offset.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @offset: The offset into the pivot array
 * @type: The maple node type
 *
 * When there is no value at a pivot (beyond the end of the data), then the
 * pivot is actually @mas->max.
 *
 * Return: the logical pivot of a given @offset.
 */
static inline unsigned long
mas_logical_pivot(struct ma_state *mas, unsigned long *pivots,
		  unsigned char offset, enum maple_type type)
{
	unsigned long lpiv = mas_safe_pivot(mas, pivots, offset, type);

	if (likely(lpiv))
		return lpiv;

	if (likely(offset))
		return mas->max;

	return lpiv;
}
/*
 * mte_set_pivot() - Set a pivot to a value in an encoded maple node.
 * @mn: The encoded maple node
 * @piv: The pivot offset
 * @val: The value of the pivot
 */
static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv,
				 unsigned long val)
{
	struct maple_node *node = mte_to_node(mn);
	enum maple_type type = mte_node_type(mn);

	BUG_ON(piv >= mt_pivots[type]);
	switch (type) {
	default:
	case maple_range_64:
	case maple_leaf_64:
		node->mr64.pivot[piv] = val;
		break;
	case maple_arange_64:
		node->ma64.pivot[piv] = val;
		break;
	case maple_dense:
		break;
	}
}
/*
 * ma_slots() - Get a pointer to the maple node slots.
 * @mn: The maple node
 * @mt: The maple node type
 *
 * Return: A pointer to the maple node slots
 */
static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
{
	switch (mt) {
	default:
	case maple_arange_64:
		return mn->ma64.slot;
	case maple_range_64:
	case maple_leaf_64:
		return mn->mr64.slot;
	case maple_dense:
		return mn->slot;
	}
}
static inline bool mt_locked(const struct maple_tree *mt)
{
	return mt_external_lock(mt) ? mt_lock_is_held(mt) :
		lockdep_is_held(&mt->ma_lock);
}

static inline void *mt_slot(const struct maple_tree *mt,
		void __rcu **slots, unsigned char offset)
{
	return rcu_dereference_check(slots[offset], mt_locked(mt));
}

static inline void *mt_slot_locked(struct maple_tree *mt, void __rcu **slots,
				   unsigned char offset)
{
	return rcu_dereference_protected(slots[offset], mt_locked(mt));
}

/*
 * mas_slot_locked() - Get the slot value when holding the maple tree lock.
 * @mas: The maple state
 * @slots: The pointer to the slots
 * @offset: The offset into the slots array to fetch
 *
 * Return: The entry stored in @slots at the @offset.
 */
static inline void *mas_slot_locked(struct ma_state *mas, void __rcu **slots,
				    unsigned char offset)
{
	return mt_slot_locked(mas->tree, slots, offset);
}

/*
 * mas_slot() - Get the slot value when not holding the maple tree lock.
 * @mas: The maple state
 * @slots: The pointer to the slots
 * @offset: The offset into the slots array to fetch
 *
 * Return: The entry stored in @slots at the @offset
 */
static inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
			     unsigned char offset)
{
	return mt_slot(mas->tree, slots, offset);
}
/*
 * mas_root() - Get the maple tree root.
 * @mas: The maple state.
 *
 * Return: The pointer to the root of the tree
 */
static inline void *mas_root(struct ma_state *mas)
{
	return rcu_dereference_check(mas->tree->ma_root, mt_locked(mas->tree));
}

static inline void *mt_root_locked(struct maple_tree *mt)
{
	return rcu_dereference_protected(mt->ma_root, mt_locked(mt));
}

/*
 * mas_root_locked() - Get the maple tree root when holding the maple tree
 * lock.
 * @mas: The maple state.
 *
 * Return: The pointer to the root of the tree
 */
static inline void *mas_root_locked(struct ma_state *mas)
{
	return mt_root_locked(mas->tree);
}
static inline struct maple_metadata *ma_meta(struct maple_node *mn,
					     enum maple_type mt)
{
	switch (mt) {
	case maple_arange_64:
		return &mn->ma64.meta;
	default:
		return &mn->mr64.meta;
	}
}

/*
 * ma_set_meta() - Set the metadata information of a node.
 * @mn: The maple node
 * @mt: The maple node type
 * @offset: The offset of the highest sub-gap in this node.
 * @end: The end of the data in this node.
 */
static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt,
			       unsigned char offset, unsigned char end)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	meta->gap = offset;
	meta->end = end;
}

/*
 * mt_clear_meta() - clear the metadata information of a node, if it exists
 * @mt: The maple tree
 * @mn: The maple node
 * @type: The maple node type
 */
static inline void mt_clear_meta(struct maple_tree *mt, struct maple_node *mn,
				 enum maple_type type)
{
	struct maple_metadata *meta;
	unsigned long *pivots;
	void __rcu **slots;
	void *next;

	switch (type) {
	case maple_range_64:
		pivots = mn->mr64.pivot;
		if (unlikely(pivots[MAPLE_RANGE64_SLOTS - 2])) {
			slots = mn->mr64.slot;
			next = mt_slot_locked(mt, slots,
					      MAPLE_RANGE64_SLOTS - 1);
			if (unlikely((mte_to_node(next) &&
				      mte_node_type(next))))
				return; /* no metadata, could be node */
		}
		fallthrough;
	case maple_arange_64:
		meta = ma_meta(mn, type);
		break;
	default:
		return;
	}

	meta->gap = 0;
	meta->end = 0;
}

/*
 * ma_meta_end() - Get the data end of a node from the metadata
 * @mn: The maple node
 * @mt: The maple node type
 */
static inline unsigned char ma_meta_end(struct maple_node *mn,
					enum maple_type mt)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	return meta->end;
}

/*
 * ma_meta_gap() - Get the largest gap location of a node from the metadata
 * @mn: The maple node
 * @mt: The maple node type
 */
static inline unsigned char ma_meta_gap(struct maple_node *mn,
					enum maple_type mt)
{
	BUG_ON(mt != maple_arange_64);

	return mn->ma64.meta.gap;
}

/*
 * ma_set_meta_gap() - Set the largest gap location in a node's metadata
 * @mn: The maple node
 * @mt: The maple node type
 * @offset: The location of the largest gap.
 */
static inline void ma_set_meta_gap(struct maple_node *mn, enum maple_type mt,
				   unsigned char offset)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	meta->gap = offset;
}
/*
 * mat_add() - Add a @dead_enode to the ma_topiary of a list of dead nodes.
 * @mat - the ma_topiary, a linked list of dead nodes.
 * @dead_enode - the node to be marked as dead and added to the tail of the
 * list
 *
 * Add the @dead_enode to the linked list in @mat.
 */
static inline void mat_add(struct ma_topiary *mat,
			   struct maple_enode *dead_enode)
{
	mte_set_node_dead(dead_enode);
	mte_to_mat(dead_enode)->next = NULL;
	if (!mat->tail) {
		mat->tail = mat->head = dead_enode;
		return;
	}

	mte_to_mat(mat->tail)->next = dead_enode;
	mat->tail = dead_enode;
}

static void mte_destroy_walk(struct maple_enode *, struct maple_tree *);
static inline void mas_free(struct ma_state *mas, struct maple_enode *used);
/*
 * mas_mat_free() - Free all nodes in a dead list.
 * @mas - the maple state
 * @mat - the ma_topiary linked list of dead nodes to free.
 *
 * Free walk a dead list.
 */
static void mas_mat_free(struct ma_state *mas, struct ma_topiary *mat)
{
	struct maple_enode *next;

	while (mat->head) {
		next = mte_to_mat(mat->head)->next;
		mas_free(mas, mat->head);
		mat->head = next;
	}
}

/*
 * mas_mat_destroy() - Free all nodes and subtrees in a dead list.
 * @mas - the maple state
 * @mat - the ma_topiary linked list of dead nodes to free.
 *
 * Destroy walk a dead list.
 */
static void mas_mat_destroy(struct ma_state *mas, struct ma_topiary *mat)
{
	struct maple_enode *next;

	while (mat->head) {
		next = mte_to_mat(mat->head)->next;
		mte_destroy_walk(mat->head, mat->mtree);
		mat->head = next;
	}
}
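
/*
 * Editor's sketch (not part of the upstream file): a typical removal
 * sequence collects the dead nodes on a topiary list and frees them in one
 * pass after the replacement is published, roughly:
 *
 *	MA_TOPIARY(free, mas->tree);
 *
 *	mat_add(&free, old_enode);	// mark old_enode dead, queue it
 *	...				// publish the replacement subtree
 *	mas_mat_free(mas, &free);	// free (or RCU-free) the dead list
 */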
/*
 * mas_descend() - Descend into the slot stored in the ma_state.
 * @mas - the maple state.
 *
 * Note: Not RCU safe, only use in write side or debug code.
 */
static inline void mas_descend(struct ma_state *mas)
{
	enum maple_type type;
	unsigned long *pivots;
	struct maple_node *node;
	void __rcu **slots;

	node = mas_mn(mas);
	type = mte_node_type(mas->node);
	pivots = ma_pivots(node, type);
	slots = ma_slots(node, type);

	if (mas->offset)
		mas->min = pivots[mas->offset - 1] + 1;
	mas->max = mas_safe_pivot(mas, pivots, mas->offset, type);
	mas->node = mas_slot(mas, slots, mas->offset);
}
/*
 * mte_set_gap() - Set a maple node gap.
 * @mn: The encoded maple node
 * @gap: The offset of the gap to set
 * @val: The gap value
 */
static inline void mte_set_gap(const struct maple_enode *mn,
			       unsigned char gap, unsigned long val)
{
	switch (mte_node_type(mn)) {
	default:
		break;
	case maple_arange_64:
		mte_to_node(mn)->ma64.gap[gap] = val;
		break;
	}
}
/*
 * mas_ascend() - Walk up a level of the tree.
 * @mas: The maple state
 *
 * Sets the @mas->max and @mas->min to the correct values when walking up.
 * This may cause several levels of walking up to find the correct min and
 * max.  May find a dead node which will cause a premature return.
 * Return: 1 on dead node, 0 otherwise
 */
static int mas_ascend(struct ma_state *mas)
{
	struct maple_enode *p_enode; /* parent enode. */
	struct maple_enode *a_enode; /* ancestor enode. */
	struct maple_node *a_node; /* ancestor node. */
	struct maple_node *p_node; /* parent node. */
	unsigned char a_slot;
	enum maple_type a_type;
	unsigned long min, max;
	unsigned long *pivots;
	unsigned char offset;
	bool set_max = false, set_min = false;

	a_node = mas_mn(mas);
	if (ma_is_root(a_node)) {
		mas->offset = 0;
		return 0;
	}

	p_node = mte_parent(mas->node);
	if (unlikely(a_node == p_node))
		return 1;
	a_type = mas_parent_enum(mas, mas->node);
	offset = mte_parent_slot(mas->node);
	a_enode = mt_mk_node(p_node, a_type);

	/* Check to make sure all parent information is still accurate */
	if (p_node != mte_parent(mas->node))
		return 1;

	mas->node = a_enode;
	mas->offset = offset;

	if (mte_is_root(a_enode)) {
		mas->max = ULONG_MAX;
		mas->min = 0;
		return 0;
	}

	min = 0;
	max = ULONG_MAX;
	do {
		p_enode = a_enode;
		a_type = mas_parent_enum(mas, p_enode);
		a_node = mte_parent(p_enode);
		a_slot = mte_parent_slot(p_enode);
		a_enode = mt_mk_node(a_node, a_type);
		pivots = ma_pivots(a_node, a_type);

		if (unlikely(ma_dead_node(a_node)))
			return 1;

		if (!set_min && a_slot) {
			set_min = true;
			min = pivots[a_slot - 1] + 1;
		}

		if (!set_max && a_slot < mt_pivots[a_type]) {
			set_max = true;
			max = pivots[a_slot];
		}

		if (unlikely(ma_dead_node(a_node)))
			return 1;

		if (unlikely(ma_is_root(a_node)))
			break;

	} while (!set_min || !set_max);

	mas->max = max;
	mas->min = min;
	return 0;
}
/*
 * mas_pop_node() - Get a previously allocated maple node from the maple
 * state.
 * @mas: The maple state
 *
 * Return: A pointer to a maple node.
 */
static inline struct maple_node *mas_pop_node(struct ma_state *mas)
{
	struct maple_alloc *ret, *node = mas->alloc;
	unsigned long total = mas_allocated(mas);
	unsigned int req = mas_alloc_req(mas);

	/* nothing or a request pending. */
	if (WARN_ON(!total))
		return NULL;

	if (total == 1) {
		/* single allocation in this ma_state */
		mas->alloc = NULL;
		ret = node;
		goto single_node;
	}

	if (node->node_count == 1) {
		/* Single allocation in this node. */
		mas->alloc = node->slot[0];
		mas->alloc->total = node->total - 1;
		ret = node;
		goto new_head;
	}

	node->total--;
	ret = node->slot[--node->node_count];
	node->slot[node->node_count] = NULL;

single_node:
new_head:
	if (req) {
		req++;
		mas_set_alloc_req(mas, req);
	}

	memset(ret, 0, sizeof(*ret));
	return (struct maple_node *)ret;
}
/*
 * mas_push_node() - Push a node back on the maple state allocation.
 * @mas: The maple state
 * @used: The used maple node
 *
 * Stores the maple node back into @mas->alloc for reuse.  Updates allocated
 * and requested node count as necessary.
 */
static inline void mas_push_node(struct ma_state *mas, struct maple_node *used)
{
	struct maple_alloc *reuse = (struct maple_alloc *)used;
	struct maple_alloc *head = mas->alloc;
	unsigned long count;
	unsigned int requested = mas_alloc_req(mas);

	count = mas_allocated(mas);

	reuse->request_count = 0;
	reuse->node_count = 0;
	if (count && (head->node_count < MAPLE_ALLOC_SLOTS)) {
		head->slot[head->node_count++] = reuse;
		head->total++;
		goto done;
	}

	reuse->total = 1;
	if ((head) && !((unsigned long)head & 0x1)) {
		reuse->slot[0] = head;
		reuse->node_count = 1;
		reuse->total += head->total;
	}

	mas->alloc = reuse;
done:
	if (requested > 1)
		mas_set_alloc_req(mas, requested - 1);
}
/*
 * mas_alloc_nodes() - Allocate nodes into a maple state
 * @mas: The maple state
 * @gfp: The GFP Flags
 */
static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
{
	struct maple_alloc *node;
	unsigned long allocated = mas_allocated(mas);
	unsigned int requested = mas_alloc_req(mas);
	unsigned int count;
	void **slots = NULL;
	unsigned int max_req = 0;

	if (!requested)
		return;

	mas_set_alloc_req(mas, 0);
	if (mas->mas_flags & MA_STATE_PREALLOC) {
		if (allocated)
			return;
		WARN_ON(!allocated);
	}

	if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS) {
		node = (struct maple_alloc *)mt_alloc_one(gfp);
		if (!node)
			goto nomem_one;

		if (allocated) {
			node->slot[0] = mas->alloc;
			node->node_count = 1;
		} else {
			node->node_count = 0;
		}

		mas->alloc = node;
		node->total = ++allocated;
		requested--;
	}

	node = mas->alloc;
	node->request_count = 0;
	while (requested) {
		max_req = MAPLE_ALLOC_SLOTS - node->node_count;
		slots = (void **)&node->slot[node->node_count];
		max_req = min(requested, max_req);
		count = mt_alloc_bulk(gfp, max_req, slots);
		if (!count)
			goto nomem_bulk;

		if (node->node_count == 0) {
			node->slot[0]->node_count = 0;
			node->slot[0]->request_count = 0;
		}

		node->node_count += count;
		allocated += count;
		node = node->slot[0];
		requested -= count;
	}
	mas->alloc->total = allocated;
	return;

nomem_bulk:
	/* Clean up potential freed allocations on bulk failure */
	memset(slots, 0, max_req * sizeof(unsigned long));
nomem_one:
	mas_set_alloc_req(mas, requested);
	if (mas->alloc && !(((unsigned long)mas->alloc & 0x1)))
		mas->alloc->total = allocated;
	mas_set_err(mas, -ENOMEM);
}
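
/*
 * Editor's sketch (not part of the upstream file): writers typically reserve
 * nodes before modifying the tree and then consume them without per-step
 * failure handling, roughly:
 *
 *	mas_node_count(mas, 3);			// request up to 3 nodes
 *	if (mas_is_err(mas))			// -ENOMEM was recorded
 *		return xa_err(mas->node);
 *
 *	node = mas_pop_node(mas);		// take a preallocated node
 *	...
 *	mas_push_node(mas, node);		// return an unused node
 */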
/*
 * mas_free() - Free an encoded maple node
 * @mas: The maple state
 * @used: The encoded maple node to free.
 *
 * Uses rcu free if necessary, pushes @used back on the maple state
 * allocations otherwise.
 */
static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
{
	struct maple_node *tmp = mte_to_node(used);

	if (mt_in_rcu(mas->tree))
		ma_free_rcu(tmp);
	else
		mas_push_node(mas, tmp);
}
/*
 * mas_node_count_gfp() - Check if enough nodes are allocated and request more
 * if there are not enough nodes.
 * @mas: The maple state
 * @count: The number of nodes needed
 * @gfp: the gfp flags
 */
static void mas_node_count_gfp(struct ma_state *mas, int count, gfp_t gfp)
{
	unsigned long allocated = mas_allocated(mas);

	if (allocated < count) {
		mas_set_alloc_req(mas, count - allocated);
		mas_alloc_nodes(mas, gfp);
	}
}

/*
 * mas_node_count() - Check if enough nodes are allocated and request more if
 * there are not enough nodes.
 * @mas: The maple state
 * @count: The number of nodes needed
 *
 * Note: Uses GFP_NOWAIT | __GFP_NOWARN for gfp flags.
 */
static void mas_node_count(struct ma_state *mas, int count)
{
	return mas_node_count_gfp(mas, count, GFP_NOWAIT | __GFP_NOWARN);
}
/*
 * mas_start() - Sets up maple state for operations.
 * @mas: The maple state.
 *
 * If mas->node == MAS_START, then set the min, max and depth to
 * default values.
 *
 * Return:
 * - If mas->node is an error or not MAS_START, return NULL.
 * - If it's an empty tree:     NULL & mas->node == MAS_NONE
 * - If it's a single entry:    The entry & mas->node == MAS_ROOT
 * - If it's a tree:            NULL & mas->node == safe root node.
 */
static inline struct maple_enode *mas_start(struct ma_state *mas)
{
	if (likely(mas_is_start(mas))) {
		struct maple_enode *root;

		mas->min = 0;
		mas->max = ULONG_MAX;
		mas->depth = 0;

		root = mas_root(mas);
		/* Tree with nodes */
		if (likely(xa_is_node(root))) {
			mas->depth = 1;
			mas->node = mte_safe_root(root);
			mas->offset = 0;
			if (mte_dead_node(mas->node))
				mas->node = MAS_START;

			return NULL;
		}

		/* empty tree */
		if (unlikely(!root)) {
			mas->node = MAS_NONE;
			mas->offset = MAPLE_NODE_SLOTS;
			return NULL;
		}

		/* Single entry tree */
		mas->node = MAS_ROOT;
		mas->offset = MAPLE_NODE_SLOTS;

		/* Single entry tree. */
		if (mas->index > 0)
			return NULL;

		return root;
	}

	return NULL;
}
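
/*
 * Editor's illustration (not from the upstream file): for an empty tree,
 * mas_start() returns NULL with mas->node == MAS_NONE.  After storing a
 * single entry at index 0, the root holds the entry itself, so a walk
 * starting at index 0 gets the entry back with mas->node == MAS_ROOT.
 * Once the tree has nodes, mas_start() returns NULL with mas->node set to
 * the safe root and mas->min/mas->max spanning 0 to ULONG_MAX.
 */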
/*
 * ma_data_end() - Find the end of the data in a node.
 * @node: The maple node
 * @type: The maple node type
 * @pivots: The array of pivots in the node
 * @max: The maximum value in the node
 *
 * Uses metadata to find the end of the data when possible.
 * Return: The zero indexed last slot with data (may be null).
 */
static inline unsigned char ma_data_end(struct maple_node *node,
					enum maple_type type,
					unsigned long *pivots,
					unsigned long max)
{
	unsigned char offset;

	if (!pivots)
		return 0;

	if (type == maple_arange_64)
		return ma_meta_end(node, type);

	offset = mt_pivots[type] - 1;
	if (likely(!pivots[offset]))
		return ma_meta_end(node, type);

	if (likely(pivots[offset] == max))
		return offset;

	return mt_pivots[type];
}

/*
 * mas_data_end() - Find the end of the data (slot).
 * @mas: the maple state
 *
 * This method is optimized to check the metadata of a node if the node type
 * supports data end metadata.
 *
 * Return: The zero indexed last slot with data (may be null).
 */
static inline unsigned char mas_data_end(struct ma_state *mas)
{
	enum maple_type type;
	struct maple_node *node;
	unsigned char offset;
	unsigned long *pivots;

	type = mte_node_type(mas->node);
	node = mas_mn(mas);
	if (type == maple_arange_64)
		return ma_meta_end(node, type);

	pivots = ma_pivots(node, type);
	if (unlikely(ma_dead_node(node)))
		return 0;

	offset = mt_pivots[type] - 1;
	if (likely(!pivots[offset]))
		return ma_meta_end(node, type);

	if (likely(pivots[offset] == mas->max))
		return offset;

	return mt_pivots[type];
}
/*
 * mas_leaf_max_gap() - Returns the largest gap in a leaf node
 * @mas - the maple state
 *
 * Return: The maximum gap in the leaf.
 */
static unsigned long mas_leaf_max_gap(struct ma_state *mas)
{
	enum maple_type mt;
	unsigned long pstart, gap, max_gap;
	struct maple_node *mn;
	unsigned long *pivots;
	void __rcu **slots;
	unsigned char i;
	unsigned char max_piv;

	mt = mte_node_type(mas->node);
	mn = mas_mn(mas);
	slots = ma_slots(mn, mt);
	max_gap = 0;
	if (unlikely(ma_is_dense(mt))) {
		gap = 0;
		for (i = 0; i < mt_slots[mt]; i++) {
			if (slots[i]) {
				if (gap > max_gap)
					max_gap = gap;
				gap = 0;
			} else {
				gap++;
			}
		}
		if (gap > max_gap)
			max_gap = gap;
		return max_gap;
	}

	/*
	 * Checking the first implied pivot optimizes the loop below and slot
	 * 1 may be skipped if there is a gap in slot 0.
	 */
	pivots = ma_pivots(mn, mt);
	if (likely(!slots[0])) {
		max_gap = pivots[0] - mas->min + 1;
		i = 2;
	} else {
		i = 1;
	}

	/* reduce max_piv as the special case is checked before the loop */
	max_piv = ma_data_end(mn, mt, pivots, mas->max) - 1;
	/*
	 * Check end implied pivot which can only be a gap on the right most
	 * node.
	 */
	if (unlikely(mas->max == ULONG_MAX) && !slots[max_piv + 1]) {
		gap = ULONG_MAX - pivots[max_piv];
		if (gap > max_gap)
			max_gap = gap;
	}

	for (; i <= max_piv; i++) {
		/* data == no gap. */
		if (likely(slots[i]))
			continue;

		pstart = pivots[i - 1];
		gap = pivots[i] - pstart;
		if (gap > max_gap)
			max_gap = gap;

		/* There cannot be two gaps in a row. */
		i++;
	}
	return max_gap;
}
/*
 * ma_max_gap() - Get the maximum gap in a maple node (non-leaf)
 * @node: The maple node
 * @gaps: The pointer to the gaps
 * @mt: The maple node type
 * @off: Pointer to store the offset location of the gap.
 *
 * Uses the metadata data end to scan backwards across set gaps.
 *
 * Return: The maximum gap value
 */
static inline unsigned long
ma_max_gap(struct maple_node *node, unsigned long *gaps, enum maple_type mt,
	   unsigned char *off)
{
	unsigned char offset, i;
	unsigned long max_gap = 0;

	i = offset = ma_meta_end(node, mt);
	do {
		if (gaps[i] > max_gap) {
			max_gap = gaps[i];
			offset = i;
		}
	} while (i--);

	*off = offset;
	return max_gap;
}

/*
 * mas_max_gap() - find the largest gap in a non-leaf node and set the slot.
 * @mas: The maple state.
 *
 * If the metadata gap is set to MAPLE_ARANGE64_META_MAX, there is no gap.
 *
 * Return: The gap value.
 */
static inline unsigned long mas_max_gap(struct ma_state *mas)
{
	unsigned long *gaps;
	unsigned char offset;
	enum maple_type mt;
	struct maple_node *node;

	mt = mte_node_type(mas->node);
	if (ma_is_leaf(mt))
		return mas_leaf_max_gap(mas);

	node = mas_mn(mas);
	offset = ma_meta_gap(node, mt);
	if (offset == MAPLE_ARANGE64_META_MAX)
		return 0;

	gaps = ma_gaps(node, mt);
	return gaps[offset];
}
/*
 * mas_parent_gap() - Set the parent gap and any gaps above, as needed
 * @mas: The maple state
 * @offset: The gap offset in the parent to set
 * @new: The new gap value.
 *
 * Set the parent gap then continue to set the gap upwards, using the metadata
 * of the parent to see if it is necessary to check the node above.
 */
static inline void mas_parent_gap(struct ma_state *mas, unsigned char offset,
				  unsigned long new)
{
	unsigned long meta_gap = 0;
	struct maple_node *pnode;
	struct maple_enode *penode;
	unsigned long *pgaps;
	unsigned char meta_offset;
	enum maple_type pmt;

	pnode = mte_parent(mas->node);
	pmt = mas_parent_enum(mas, mas->node);
	penode = mt_mk_node(pnode, pmt);
	pgaps = ma_gaps(pnode, pmt);

ascend:
	meta_offset = ma_meta_gap(pnode, pmt);
	if (meta_offset == MAPLE_ARANGE64_META_MAX)
		meta_gap = 0;
	else
		meta_gap = pgaps[meta_offset];

	pgaps[offset] = new;

	if (meta_gap == new)
		return;

	if (offset != meta_offset) {
		if (meta_gap > new)
			return;

		ma_set_meta_gap(pnode, pmt, offset);
	} else if (new < meta_gap) {
		meta_offset = 15;
		new = ma_max_gap(pnode, pgaps, pmt, &meta_offset);
		ma_set_meta_gap(pnode, pmt, meta_offset);
	}

	if (ma_is_root(pnode))
		return;

	/* Go to the parent node. */
	pnode = mte_parent(penode);
	pmt = mas_parent_enum(mas, penode);
	pgaps = ma_gaps(pnode, pmt);
	offset = mte_parent_slot(penode);
	penode = mt_mk_node(pnode, pmt);
	goto ascend;
}
/*
 * mas_update_gap() - Update a node's gaps and propagate up if necessary.
 * @mas - the maple state.
 */
static inline void mas_update_gap(struct ma_state *mas)
{
	unsigned char pslot;
	unsigned long p_gap;
	unsigned long max_gap;

	if (!mt_is_alloc(mas->tree))
		return;

	if (mte_is_root(mas->node))
		return;

	max_gap = mas_max_gap(mas);

	pslot = mte_parent_slot(mas->node);
	p_gap = ma_gaps(mte_parent(mas->node),
			mas_parent_enum(mas, mas->node))[pslot];

	if (p_gap != max_gap)
		mas_parent_gap(mas, pslot, max_gap);
}
/*
 * mas_adopt_children() - Set the parent pointer of all nodes in @parent to
 * @parent with the slot encoded.
 * @mas - the maple state (for the tree)
 * @parent - the maple encoded node containing the children.
 */
static inline void mas_adopt_children(struct ma_state *mas,
				      struct maple_enode *parent)
{
	enum maple_type type = mte_node_type(parent);
	struct maple_node *node = mas_mn(mas);
	void __rcu **slots = ma_slots(node, type);
	unsigned long *pivots = ma_pivots(node, type);
	struct maple_enode *child;
	unsigned char offset;

	offset = ma_data_end(node, type, pivots, mas->max);
	do {
		child = mas_slot_locked(mas, slots, offset);
		mte_set_parent(child, parent, offset);
	} while (offset--);
}
/*
 * mas_replace() - Replace a maple node in the tree with mas->node.  Uses the
 * parent encoding to locate the maple node in the tree.
 * @mas - the ma_state to use for operations.
 * @advanced - boolean to adopt the child nodes and free the old node (false)
 * or leave the node (true) and handle the adoption and free elsewhere.
 */
static inline void mas_replace(struct ma_state *mas, bool advanced)
	__must_hold(mas->tree->lock)
{
	struct maple_node *mn = mas_mn(mas);
	struct maple_enode *old_enode;
	unsigned char offset = 0;
	void __rcu **slots = NULL;

	if (ma_is_root(mn)) {
		old_enode = mas_root_locked(mas);
	} else {
		offset = mte_parent_slot(mas->node);
		slots = ma_slots(mte_parent(mas->node),
				 mas_parent_enum(mas, mas->node));
		old_enode = mas_slot_locked(mas, slots, offset);
	}

	if (!advanced && !mte_is_leaf(mas->node))
		mas_adopt_children(mas, mas->node);

	if (mte_is_root(mas->node)) {
		mn->parent = ma_parent_ptr(
			      ((unsigned long)mas->tree | MA_ROOT_PARENT));
		rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
		mas_set_height(mas);
	} else {
		rcu_assign_pointer(slots[offset], mas->node);
	}

	if (!advanced) {
		mte_set_node_dead(old_enode);
		mas_free(mas, old_enode);
	}
}
/*
 * mas_new_child() - Find the new child of a node.
 * @mas: the maple state
 * @child: the maple state to store the child.
 */
static inline bool mas_new_child(struct ma_state *mas, struct ma_state *child)
	__must_hold(mas->tree->lock)
{
	enum maple_type mt;
	unsigned char offset;
	unsigned char end;
	unsigned long *pivots;
	struct maple_enode *entry;
	struct maple_node *node;
	void __rcu **slots;

	mt = mte_node_type(mas->node);
	node = mas_mn(mas);
	slots = ma_slots(node, mt);
	pivots = ma_pivots(node, mt);
	end = ma_data_end(node, mt, pivots, mas->max);
	for (offset = mas->offset; offset <= end; offset++) {
		entry = mas_slot_locked(mas, slots, offset);
		if (mte_parent(entry) == node) {
			*child = *mas;
			mas->offset = offset + 1;
			child->offset = offset;
			mas_descend(child);
			child->offset = 0;
			return true;
		}
	}

	return false;
}
/*
 * mab_shift_right() - Shift the data in mab right.  Note, does not clean out
 * the old data or set b_node->b_end.
 * @b_node: the maple_big_node
 * @shift: the shift count
 */
static inline void mab_shift_right(struct maple_big_node *b_node,
				   unsigned char shift)
{
	unsigned long size = b_node->b_end * sizeof(unsigned long);

	memmove(b_node->pivot + shift, b_node->pivot, size);
	memmove(b_node->slot + shift, b_node->slot, size);
	if (b_node->type == maple_arange_64)
		memmove(b_node->gap + shift, b_node->gap, size);
}
/*
 * mab_middle_node() - Check if a middle node is needed (unlikely)
 * @b_node: the maple_big_node that contains the data.
 * @split: the potential split location
 * @slot_count: the size that can be stored in a single node being considered.
 *
 * Return: true if a middle node is required.
 */
static inline bool mab_middle_node(struct maple_big_node *b_node, int split,
				   unsigned char slot_count)
{
	unsigned char size = b_node->b_end;

	if (size >= 2 * slot_count)
		return true;

	if (!b_node->slot[split] && (size >= 2 * slot_count - 1))
		return true;

	return false;
}
/*
 * mab_no_null_split() - ensure the split doesn't fall on a NULL
 * @b_node: the maple_big_node with the data
 * @split: the suggested split location
 * @slot_count: the number of slots in the node being considered.
 *
 * Return: the split location.
 */
static inline int mab_no_null_split(struct maple_big_node *b_node,
				    unsigned char split,
				    unsigned char slot_count)
{
	if (!b_node->slot[split]) {
		/*
		 * If the split is less than the max slot && the right side
		 * will still be sufficient, then increment the split on NULL.
		 */
		if ((split < slot_count - 1) &&
		    (b_node->b_end - split) > (mt_min_slots[b_node->type]))
			split++;
		else
			split--;
	}
	return split;
}
/*
 * mab_calc_split() - Calculate the split location and if there needs to be
 * two splits.
 * @bn: The maple_big_node with the data
 * @mid_split: The second split, if required.  0 otherwise.
 *
 * Return: The first split location.  The middle split is set in @mid_split.
 */
static inline int mab_calc_split(struct ma_state *mas,
	 struct maple_big_node *bn, unsigned char *mid_split, unsigned long min)
{
	unsigned char b_end = bn->b_end;
	int split = b_end / 2; /* Assume equal split. */
	unsigned char slot_min, slot_count = mt_slots[bn->type];

	/*
	 * To support gap tracking, all NULL entries are kept together and a
	 * node cannot end on a NULL entry, with the exception of the left-most
	 * leaf.  The limitation means that the split of a node must be checked
	 * for this condition and be able to put more data in one direction or
	 * the other.
	 */
	if (unlikely((mas->mas_flags & MA_STATE_BULK))) {
		*mid_split = 0;
		split = b_end - mt_min_slots[bn->type];

		if (!ma_is_leaf(bn->type))
			return split;

		mas->mas_flags |= MA_STATE_REBALANCE;
		if (!bn->slot[split])
			split--;
		return split;
	}

	/*
	 * Although extremely rare, it is possible to enter what is known as
	 * the 3-way split scenario.  The 3-way split comes about by means of a
	 * store of a range that overwrites the end and beginning of two full
	 * nodes.  The result is a set of entries that cannot be stored in 2
	 * nodes.  Sometimes, these two nodes can also be located in different
	 * parent nodes which are also full.  This can carry upwards all the
	 * way to the root in the worst case.
	 */
	if (unlikely(mab_middle_node(bn, split, slot_count))) {
		split = b_end / 3;
		*mid_split = split * 2;
	} else {
		slot_min = mt_min_slots[bn->type];

		*mid_split = 0;
		/*
		 * Avoid having a range less than the slot count unless it
		 * causes one node to be deficient.
		 * NOTE: mt_min_slots is 1 based, b_end and split are zero.
		 */
		while (((bn->pivot[split] - min) < slot_count - 1) &&
		       (split < slot_count - 1) && (b_end - split > slot_min))
			split++;
	}

	/* Avoid ending a node on a NULL entry */
	split = mab_no_null_split(bn, split, slot_count);

	if (unlikely(*mid_split))
		*mid_split = mab_no_null_split(bn, *mid_split, slot_count);

	return split;
}
/*
 * mas_mab_cp() - Copy data from a maple state inclusively to a maple_big_node
 * and set @b_node->b_end to the next free slot.
 * @mas: The maple state
 * @mas_start: The starting slot to copy
 * @mas_end: The end slot to copy (inclusively)
 * @b_node: The maple_big_node to place the data
 * @mab_start: The starting location in maple_big_node to store the data.
 */
static inline void mas_mab_cp(struct ma_state *mas, unsigned char mas_start,
			unsigned char mas_end, struct maple_big_node *b_node,
			unsigned char mab_start)
{
	enum maple_type mt;
	struct maple_node *node;
	void __rcu **slots;
	unsigned long *pivots, *gaps;
	int i = mas_start, j = mab_start;
	unsigned char piv_end;

	node = mas_mn(mas);
	mt = mte_node_type(mas->node);
	pivots = ma_pivots(node, mt);
	if (!i) {
		b_node->pivot[j] = pivots[i++];
		if (unlikely(i > mas_end))
			goto complete;
		j++;
	}

	piv_end = min(mas_end, mt_pivots[mt]);
	for (; i < piv_end; i++, j++) {
		b_node->pivot[j] = pivots[i];
		if (unlikely(!b_node->pivot[j]))
			break;

		if (unlikely(mas->max == b_node->pivot[j]))
			goto complete;
	}

	if (likely(i <= mas_end))
		b_node->pivot[j] = mas_safe_pivot(mas, pivots, i, mt);

complete:
	b_node->b_end = ++j;
	j -= mab_start;
	slots = ma_slots(node, mt);
	memcpy(b_node->slot + mab_start, slots + mas_start, sizeof(void *) * j);
	if (!ma_is_leaf(mt) && mt_is_alloc(mas->tree)) {
		gaps = ma_gaps(node, mt);
		memcpy(b_node->gap + mab_start, gaps + mas_start,
		       sizeof(unsigned long) * j);
	}
}
/*
 * mas_leaf_set_meta() - Set the metadata of a leaf if possible.
 * @mas: The maple state
 * @node: The maple node
 * @pivots: pointer to the maple node pivots
 * @mt: The maple type
 * @end: The assumed end
 *
 * Note, end may be incremented within this function but not modified at the
 * source.  This is fine since the metadata is the last thing to be stored in
 * a node during a write.
 */
static inline void mas_leaf_set_meta(struct ma_state *mas,
		struct maple_node *node, unsigned long *pivots,
		enum maple_type mt, unsigned char end)
{
	/* There is no room for metadata already */
	if (mt_pivots[mt] <= end)
		return;

	if (pivots[end] && pivots[end] < mas->max)
		end++;

	if (end < mt_slots[mt] - 1)
		ma_set_meta(node, mt, 0, end);
}
/*
 * mab_mas_cp() - Copy data from maple_big_node to a maple encoded node.
 * @b_node: the maple_big_node that has the data
 * @mab_start: the start location in @b_node.
 * @mab_end: The end location in @b_node (inclusively)
 * @mas: The maple state with the maple encoded node.
 */
static inline void mab_mas_cp(struct maple_big_node *b_node,
			      unsigned char mab_start, unsigned char mab_end,
			      struct ma_state *mas, bool new_max)
{
	int i, j = 0;
	enum maple_type mt = mte_node_type(mas->node);
	struct maple_node *node = mte_to_node(mas->node);
	void __rcu **slots = ma_slots(node, mt);
	unsigned long *pivots = ma_pivots(node, mt);
	unsigned long *gaps = NULL;
	unsigned char end;

	if (mab_end - mab_start > mt_pivots[mt])
		mab_end--;

	if (!pivots[mt_pivots[mt] - 1])
		slots[mt_pivots[mt]] = NULL;

	i = mab_start;
	do {
		pivots[j++] = b_node->pivot[i++];
	} while (i <= mab_end && likely(b_node->pivot[i]));

	memcpy(slots, b_node->slot + mab_start,
	       sizeof(void *) * (i - mab_start));

	if (new_max)
		mas->max = b_node->pivot[i - 1];

	end = j - 1;
	if (likely(!ma_is_leaf(mt) && mt_is_alloc(mas->tree))) {
		unsigned long max_gap = 0;
		unsigned char offset = 15;

		gaps = ma_gaps(node, mt);
		do {
			gaps[--j] = b_node->gap[--i];
			if (gaps[j] > max_gap) {
				offset = j;
				max_gap = gaps[j];
			}
		} while (j);

		ma_set_meta(node, mt, offset, end);
	} else {
		mas_leaf_set_meta(mas, node, pivots, mt, end);
	}
}
/*
 * mas_descend_adopt() - Descend through a sub-tree and adopt children.
 * @mas: the maple state with the maple encoded node of the sub-tree.
 *
 * Descend through a sub-tree and adopt children who do not have the correct
 * parents set.  Follow the parents which have the correct parents as they are
 * the new entries which need to be followed to find other incorrectly set
 * parents.
 */
static inline void mas_descend_adopt(struct ma_state *mas)
{
	struct ma_state list[3], next[3];
	int i, n;

	/*
	 * At each level there may be up to 3 correct parent pointers which
	 * indicates the new nodes which need to be walked to find any new
	 * nodes at a lower level.
	 */

	for (i = 0; i < 3; i++) {
		list[i] = *mas;
		list[i].offset = 0;
		next[i].offset = 0;
	}
	next[0] = *mas;

	while (!mte_is_leaf(list[0].node)) {
		n = 0;
		for (i = 0; i < 3; i++) {
			if (mas_is_none(&list[i]))
				continue;

			if (i && list[i-1].node == list[i].node)
				continue;

			while ((n < 3) && (mas_new_child(&list[i], &next[n])))
				n++;

			mas_adopt_children(&list[i], list[i].node);
		}

		while (n < 3)
			next[n++].node = MAS_NONE;

		/* descend by setting the list to the children */
		for (i = 0; i < 3; i++)
			list[i] = next[i];
	}
}
/*
 * mas_bulk_rebalance() - Rebalance the end of a tree after a bulk insert.
 * @mas: The maple state
 * @end: The maple node end
 * @mt: The maple node type
 */
static inline void mas_bulk_rebalance(struct ma_state *mas, unsigned char end,
				      enum maple_type mt)
{
	if (!(mas->mas_flags & MA_STATE_BULK))
		return;

	if (mte_is_root(mas->node))
		return;

	if (end > mt_min_slots[mt]) {
		mas->mas_flags &= ~MA_STATE_REBALANCE;
		return;
	}
}
/*
 * mas_store_b_node() - Store an @entry into the b_node while also copying the
 * data from a maple encoded node.
 * @wr_mas: the maple write state
 * @b_node: the maple_big_node to fill with data
 * @offset_end: the offset to end copying
 *
 * Return: The actual end of the data stored in @b_node
 */
static noinline_for_kasan void mas_store_b_node(struct ma_wr_state *wr_mas,
		struct maple_big_node *b_node, unsigned char offset_end)
{
	unsigned char slot;
	unsigned char b_end;
	/* Possible underflow of piv will wrap back to 0 before use. */
	unsigned long piv;
	struct ma_state *mas = wr_mas->mas;

	b_node->type = wr_mas->type;
	b_end = 0;
	slot = mas->offset;
	if (slot) {
		/* Copy start data up to insert. */
		mas_mab_cp(mas, 0, slot - 1, b_node, 0);
		b_end = b_node->b_end;
		piv = b_node->pivot[b_end - 1];
	} else {
		piv = mas->min - 1;
	}

	if (piv + 1 < mas->index) {
		/* Handle range starting after old range */
		b_node->slot[b_end] = wr_mas->content;
		if (!wr_mas->content)
			b_node->gap[b_end] = mas->index - 1 - piv;
		b_node->pivot[b_end++] = mas->index - 1;
	}

	/* Store the new entry. */
	mas->offset = b_end;
	b_node->slot[b_end] = wr_mas->entry;
	b_node->pivot[b_end] = mas->last;

	/* Appended. */
	if (mas->last >= mas->max)
		goto b_end;

	/* Handle new range ending before old range ends */
	piv = mas_logical_pivot(mas, wr_mas->pivots, offset_end, wr_mas->type);
	if (piv > mas->last) {
		if (piv == ULONG_MAX)
			mas_bulk_rebalance(mas, b_node->b_end, wr_mas->type);

		if (offset_end != slot)
			wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
							  offset_end);

		b_node->slot[++b_end] = wr_mas->content;
		if (!wr_mas->content)
			b_node->gap[b_end] = piv - mas->last + 1;
		b_node->pivot[b_end] = piv;
	}

	slot = offset_end + 1;
	if (slot > wr_mas->node_end)
		goto b_end;

	/* Copy end data to the end of the node. */
	mas_mab_cp(mas, slot, wr_mas->node_end + 1, b_node, ++b_end);
	b_node->b_end--;
	return;

b_end:
	b_node->b_end = b_end;
}
/*
 * mas_prev_sibling() - Find the previous node with the same parent.
 * @mas: the maple state
 *
 * Return: True if there is a previous sibling, false otherwise.
 */
static inline bool mas_prev_sibling(struct ma_state *mas)
{
	unsigned int p_slot = mte_parent_slot(mas->node);

	if (mte_is_root(mas->node))
		return false;

	if (!p_slot)
		return false;

	mas_ascend(mas);
	mas->offset = p_slot - 1;
	mas_descend(mas);
	return true;
}

/*
 * mas_next_sibling() - Find the next node with the same parent.
 * @mas: the maple state
 *
 * Return: true if there is a next sibling, false otherwise.
 */
static inline bool mas_next_sibling(struct ma_state *mas)
{
	MA_STATE(parent, mas->tree, mas->index, mas->last);

	if (mte_is_root(mas->node))
		return false;

	parent = *mas;
	mas_ascend(&parent);
	parent.offset = mte_parent_slot(mas->node) + 1;
	if (parent.offset > mas_data_end(&parent))
		return false;

	*mas = parent;
	mas_descend(mas);
	return true;
}
/*
 * mte_node_or_none() - Return the encoded node or MAS_NONE.
 * @enode: The encoded maple node.
 *
 * Shorthand to avoid setting %NULLs in the tree or maple_subtree_state.
 *
 * Return: @enode or MAS_NONE
 */
static inline struct maple_enode *mte_node_or_none(struct maple_enode *enode)
{
	if (enode)
		return enode;

	return ma_enode_ptr(MAS_NONE);
}
/*
 * mas_wr_node_walk() - Find the correct offset for the index in the @mas.
 * @wr_mas: The maple write state
 *
 * Uses mas_slot_locked() and does not need to worry about dead nodes.
 */
static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;
	unsigned char count, offset;

	if (unlikely(ma_is_dense(wr_mas->type))) {
		wr_mas->r_max = wr_mas->r_min = mas->index;
		mas->offset = mas->index = mas->min;
		return;
	}

	wr_mas->node = mas_mn(wr_mas->mas);
	wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type);
	count = wr_mas->node_end = ma_data_end(wr_mas->node, wr_mas->type,
					       wr_mas->pivots, mas->max);
	offset = mas->offset;

	while (offset < count && mas->index > wr_mas->pivots[offset])
		offset++;

	wr_mas->r_max = offset < count ? wr_mas->pivots[offset] : mas->max;
	wr_mas->r_min = mas_safe_min(mas, wr_mas->pivots, offset);
	wr_mas->offset_end = mas->offset = offset;
}
/*
 * mas_topiary_range() - Add a range of slots to the topiary.
 * @mas: The maple state
 * @destroy: The topiary to add the slots (usually destroy)
 * @start: The starting slot inclusively
 * @end: The end slot inclusively
 */
static inline void mas_topiary_range(struct ma_state *mas,
	struct ma_topiary *destroy, unsigned char start, unsigned char end)
{
	void __rcu **slots;
	unsigned char offset;

	MT_BUG_ON(mas->tree, mte_is_leaf(mas->node));
	slots = ma_slots(mas_mn(mas), mte_node_type(mas->node));
	for (offset = start; offset <= end; offset++) {
		struct maple_enode *enode = mas_slot_locked(mas, slots, offset);

		if (mte_dead_node(enode))
			continue;

		mat_add(destroy, enode);
	}
}
/*
 * mast_topiary() - Add the portions of the tree to the removal list; either to
 * be freed or discarded (destroy walk).
 * @mast: The maple_subtree_state.
 */
static inline void mast_topiary(struct maple_subtree_state *mast)
{
	MA_WR_STATE(wr_mas, mast->orig_l, NULL);
	unsigned char r_start, r_end;
	unsigned char l_start, l_end;
	void __rcu **l_slots, **r_slots;

	wr_mas.type = mte_node_type(mast->orig_l->node);
	mast->orig_l->index = mast->orig_l->last;
	mas_wr_node_walk(&wr_mas);
	l_start = mast->orig_l->offset + 1;
	l_end = mas_data_end(mast->orig_l);
	r_start = 0;
	r_end = mast->orig_r->offset;

	if (r_end)
		r_end--;

	l_slots = ma_slots(mas_mn(mast->orig_l),
			   mte_node_type(mast->orig_l->node));

	r_slots = ma_slots(mas_mn(mast->orig_r),
			   mte_node_type(mast->orig_r->node));

	if ((l_start < l_end) &&
	    mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_start))) {
		l_start++;
	}

	if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_end))) {
		if (r_end)
			r_end--;
	}

	if ((l_start > r_end) && (mast->orig_l->node == mast->orig_r->node))
		return;

	/* At the node where left and right sides meet, add the parts between */
	if (mast->orig_l->node == mast->orig_r->node) {
		return mas_topiary_range(mast->orig_l, mast->destroy,
					 l_start, r_end);
	}

	/* mast->orig_r is different and consumed. */
	if (mte_is_leaf(mast->orig_r->node))
		return;

	if (mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_end)))
		l_end--;

	if (l_start <= l_end)
		mas_topiary_range(mast->orig_l, mast->destroy, l_start, l_end);

	if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_start)))
		r_start++;

	if (r_start <= r_end)
		mas_topiary_range(mast->orig_r, mast->destroy, 0, r_end);
}

/*
 * mast_rebalance_next() - Rebalance against the next node
 * @mast: The maple subtree state
 */
static inline void mast_rebalance_next(struct maple_subtree_state *mast)
{
	unsigned char b_end = mast->bn->b_end;

	mas_mab_cp(mast->orig_r, 0, mt_slot_count(mast->orig_r->node),
		   mast->bn, b_end);
	mast->orig_r->last = mast->orig_r->max;
}

/*
 * mast_rebalance_prev() - Rebalance against the previous node
 * @mast: The maple subtree state
 */
static inline void mast_rebalance_prev(struct maple_subtree_state *mast)
{
	unsigned char end = mas_data_end(mast->orig_l) + 1;
	unsigned char b_end = mast->bn->b_end;

	mab_shift_right(mast->bn, end);
	mas_mab_cp(mast->orig_l, 0, end - 1, mast->bn, 0);
	mast->l->min = mast->orig_l->min;
	mast->orig_l->index = mast->orig_l->min;
	mast->bn->b_end = end + b_end;
	mast->l->offset += end;
}

/*
 * mast_spanning_rebalance() - Rebalance nodes with nearest neighbour favouring
 * the node to the right. Checking the nodes to the right then the left at each
 * level upwards until root is reached. Free and destroy as needed.
 * Data is copied into the @mast->bn.
 * @mast: The maple_subtree_state.
 */
static inline
bool mast_spanning_rebalance(struct maple_subtree_state *mast)
{
	struct ma_state r_tmp = *mast->orig_r;
	struct ma_state l_tmp = *mast->orig_l;
	struct maple_enode *ancestor = NULL;
	unsigned char start, end;
	unsigned char depth = 0;

	r_tmp = *mast->orig_r;
	l_tmp = *mast->orig_l;
	do {
		mas_ascend(mast->orig_r);
		mas_ascend(mast->orig_l);
		depth++;
		if (!ancestor &&
		    (mast->orig_r->node == mast->orig_l->node)) {
			ancestor = mast->orig_r->node;
			end = mast->orig_r->offset - 1;
			start = mast->orig_l->offset + 1;
		}

		if (mast->orig_r->offset < mas_data_end(mast->orig_r)) {
			if (!ancestor) {
				ancestor = mast->orig_r->node;
				start = 0;
			}

			mast->orig_r->offset++;
			do {
				mas_descend(mast->orig_r);
				mast->orig_r->offset = 0;
				depth--;
			} while (depth);

			mast_rebalance_next(mast);
			do {
				unsigned char l_off = 0;
				struct maple_enode *child = r_tmp.node;

				mas_ascend(&r_tmp);
				if (ancestor == r_tmp.node)
					l_off = start;

				if (r_tmp.offset)
					r_tmp.offset--;

				if (l_off < r_tmp.offset)
					mas_topiary_range(&r_tmp, mast->destroy,
							  l_off, r_tmp.offset);

				if (l_tmp.node != child)
					mat_add(mast->free, child);

			} while (r_tmp.node != ancestor);

			*mast->orig_l = l_tmp;
			return true;

		} else if (mast->orig_l->offset != 0) {
			if (!ancestor) {
				ancestor = mast->orig_l->node;
				end = mas_data_end(mast->orig_l);
			}

			mast->orig_l->offset--;
			do {
				mas_descend(mast->orig_l);
				mast->orig_l->offset =
					mas_data_end(mast->orig_l);
				depth--;
			} while (depth);

			mast_rebalance_prev(mast);
			do {
				unsigned char r_off;
				struct maple_enode *child = l_tmp.node;

				mas_ascend(&l_tmp);
				if (ancestor == l_tmp.node)
					r_off = end;
				else
					r_off = mas_data_end(&l_tmp);

				if (l_tmp.offset < r_off)
					l_tmp.offset++;

				if (l_tmp.offset < r_off)
					mas_topiary_range(&l_tmp, mast->destroy,
							  l_tmp.offset, r_off);

				if (r_tmp.node != child)
					mat_add(mast->free, child);

			} while (l_tmp.node != ancestor);

			*mast->orig_r = r_tmp;
			return true;
		}
	} while (!mte_is_root(mast->orig_r->node));

	*mast->orig_r = r_tmp;
	*mast->orig_l = l_tmp;
	return false;
}

/*
 * mast_ascend_free() - Add current original maple state nodes to the free list
 * and ascend.
 * @mast: the maple subtree state.
 *
 * Ascend the original left and right sides and add the previous nodes to the
 * free list. Set the slots to point to the correct location in the new nodes.
 */
static inline void
mast_ascend_free(struct maple_subtree_state *mast)
{
	MA_WR_STATE(wr_mas, mast->orig_r, NULL);
	struct maple_enode *left = mast->orig_l->node;
	struct maple_enode *right = mast->orig_r->node;

	mas_ascend(mast->orig_l);
	mas_ascend(mast->orig_r);
	mat_add(mast->free, left);

	if (left != right)
		mat_add(mast->free, right);

	mast->orig_r->offset = 0;
	mast->orig_r->index = mast->r->max;
	/* last should be larger than or equal to index */
	if (mast->orig_r->last < mast->orig_r->index)
		mast->orig_r->last = mast->orig_r->index;
	/*
	 * The node may not contain the value so set slot to ensure all
	 * of the nodes contents are freed or destroyed.
	 */
	wr_mas.type = mte_node_type(mast->orig_r->node);
	mas_wr_node_walk(&wr_mas);
	/* Set up the left side of things */
	mast->orig_l->offset = 0;
	mast->orig_l->index = mast->l->min;
	wr_mas.mas = mast->orig_l;
	wr_mas.type = mte_node_type(mast->orig_l->node);
	mas_wr_node_walk(&wr_mas);

	mast->bn->type = wr_mas.type;
}

/*
 * mas_new_ma_node() - Create and return a new maple node. Helper function.
 * @mas: the maple state with the allocations.
 * @b_node: the maple_big_node with the type encoding.
 *
 * Use the node type from the maple_big_node to allocate a new node from the
 * ma_state. This function exists mainly for code readability.
 *
 * Return: A new maple encoded node
 */
static inline struct maple_enode
*mas_new_ma_node(struct ma_state *mas, struct maple_big_node *b_node)
{
	return mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), b_node->type);
}

/*
 * mas_mab_to_node() - Set up right and middle nodes
 *
 * @mas: the maple state that contains the allocations.
 * @b_node: the node which contains the data.
 * @left: The pointer which will have the left node
 * @right: The pointer which may have the right node
 * @middle: the pointer which may have the middle node (rare)
 * @mid_split: the split location for the middle node
 *
 * Return: the split of left.
 */
static inline unsigned char mas_mab_to_node(struct ma_state *mas,
	struct maple_big_node *b_node, struct maple_enode **left,
	struct maple_enode **right, struct maple_enode **middle,
	unsigned char *mid_split, unsigned long min)
{
	unsigned char split = 0;
	unsigned char slot_count = mt_slots[b_node->type];

	*left = mas_new_ma_node(mas, b_node);
	*right = NULL;
	*middle = NULL;
	*mid_split = 0;

	if (b_node->b_end < slot_count) {
		split = b_node->b_end;
	} else {
		split = mab_calc_split(mas, b_node, mid_split, min);
		*right = mas_new_ma_node(mas, b_node);
	}

	if (*mid_split)
		*middle = mas_new_ma_node(mas, b_node);

	return split;
}

/*
 * mab_set_b_end() - Add entry to b_node at b_node->b_end and increment the end
 * pointer.
 * @b_node - the big node to add the entry
 * @mas - the maple state to get the pivot (mas->max)
 * @entry - the entry to add, if NULL nothing happens.
 */
static inline void mab_set_b_end(struct maple_big_node *b_node,
				 struct ma_state *mas,
				 void *entry)
{
	if (!entry)
		return;

	b_node->slot[b_node->b_end] = entry;
	if (mt_is_alloc(mas->tree))
		b_node->gap[b_node->b_end] = mas_max_gap(mas);
	b_node->pivot[b_node->b_end++] = mas->max;
}

/*
 * mas_set_split_parent() - combine_then_separate helper function. Sets the parent
 * of @mas->node to either @left or @right, depending on @slot and @split
 *
 * @mas - the maple state with the node that needs a parent
 * @left - possible parent 1
 * @right - possible parent 2
 * @slot - the slot the mas->node was placed
 * @split - the split location between @left and @right
 */
static inline void mas_set_split_parent(struct ma_state *mas,
					struct maple_enode *left,
					struct maple_enode *right,
					unsigned char *slot, unsigned char split)
{
	if (mas_is_none(mas))
		return;

	if ((*slot) <= split)
		mte_set_parent(mas->node, left, *slot);
	else if (right)
		mte_set_parent(mas->node, right, (*slot) - split - 1);

	(*slot)++;
}

/*
 * mte_mid_split_check() - Check if the next node passes the mid-split
 * @**l: Pointer to left encoded maple node.
 * @**m: Pointer to middle encoded maple node.
 * @**r: Pointer to right encoded maple node.
 * @slot: The offset
 * @*split: The split location.
 * @mid_split: The middle split.
 */
static inline void mte_mid_split_check(struct maple_enode **l,
				       struct maple_enode **r,
				       struct maple_enode *right,
				       unsigned char slot,
				       unsigned char *split,
				       unsigned char mid_split)
{
	if (*r == right)
		return;

	if (slot < mid_split)
		return;

	*l = *r;
	*r = right;
	*split = mid_split;
}

/*
 * mast_set_split_parents() - Helper function to set three nodes parents. Slot
 * is taken from @mast->l.
 * @mast - the maple subtree state
 * @left - the left node
 * @right - the right node
 * @split - the split location.
 */
static inline void mast_set_split_parents(struct maple_subtree_state *mast,
					  struct maple_enode *left,
					  struct maple_enode *middle,
					  struct maple_enode *right,
					  unsigned char split,
					  unsigned char mid_split)
{
	unsigned char slot;
	struct maple_enode *l = left;
	struct maple_enode *r = right;

	if (mas_is_none(mast->l))
		return;

	if (middle)
		r = middle;

	slot = mast->l->offset;

	mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
	mas_set_split_parent(mast->l, l, r, &slot, split);

	mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
	mas_set_split_parent(mast->m, l, r, &slot, split);

	mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
	mas_set_split_parent(mast->r, l, r, &slot, split);
}

/*
 * mas_wmb_replace() - Write memory barrier and replace
 * @mas: The maple state
 * @free: the maple topiary list of nodes to free
 * @destroy: The maple topiary list of nodes to destroy (walk and free)
 *
 * Updates gap as necessary.
 */
static inline void mas_wmb_replace(struct ma_state *mas,
				   struct ma_topiary *free,
				   struct ma_topiary *destroy)
{
	/* All nodes must see old data as dead prior to replacing that data */
	smp_wmb(); /* Needed for RCU */

	/* Insert the new data in the tree */
	mas_replace(mas, true);

	if (!mte_is_leaf(mas->node))
		mas_descend_adopt(mas);

	mas_mat_free(mas, free);

	if (destroy)
		mas_mat_destroy(mas, destroy);

	if (mte_is_leaf(mas->node))
		return;

	mas_update_gap(mas);
}

/*
 * mast_new_root() - Set a new tree root during subtree creation
 * @mast: The maple subtree state
 * @mas: The maple state
 */
static inline void mast_new_root(struct maple_subtree_state *mast,
				 struct ma_state *mas)
{
	mas_mn(mast->l)->parent =
		ma_parent_ptr(((unsigned long)mas->tree | MA_ROOT_PARENT));
	if (!mte_dead_node(mast->orig_l->node) &&
	    !mte_is_root(mast->orig_l->node)) {
		do {
			mast_ascend_free(mast);
			mast_topiary(mast);
		} while (!mte_is_root(mast->orig_l->node));
	}
	if ((mast->orig_l->node != mas->node) &&
	    (mast->l->depth > mas_mt_height(mas))) {
		mat_add(mast->free, mas->node);
	}
}

/*
 * mast_cp_to_nodes() - Copy data out to nodes.
 * @mast: The maple subtree state
 * @left: The left encoded maple node
 * @middle: The middle encoded maple node
 * @right: The right encoded maple node
 * @split: The location to split between left and (middle ? middle : right)
 * @mid_split: The location to split between middle and right.
 */
static inline void mast_cp_to_nodes(struct maple_subtree_state *mast,
	struct maple_enode *left, struct maple_enode *middle,
	struct maple_enode *right, unsigned char split, unsigned char mid_split)
{
	bool new_lmax = true;

	mast->l->node = mte_node_or_none(left);
	mast->m->node = mte_node_or_none(middle);
	mast->r->node = mte_node_or_none(right);

	mast->l->min = mast->orig_l->min;
	if (split == mast->bn->b_end) {
		mast->l->max = mast->orig_r->max;
		new_lmax = false;
	}

	mab_mas_cp(mast->bn, 0, split, mast->l, new_lmax);

	if (middle) {
		mab_mas_cp(mast->bn, 1 + split, mid_split, mast->m, true);
		mast->m->min = mast->bn->pivot[split] + 1;
		mast->m->max = mast->bn->pivot[mid_split];
		split = mid_split;
	}

	mast->r->max = mast->orig_r->max;
	if (right) {
		mab_mas_cp(mast->bn, 1 + split, mast->bn->b_end, mast->r, false);
		mast->r->min = mast->bn->pivot[split] + 1;
	}
}

/*
 * mast_combine_cp_left - Copy in the original left side of the tree into the
 * combined data set in the maple subtree state big node.
 * @mast: The maple subtree state
 */
static inline void mast_combine_cp_left(struct maple_subtree_state *mast)
{
	unsigned char l_slot = mast->orig_l->offset;

	if (!l_slot)
		return;

	mas_mab_cp(mast->orig_l, 0, l_slot - 1, mast->bn, 0);
}

/*
 * mast_combine_cp_right: Copy in the original right side of the tree into the
 * combined data set in the maple subtree state big node.
 * @mast: The maple subtree state
 */
static inline void mast_combine_cp_right(struct maple_subtree_state *mast)
{
	if (mast->bn->pivot[mast->bn->b_end - 1] >= mast->orig_r->max)
		return;

	mas_mab_cp(mast->orig_r, mast->orig_r->offset + 1,
		   mt_slot_count(mast->orig_r->node), mast->bn,
		   mast->bn->b_end);
	mast->orig_r->last = mast->orig_r->max;
}

/*
 * mast_sufficient: Check if the maple subtree state has enough data in the big
 * node to create at least one sufficient node
 * @mast: the maple subtree state
 */
static inline bool mast_sufficient(struct maple_subtree_state *mast)
{
	if (mast->bn->b_end > mt_min_slot_count(mast->orig_l->node))
		return true;

	return false;
}

/*
 * mast_overflow: Check if there is too much data in the subtree state for a
 * single node.
 * @mast: The maple subtree state
 */
static inline bool mast_overflow(struct maple_subtree_state *mast)
{
	if (mast->bn->b_end >= mt_slot_count(mast->orig_l->node))
		return true;

	return false;
}

static inline void *mtree_range_walk(struct ma_state *mas)
{
	unsigned long *pivots;
	unsigned char offset;
	struct maple_node *node;
	struct maple_enode *next, *last;
	enum maple_type type;
	void __rcu **slots;
	unsigned char end;
	unsigned long max, min;
	unsigned long prev_max, prev_min;

	next = mas->node;
	min = mas->min;
	max = mas->max;
	do {
		offset = 0;
		last = next;
		node = mte_to_node(next);
		type = mte_node_type(next);
		pivots = ma_pivots(node, type);
		end = ma_data_end(node, type, pivots, max);
		if (unlikely(ma_dead_node(node)))
			goto dead_node;

		if (pivots[offset] >= mas->index) {
			prev_max = max;
			prev_min = min;
			max = pivots[offset];
			goto next;
		}

		do {
			offset++;
		} while ((offset < end) && (pivots[offset] < mas->index));

		prev_min = min;
		min = pivots[offset - 1] + 1;
		prev_max = max;
		if (likely(offset < end && pivots[offset]))
			max = pivots[offset];

next:
		slots = ma_slots(node, type);
		next = mt_slot(mas->tree, slots, offset);
		if (unlikely(ma_dead_node(node)))
			goto dead_node;
	} while (!ma_is_leaf(type));

	mas->offset = offset;
	mas->index = min;
	mas->last = max;
	mas->min = prev_min;
	mas->max = prev_max;
	mas->node = last;
	return (void *)next;

dead_node:
	mas_reset(mas);
	return NULL;
}

/*
 * mas_spanning_rebalance() - Rebalance across two nodes which may not be peers.
 * @mas: The starting maple state
 * @mast: The maple_subtree_state, keeps track of 4 maple states.
 * @count: The estimated count of iterations needed.
 *
 * Follow the tree upwards from @l_mas and @r_mas for @count, or until the root
 * is hit. First @b_node is split into two entries which are inserted into the
 * next iteration of the loop. @b_node is returned populated with the final
 * iteration. @mas is used to obtain allocations. orig_l_mas keeps track of the
 * nodes that will remain active by using orig_l_mas->index and orig_l_mas->last
 * to account of what has been copied into the new sub-tree. The update of
 * orig_l_mas->last is used in mas_consume to find the slots that will need to
 * be either freed or destroyed. orig_l_mas->depth keeps track of the height of
 * the new sub-tree in case the sub-tree becomes the full tree.
 *
 * Return: the number of elements in b_node during the last loop.
 */
static int mas_spanning_rebalance(struct ma_state *mas,
		struct maple_subtree_state *mast, unsigned char count)
{
	unsigned char split, mid_split;
	unsigned char slot = 0;
	struct maple_enode *left = NULL, *middle = NULL, *right = NULL;

	MA_STATE(l_mas, mas->tree, mas->index, mas->index);
	MA_STATE(r_mas, mas->tree, mas->index, mas->last);
	MA_STATE(m_mas, mas->tree, mas->index, mas->index);
	MA_TOPIARY(free, mas->tree);
	MA_TOPIARY(destroy, mas->tree);

	/*
	 * The tree needs to be rebalanced and leaves need to be kept at the same level.
	 * Rebalancing is done by use of the ``struct maple_topiary``.
	 */
	mast->l = &l_mas;
	mast->m = &m_mas;
	mast->r = &r_mas;
	mast->free = &free;
	mast->destroy = &destroy;
	l_mas.node = r_mas.node = m_mas.node = MAS_NONE;

	/* Check if this is not root and has sufficient data. */
	if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) &&
	    unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type]))
		mast_spanning_rebalance(mast);

	mast->orig_l->depth = 0;

	/*
	 * Each level of the tree is examined and balanced, pushing data to the left or
	 * right, or rebalancing against left or right nodes is employed to avoid
	 * rippling up the tree to limit the amount of churn. Once a new sub-section of
	 * the tree is created, there may be a mix of new and old nodes. The old nodes
	 * will have the incorrect parent pointers and currently be in two trees: the
	 * original tree and the partially new tree. To remedy the parent pointers in
	 * the old tree, the new data is swapped into the active tree and a walk down
	 * the tree is performed and the parent pointers are updated.
	 * See mas_descend_adopt() for more information.
	 */
	while (count--) {
		mast->bn->b_end--;
		mast->bn->type = mte_node_type(mast->orig_l->node);
		split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle,
					&mid_split, mast->orig_l->min);
		mast_set_split_parents(mast, left, middle, right, split,
				       mid_split);
		mast_cp_to_nodes(mast, left, middle, right, split, mid_split);

		/*
		 * Copy data from next level in the tree to mast->bn from next
		 * iteration
		 */
		memset(mast->bn, 0, sizeof(struct maple_big_node));
		mast->bn->type = mte_node_type(left);
		mast->orig_l->depth++;

		/* Root already stored in l->node. */
		if (mas_is_root_limits(mast->l))
			goto new_root;

		mast_ascend_free(mast);
		mast_combine_cp_left(mast);
		l_mas.offset = mast->bn->b_end;
		mab_set_b_end(mast->bn, &l_mas, left);
		mab_set_b_end(mast->bn, &m_mas, middle);
		mab_set_b_end(mast->bn, &r_mas, right);

		/* Copy anything necessary out of the right node. */
		mast_combine_cp_right(mast);
		mast_topiary(mast);
		mast->orig_l->last = mast->orig_l->max;

		if (mast_sufficient(mast))
			continue;

		if (mast_overflow(mast))
			continue;

		/* May be a new root stored in mast->bn */
		if (mas_is_root_limits(mast->orig_l))
			break;

		mast_spanning_rebalance(mast);

		/* rebalancing from other nodes may require another loop. */
		if (!count)
			count++;
	}

	l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)),
				mte_node_type(mast->orig_l->node));
	mast->orig_l->depth++;
	mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true);
	mte_set_parent(left, l_mas.node, slot);
	if (middle)
		mte_set_parent(middle, l_mas.node, ++slot);

	if (right)
		mte_set_parent(right, l_mas.node, ++slot);

	if (mas_is_root_limits(mast->l)) {
new_root:
		mast_new_root(mast, mas);
	} else {
		mas_mn(&l_mas)->parent = mas_mn(mast->orig_l)->parent;
	}

	if (!mte_dead_node(mast->orig_l->node))
		mat_add(&free, mast->orig_l->node);

	mas->depth = mast->orig_l->depth;
	*mast->orig_l = l_mas;
	mte_set_node_dead(mas->node);

	/* Set up mas for insertion. */
	mast->orig_l->depth = mas->depth;
	mast->orig_l->alloc = mas->alloc;
	*mas = *mast->orig_l;
	mas_wmb_replace(mas, &free, &destroy);
	mtree_range_walk(mas);
	return mast->bn->b_end;
}

/*
 * mas_rebalance() - Rebalance a given node.
 * @mas: The maple state
 * @b_node: The big maple node.
 *
 * Rebalance two nodes into a single node or two new nodes that are sufficient.
 * Continue upwards until tree is sufficient.
 *
 * Return: the number of elements in b_node during the last loop.
 */
static inline int mas_rebalance(struct ma_state *mas,
				struct maple_big_node *b_node)
{
	char empty_count = mas_mt_height(mas);
	struct maple_subtree_state mast;
	unsigned char shift, b_end = ++b_node->b_end;

	MA_STATE(l_mas, mas->tree, mas->index, mas->last);
	MA_STATE(r_mas, mas->tree, mas->index, mas->last);

	trace_ma_op(__func__, mas);

	/*
	 * Rebalancing occurs if a node is insufficient. Data is rebalanced
	 * against the node to the right if it exists, otherwise the node to the
	 * left of this node is rebalanced against this node. If rebalancing
	 * causes just one node to be produced instead of two, then the parent
	 * is also examined and rebalanced if it is insufficient. Every level
	 * tries to combine the data in the same way. If one node contains the
	 * entire range of the tree, then that node is used as a new root node.
	 */
	mas_node_count(mas, 1 + empty_count * 3);
	if (mas_is_err(mas))
		return 0;

	mast.orig_l = &l_mas;
	mast.orig_r = &r_mas;
	mast.bn = b_node;
	mast.bn->type = mte_node_type(mas->node);

	l_mas = r_mas = *mas;

	if (mas_next_sibling(&r_mas)) {
		mas_mab_cp(&r_mas, 0, mt_slot_count(r_mas.node), b_node, b_end);
		r_mas.last = r_mas.index = r_mas.max;
	} else {
		mas_prev_sibling(&l_mas);
		shift = mas_data_end(&l_mas) + 1;
		mab_shift_right(b_node, shift);
		mas->offset += shift;
		mas_mab_cp(&l_mas, 0, shift - 1, b_node, 0);
		b_node->b_end = shift + b_end;
		l_mas.index = l_mas.last = l_mas.min;
	}

	return mas_spanning_rebalance(mas, &mast, empty_count);
}

/*
 * mas_destroy_rebalance() - Rebalance left-most node while destroying the maple
 * state.
 * @mas: The maple state
 * @end: The end of the left-most node.
 *
 * During a mass-insert event (such as forking), it may be necessary to
 * rebalance the left-most node when it is not sufficient.
 */
static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end)
{
	enum maple_type mt = mte_node_type(mas->node);
	struct maple_node reuse, *newnode, *parent, *new_left, *left, *node;
	struct maple_enode *eparent;
	unsigned char offset, tmp, split = mt_slots[mt] / 2;
	void __rcu **l_slots, **slots;
	unsigned long *l_pivs, *pivs, gap;
	bool in_rcu = mt_in_rcu(mas->tree);

	MA_STATE(l_mas, mas->tree, mas->index, mas->last);

	l_mas = *mas;
	mas_prev_sibling(&l_mas);

	/* set up node. */
	if (in_rcu) {
		/* Allocate for both left and right as well as parent. */
		mas_node_count(mas, 3);
		if (mas_is_err(mas))
			return;

		newnode = mas_pop_node(mas);
	} else {
		newnode = &reuse;
	}

	node = mas_mn(mas);
	newnode->parent = node->parent;
	slots = ma_slots(newnode, mt);
	pivs = ma_pivots(newnode, mt);
	left = mas_mn(&l_mas);
	l_slots = ma_slots(left, mt);
	l_pivs = ma_pivots(left, mt);
	if (!l_slots[split])
		split = split - 1;

	tmp = mas_data_end(&l_mas) - split;

	memcpy(slots, l_slots + split + 1, sizeof(void *) * tmp);
	memcpy(pivs, l_pivs + split + 1, sizeof(unsigned long) * tmp);
	pivs[tmp] = l_mas.max;
	memcpy(slots + tmp, ma_slots(node, mt), sizeof(void *) * end);
	memcpy(pivs + tmp, ma_pivots(node, mt), sizeof(unsigned long) * end);

	l_mas.max = l_pivs[split];
	mas->min = l_mas.max + 1;
	eparent = mt_mk_node(mte_parent(l_mas.node),
			     mas_parent_enum(&l_mas, l_mas.node));
	tmp += end;
	if (!in_rcu) {
		unsigned char max_p = mt_pivots[mt];
		unsigned char max_s = mt_slots[mt];

		if (tmp < max_p)
			memset(pivs + tmp, 0,
			       sizeof(unsigned long) * (max_p - tmp));

		if (tmp < mt_slots[mt])
			memset(slots + tmp, 0, sizeof(void *) * (max_s - tmp));

		memcpy(node, newnode, sizeof(struct maple_node));
		ma_set_meta(node, mt, 0, tmp - 1);
		mte_set_pivot(eparent, mte_parent_slot(l_mas.node),
			      l_pivs[split]);

		/* Remove data from l_pivs. */
		tmp = split + 1;
		memset(l_pivs + tmp, 0, sizeof(unsigned long) * (max_p - tmp));
		memset(l_slots + tmp, 0, sizeof(void *) * (max_s - tmp));
		ma_set_meta(left, mt, 0, split);

		goto done;
	}

	/* RCU requires replacing both l_mas, mas, and parent. */
	mas->node = mt_mk_node(newnode, mt);
	ma_set_meta(newnode, mt, 0, tmp);

	new_left = mas_pop_node(mas);
	new_left->parent = left->parent;
	mt = mte_node_type(l_mas.node);
	slots = ma_slots(new_left, mt);
	pivs = ma_pivots(new_left, mt);
	memcpy(slots, l_slots, sizeof(void *) * split);
	memcpy(pivs, l_pivs, sizeof(unsigned long) * split);
	ma_set_meta(new_left, mt, 0, split);
	l_mas.node = mt_mk_node(new_left, mt);

	/* replace parent. */
	offset = mte_parent_slot(mas->node);
	mt = mas_parent_enum(&l_mas, l_mas.node);
	parent = mas_pop_node(mas);
	slots = ma_slots(parent, mt);
	pivs = ma_pivots(parent, mt);
	memcpy(parent, mte_to_node(eparent), sizeof(struct maple_node));
	rcu_assign_pointer(slots[offset], mas->node);
	rcu_assign_pointer(slots[offset - 1], l_mas.node);
	pivs[offset - 1] = l_mas.max;
	eparent = mt_mk_node(parent, mt);
done:
	gap = mas_leaf_max_gap(mas);
	mte_set_gap(eparent, mte_parent_slot(mas->node), gap);
	gap = mas_leaf_max_gap(&l_mas);
	mte_set_gap(eparent, mte_parent_slot(l_mas.node), gap);
	mas_ascend(mas);

	if (in_rcu)
		mas_replace(mas, false);

	mas_update_gap(mas);
}

/*
 * mas_split_final_node() - Split the final node in a subtree operation.
 * @mast: the maple subtree state
 * @mas: The maple state
 * @height: The height of the tree in case it's a new root.
 */
static inline bool mas_split_final_node(struct maple_subtree_state *mast,
					struct ma_state *mas, int height)
{
	struct maple_enode *ancestor;

	if (mte_is_root(mas->node)) {
		if (mt_is_alloc(mas->tree))
			mast->bn->type = maple_arange_64;
		else
			mast->bn->type = maple_range_64;
		mas->depth = height;
	}
	/*
	 * Only a single node is used here, could be root.
	 * The Big_node data should just fit in a single node.
	 */
	ancestor = mas_new_ma_node(mas, mast->bn);
	mte_set_parent(mast->l->node, ancestor, mast->l->offset);
	mte_set_parent(mast->r->node, ancestor, mast->r->offset);
	mte_to_node(ancestor)->parent = mas_mn(mas)->parent;

	mast->l->node = ancestor;
	mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l, true);
	mas->offset = mast->bn->b_end - 1;
	return true;
}

/*
 * mast_fill_bnode() - Copy data into the big node in the subtree state
 * @mast: The maple subtree state
 * @mas: the maple state
 * @skip: The number of entries to skip for new nodes insertion.
 */
static inline void mast_fill_bnode(struct maple_subtree_state *mast,
				   struct ma_state *mas,
				   unsigned char skip)
{
	bool cp = true;
	struct maple_enode *old = mas->node;
	unsigned char split;

	memset(mast->bn->gap, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->gap));
	memset(mast->bn->slot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->slot));
	memset(mast->bn->pivot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->pivot));
	mast->bn->b_end = 0;

	if (mte_is_root(mas->node)) {
		cp = false;
	} else {
		mas_ascend(mas);
		mat_add(mast->free, old);
		mas->offset = mte_parent_slot(mas->node);
	}

	if (cp && mast->l->offset)
		mas_mab_cp(mas, 0, mast->l->offset - 1, mast->bn, 0);

	split = mast->bn->b_end;
	mab_set_b_end(mast->bn, mast->l, mast->l->node);
	mast->r->offset = mast->bn->b_end;
	mab_set_b_end(mast->bn, mast->r, mast->r->node);
	if (mast->bn->pivot[mast->bn->b_end - 1] == mas->max)
		cp = false;

	if (cp)
		mas_mab_cp(mas, split + skip, mt_slot_count(mas->node) - 1,
			   mast->bn, mast->bn->b_end);

	mast->bn->b_end--;
	mast->bn->type = mte_node_type(mas->node);
}

/*
 * mast_split_data() - Split the data in the subtree state big node into regular
 * nodes.
 * @mast: The maple subtree state
 * @mas: The maple state
 * @split: The location to split the big node
 */
static inline void mast_split_data(struct maple_subtree_state *mast,
				   struct ma_state *mas, unsigned char split)
{
	unsigned char p_slot;

	mab_mas_cp(mast->bn, 0, split, mast->l, true);
	mte_set_pivot(mast->r->node, 0, mast->r->max);
	mab_mas_cp(mast->bn, split + 1, mast->bn->b_end, mast->r, false);
	mast->l->offset = mte_parent_slot(mas->node);
	mast->l->max = mast->bn->pivot[split];
	mast->r->min = mast->l->max + 1;
	if (mte_is_leaf(mas->node))
		return;

	p_slot = mast->orig_l->offset;
	mas_set_split_parent(mast->orig_l, mast->l->node, mast->r->node,
			     &p_slot, split);
	mas_set_split_parent(mast->orig_r, mast->l->node, mast->r->node,
			     &p_slot, split);
}

/*
 * mas_push_data() - Instead of splitting a node, it is beneficial to push the
 * data to the right or left node if there is room.
 * @mas: The maple state
 * @height: The current height of the maple state
 * @mast: The maple subtree state
 * @left: Push left or not.
 *
 * Keeping the height of the tree low means faster lookups.
 *
 * Return: True if pushed, false otherwise.
 */
static inline bool mas_push_data(struct ma_state *mas, int height,
				 struct maple_subtree_state *mast, bool left)
{
	unsigned char slot_total = mast->bn->b_end;
	unsigned char end, space, split;

	MA_STATE(tmp_mas, mas->tree, mas->index, mas->last);
	tmp_mas = *mas;
	tmp_mas.depth = mast->l->depth;

	if (left && !mas_prev_sibling(&tmp_mas))
		return false;
	else if (!left && !mas_next_sibling(&tmp_mas))
		return false;

	end = mas_data_end(&tmp_mas);
	slot_total += end;
	space = 2 * mt_slot_count(mas->node) - 2;
	/* -2 instead of -1 to ensure there isn't a triple split */
	if (ma_is_leaf(mast->bn->type))
		space--;

	if (mas->max == ULONG_MAX)
		space--;

	if (slot_total >= space)
		return false;

	/* Get the data; Fill mast->bn */
	mast->bn->b_end++;
	if (left) {
		mab_shift_right(mast->bn, end + 1);
		mas_mab_cp(&tmp_mas, 0, end, mast->bn, 0);
		mast->bn->b_end = slot_total + 1;
	} else {
		mas_mab_cp(&tmp_mas, 0, end, mast->bn, mast->bn->b_end);
	}

	/* Configure mast for splitting of mast->bn */
	split = mt_slots[mast->bn->type] - 2;
	if (left) {
		/* Switch mas to prev node */
		mat_add(mast->free, mas->node);
		*mas = tmp_mas;
		/* Start using mast->l for the left side. */
		tmp_mas.node = mast->l->node;
		*mast->l = tmp_mas;
	} else {
		mat_add(mast->free, tmp_mas.node);
		tmp_mas.node = mast->r->node;
		*mast->r = tmp_mas;
		split = slot_total - split;
	}
	split = mab_no_null_split(mast->bn, split, mt_slots[mast->bn->type]);
	/* Update parent slot for split calculation. */
	if (left)
		mast->orig_l->offset += end + 1;

	mast_split_data(mast, mas, split);
	mast_fill_bnode(mast, mas, 2);
	mas_split_final_node(mast, mas, height + 1);
	return true;
}

/*
 * mas_split() - Split data that is too big for one node into two.
 * @mas: The maple state
 * @b_node: The maple big node
 * Return: 1 on success, 0 on failure.
 */
static int mas_split(struct ma_state *mas, struct maple_big_node *b_node)
{
	struct maple_subtree_state mast;
	int height = 0;
	unsigned char mid_split, split = 0;

	/*
	 * Splitting is handled differently from any other B-tree; the Maple
	 * Tree splits upwards. Splitting up means that the split operation
	 * occurs when the walk of the tree hits the leaves and not on the way
	 * down. The reason for splitting up is that it is impossible to know
	 * how much space will be needed until the leaf is (or leaves are)
	 * reached. Since overwriting data is allowed and a range could
	 * overwrite more than one range or result in changing one entry into 3
	 * entries, it is impossible to know if a split is required until the
	 * data is examined.
	 *
	 * Splitting is a balancing act between keeping allocations to a minimum
	 * and avoiding a 'jitter' event where a tree is expanded to make room
	 * for an entry followed by a contraction when the entry is removed. To
	 * accomplish the balance, there are empty slots remaining in both left
	 * and right nodes after a split.
	 */
	MA_STATE(l_mas, mas->tree, mas->index, mas->last);
	MA_STATE(r_mas, mas->tree, mas->index, mas->last);
	MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last);
	MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last);
	MA_TOPIARY(mat, mas->tree);

	trace_ma_op(__func__, mas);
	mas->depth = mas_mt_height(mas);
	/* Allocation failures will happen early. */
	mas_node_count(mas, 1 + mas->depth * 2);
	if (mas_is_err(mas))
		return 0;

	mast.l = &l_mas;
	mast.r = &r_mas;
	mast.orig_l = &prev_l_mas;
	mast.orig_r = &prev_r_mas;
	mast.free = &mat;
	mast.bn = b_node;

	while (height++ <= mas->depth) {
		if (mt_slots[b_node->type] > b_node->b_end) {
			mas_split_final_node(&mast, mas, height);
			break;
		}

		l_mas = r_mas = *mas;
		l_mas.node = mas_new_ma_node(mas, b_node);
		r_mas.node = mas_new_ma_node(mas, b_node);
		/*
		 * Another way that 'jitter' is avoided is to terminate a split up early if the
		 * left or right node has space to spare. This is referred to as "pushing left"
		 * or "pushing right" and is similar to the B* tree, except the nodes left or
		 * right can rarely be reused due to RCU, but the ripple upwards is halted which
		 * is a significant savings.
		 */
		/* Try to push left. */
		if (mas_push_data(mas, height, &mast, true))
			break;

		/* Try to push right. */
		if (mas_push_data(mas, height, &mast, false))
			break;

		split = mab_calc_split(mas, b_node, &mid_split, prev_l_mas.min);
		mast_split_data(&mast, mas, split);
		/*
		 * Usually correct, mab_mas_cp in the above call overwrites
		 * r->max.
		 */
		mast.r->max = mas->max;
		mast_fill_bnode(&mast, mas, 1);
		prev_l_mas = *mast.l;
		prev_r_mas = *mast.r;
	}

	/* Set the original node as dead */
	mat_add(mast.free, mas->node);
	mas->node = l_mas.node;
	mas_wmb_replace(mas, mast.free, NULL);
	mtree_range_walk(mas);
	return 1;
}

/*
 * mas_reuse_node() - Reuse the node to store the data.
 * @wr_mas: The maple write state
 * @bn: The maple big node
 * @end: The end of the data.
 *
 * Will always return false in RCU mode.
 *
 * Return: True if node was reused, false otherwise.
 */
static inline bool mas_reuse_node(struct ma_wr_state *wr_mas,
				  struct maple_big_node *bn, unsigned char end)
{
	/* Need to be rcu safe. */
	if (mt_in_rcu(wr_mas->mas->tree))
		return false;

	if (end > bn->b_end) {
		int clear = mt_slots[wr_mas->type] - bn->b_end;

		memset(wr_mas->slots + bn->b_end, 0, sizeof(void *) * clear--);
		memset(wr_mas->pivots + bn->b_end, 0, sizeof(void *) * clear);
	}
	mab_mas_cp(bn, 0, bn->b_end, wr_mas->mas, false);
	return true;
}

/*
 * mas_commit_b_node() - Commit the big node into the tree.
 * @wr_mas: The maple write state
 * @b_node: The maple big node
 * @end: The end of the data.
 */
static noinline_for_kasan int mas_commit_b_node(struct ma_wr_state *wr_mas,
			    struct maple_big_node *b_node, unsigned char end)
{
	struct maple_node *node;
	unsigned char b_end = b_node->b_end;
	enum maple_type b_type = b_node->type;

	if ((b_end < mt_min_slots[b_type]) &&
	    (!mte_is_root(wr_mas->mas->node)) &&
	    (mas_mt_height(wr_mas->mas) > 1))
		return mas_rebalance(wr_mas->mas, b_node);

	if (b_end >= mt_slots[b_type])
		return mas_split(wr_mas->mas, b_node);

	if (mas_reuse_node(wr_mas, b_node, end))
		goto reuse_node;

	mas_node_count(wr_mas->mas, 1);
	if (mas_is_err(wr_mas->mas))
		return 0;

	node = mas_pop_node(wr_mas->mas);
	node->parent = mas_mn(wr_mas->mas)->parent;
	wr_mas->mas->node = mt_mk_node(node, b_type);
	mab_mas_cp(b_node, 0, b_end, wr_mas->mas, false);
	mas_replace(wr_mas->mas, false);
reuse_node:
	mas_update_gap(wr_mas->mas);
	return 1;
}

/*
 * mas_root_expand() - Expand a root to a node
 * @mas: The maple state
 * @entry: The entry to store into the tree
 */
static inline int mas_root_expand(struct ma_state *mas, void *entry)
{
	void *contents = mas_root_locked(mas);
	enum maple_type type = maple_leaf_64;
	struct maple_node *node;
	void __rcu **slots;
	unsigned long *pivots;
	int slot = 0;

	mas_node_count(mas, 1);
	if (unlikely(mas_is_err(mas)))
		return 0;

	node = mas_pop_node(mas);
	pivots = ma_pivots(node, type);
	slots = ma_slots(node, type);
	node->parent = ma_parent_ptr(
		      ((unsigned long)mas->tree | MA_ROOT_PARENT));
	mas->node = mt_mk_node(node, type);

	if (mas->index) {
		if (contents) {
			rcu_assign_pointer(slots[slot], contents);
			if (likely(mas->index > 1))
				slot++;
		}
		pivots[slot++] = mas->index - 1;
	}

	rcu_assign_pointer(slots[slot], entry);
	mas->offset = slot;
	pivots[slot] = mas->last;
	if (mas->last != ULONG_MAX)
		slot++;
	mas->depth = 1;
	mas_set_height(mas);
	ma_set_meta(node, maple_leaf_64, 0, slot);
	/* swap the new root into the tree */
	rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
	return slot;
}

static inline void mas_store_root(struct ma_state *mas, void *entry)
{
	if (likely((mas->last != 0) || (mas->index != 0)))
		mas_root_expand(mas, entry);
	else if (((unsigned long) (entry) & 3) == 2)
		mas_root_expand(mas, entry);
	else {
		rcu_assign_pointer(mas->tree->ma_root, entry);
		mas->node = MAS_START;
	}
}

/*
 * mas_is_span_wr() - Check if the write needs to be treated as a write that
 * spans the node.
 * @wr_mas: The maple write state
 *
 * Spanning writes are writes that start in one node and end in another OR if
 * the write of a %NULL will cause the node to end with a %NULL.
 *
 * Return: True if this is a spanning write, false otherwise.
 */
static bool mas_is_span_wr(struct ma_wr_state *wr_mas)
{
	unsigned long max;
	unsigned long last = wr_mas->mas->last;
	unsigned long piv = wr_mas->r_max;
	enum maple_type type = wr_mas->type;
	void *entry = wr_mas->entry;

	/* Contained in this pivot */
	if (piv > last)
		return false;

	max = wr_mas->mas->max;
	if (unlikely(ma_is_leaf(type))) {
		/* Fits in the node, but may span slots. */
		if (last < max)
			return false;

		/* Writes to the end of the node but not null. */
		if ((last == max) && entry)
			return false;

		/*
		 * Writing ULONG_MAX is not a spanning write regardless of the
		 * value being written as long as the range fits in the node.
		 */
		if ((last == ULONG_MAX) && (last == max))
			return false;
	} else if (piv == last) {
		if (entry)
			return false;

		/* Detect spanning store wr walk */
		if (last == ULONG_MAX)
			return false;
	}

	trace_ma_write(__func__, wr_mas->mas, piv, entry);
	return true;
}

static inline void mas_wr_walk_descend(struct ma_wr_state *wr_mas)
{
	wr_mas->type = mte_node_type(wr_mas->mas->node);
	mas_wr_node_walk(wr_mas);
	wr_mas->slots = ma_slots(wr_mas->node, wr_mas->type);
}

static inline void mas_wr_walk_traverse(struct ma_wr_state *wr_mas)
{
	wr_mas->mas->max = wr_mas->r_max;
	wr_mas->mas->min = wr_mas->r_min;
	wr_mas->mas->node = wr_mas->content;
	wr_mas->mas->offset = 0;
	wr_mas->mas->depth++;
}

/*
 * mas_wr_walk() - Walk the tree for a write.
 * @wr_mas: The maple write state
 *
 * Uses mas_slot_locked() and does not need to worry about dead nodes.
 *
 * Return: True if it's contained in a node, false on spanning write.
 */
static bool mas_wr_walk(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;

	while (true) {
		mas_wr_walk_descend(wr_mas);
		if (unlikely(mas_is_span_wr(wr_mas)))
			return false;

		wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
						  mas->offset);
		if (ma_is_leaf(wr_mas->type))
			return true;

		mas_wr_walk_traverse(wr_mas);
	}

	return true;
}

static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;

	while (true) {
		mas_wr_walk_descend(wr_mas);
		wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
						  mas->offset);
		if (ma_is_leaf(wr_mas->type))
			return true;

		mas_wr_walk_traverse(wr_mas);
	}
	return true;
}

/*
 * mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs.
 * @l_wr_mas: The left maple write state
 * @r_wr_mas: The right maple write state
 */
static inline void mas_extend_spanning_null(struct ma_wr_state *l_wr_mas,
					    struct ma_wr_state *r_wr_mas)
{
	struct ma_state *r_mas = r_wr_mas->mas;
	struct ma_state *l_mas = l_wr_mas->mas;
	unsigned char l_slot;

	l_slot = l_mas->offset;
	if (!l_wr_mas->content)
		l_mas->index = l_wr_mas->r_min;

	if ((l_mas->index == l_wr_mas->r_min) &&
	    (l_slot &&
	     !mas_slot_locked(l_mas, l_wr_mas->slots, l_slot - 1))) {
		if (l_slot > 1)
			l_mas->index = l_wr_mas->pivots[l_slot - 2] + 1;
		else
			l_mas->index = l_mas->min;

		l_mas->offset = l_slot - 1;
	}

	if (!r_wr_mas->content) {
		if (r_mas->last < r_wr_mas->r_max)
			r_mas->last = r_wr_mas->r_max;
		r_mas->offset++;
	} else if ((r_mas->last == r_wr_mas->r_max) &&
	    (r_mas->last < r_mas->max) &&
	    !mas_slot_locked(r_mas, r_wr_mas->slots, r_mas->offset + 1)) {
		r_mas->last = mas_safe_pivot(r_mas, r_wr_mas->pivots,
					     r_wr_mas->type, r_mas->offset + 1);
		r_mas->offset++;
	}
}

static inline void *mas_state_walk(struct ma_state *mas)
{
	void *entry;

	entry = mas_start(mas);
	if (mas_is_none(mas))
		return NULL;

	if (mas_is_ptr(mas))
		return entry;

	return mtree_range_walk(mas);
}

/*
 * mtree_lookup_walk() - Internal quick lookup that does not keep maple state up
 * to date.
 *
 * @mas: The maple state.
 *
 * Note: Leaves mas in undesirable state.
 * Return: The entry for @mas->index or %NULL on dead node.
 */
static inline void *mtree_lookup_walk(struct ma_state *mas)
{
	unsigned long *pivots;
	unsigned char offset;
	struct maple_node *node;
	struct maple_enode *next;
	enum maple_type type;
	void __rcu **slots;
	unsigned char end;
	unsigned long max;

	next = mas->node;
	max = ULONG_MAX;
	do {
		offset = 0;
		node = mte_to_node(next);
		type = mte_node_type(next);
		pivots = ma_pivots(node, type);
		end = ma_data_end(node, type, pivots, max);
		if (unlikely(ma_dead_node(node)))
			goto dead_node;

		do {
			if (pivots[offset] >= mas->index) {
				max = pivots[offset];
				break;
			}
		} while (++offset < end);

		slots = ma_slots(node, type);
		next = mt_slot(mas->tree, slots, offset);
		if (unlikely(ma_dead_node(node)))
			goto dead_node;
	} while (!ma_is_leaf(type));

	return (void *)next;

dead_node:
	mas_reset(mas);
	return NULL;
}

/*
 * mas_new_root() - Create a new root node that only contains the entry passed
 * in.
 * @mas: The maple state
 * @entry: The entry to store.
 *
 * Only valid when the index == 0 and the last == ULONG_MAX
 *
 * Return 0 on error, 1 on success.
 */
static inline int mas_new_root(struct ma_state *mas, void *entry)
{
	struct maple_enode *root = mas_root_locked(mas);
	enum maple_type type = maple_leaf_64;
	struct maple_node *node;
	void __rcu **slots;
	unsigned long *pivots;

	if (!entry && !mas->index && mas->last == ULONG_MAX) {
		mas->depth = 0;
		mas_set_height(mas);
		rcu_assign_pointer(mas->tree->ma_root, entry);
		mas->node = MAS_START;
		goto done;
	}

	mas_node_count(mas, 1);
	if (mas_is_err(mas))
		return 0;

	node = mas_pop_node(mas);
	pivots = ma_pivots(node, type);
	slots = ma_slots(node, type);
	node->parent = ma_parent_ptr(
		      ((unsigned long)mas->tree | MA_ROOT_PARENT));
	mas->node = mt_mk_node(node, type);
	rcu_assign_pointer(slots[0], entry);
	pivots[0] = mas->last;
	mas->depth = 1;
	mas_set_height(mas);
	rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));

done:
	if (xa_is_node(root))
		mte_destroy_walk(root, mas->tree);

	return 1;
}

/*
 * mas_wr_spanning_store() - Create a subtree with the store operation completed
 * and new nodes where necessary, then place the sub-tree in the actual tree.
 * Note that mas is expected to point to the node which caused the store to
 * span.
 * @wr_mas: The maple write state
 *
 * Return: 0 on error, positive on success.
 */
static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
{
	struct maple_subtree_state mast;
	struct maple_big_node b_node;
	struct ma_state *mas;
	unsigned char height;

	/* Left and Right side of spanning store */
	MA_STATE(l_mas, NULL, 0, 0);
	MA_STATE(r_mas, NULL, 0, 0);

	MA_WR_STATE(r_wr_mas, &r_mas, wr_mas->entry);
	MA_WR_STATE(l_wr_mas, &l_mas, wr_mas->entry);

	/*
	 * A store operation that spans multiple nodes is called a spanning
	 * store and is handled early in the store call stack by the function
	 * mas_is_span_wr(). When a spanning store is identified, the maple
	 * state is duplicated. The first maple state walks the left tree path
	 * to ``index``, the duplicate walks the right tree path to ``last``.
	 * The data in the two nodes are combined into a single node, two nodes,
	 * or possibly three nodes (see the 3-way split above). A ``NULL``
	 * written to the last entry of a node is considered a spanning store as
	 * a rebalance is required for the operation to complete and an overflow
	 * of data may happen.
	 */
	mas = wr_mas->mas;
	trace_ma_op(__func__, mas);

	if (unlikely(!mas->index && mas->last == ULONG_MAX))
		return mas_new_root(mas, wr_mas->entry);
	/*
	 * Node rebalancing may occur due to this store, so there may be three new
	 * entries per level plus a new root.
	 */
	height = mas_mt_height(mas);
	mas_node_count(mas, 1 + height * 3);
	if (mas_is_err(mas))
		return 0;

	/*
	 * Set up right side. Need to get to the next offset after the spanning
	 * store to ensure it's not NULL and to combine both the next node and
	 * the node with the start together.
	 */
	r_mas = *mas;
	/* Avoid overflow, walk to next slot in the tree. */
	if (r_mas.last + 1)
		r_mas.last++;

	r_mas.index = r_mas.last;
	mas_wr_walk_index(&r_wr_mas);
	r_mas.last = r_mas.index = mas->last;

	/* Set up left side. */
	l_mas = *mas;
	mas_wr_walk_index(&l_wr_mas);

	if (!wr_mas->entry) {
		mas_extend_spanning_null(&l_wr_mas, &r_wr_mas);
		mas->offset = l_mas.offset;
		mas->index = l_mas.index;
		mas->last = l_mas.last = r_mas.last;
	}

	/* expanding NULLs may make this cover the entire range */
	if (!l_mas.index && r_mas.last == ULONG_MAX) {
		mas_set_range(mas, 0, ULONG_MAX);
		return mas_new_root(mas, wr_mas->entry);
	}

	memset(&b_node, 0, sizeof(struct maple_big_node));
	/* Copy l_mas and store the value in b_node. */
	mas_store_b_node(&l_wr_mas, &b_node, l_wr_mas.node_end);
	/* Copy r_mas into b_node. */
	if (r_mas.offset <= r_wr_mas.node_end)
		mas_mab_cp(&r_mas, r_mas.offset, r_wr_mas.node_end,
			   &b_node, b_node.b_end + 1);
	else
		b_node.b_end++;

	/* Stop spanning searches by searching for just index. */
	l_mas.index = l_mas.last = mas->index;

	mast.bn = &b_node;
	mast.orig_l = &l_mas;
	mast.orig_r = &r_mas;
	/* Combine l_mas and r_mas and split them up evenly again. */
	return mas_spanning_rebalance(mas, &mast, height + 1);
}

/*
 * mas_wr_node_store() - Attempt to store the value in a node
 * @wr_mas: The maple write state
 *
 * Attempts to reuse the node, but may allocate.
 *
 * Return: True if stored, false otherwise
 */
static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;
	void __rcu **dst_slots;
	unsigned long *dst_pivots;
	unsigned char dst_offset;
	unsigned char new_end = wr_mas->node_end;
	unsigned char offset;
	unsigned char node_slots = mt_slots[wr_mas->type];
	struct maple_node reuse, *newnode;
	unsigned char copy_size, max_piv = mt_pivots[wr_mas->type];
	bool in_rcu = mt_in_rcu(mas->tree);

	offset = mas->offset;
	if (mas->last == wr_mas->r_max) {
		/* runs right to the end of the node */
		if (mas->last == mas->max)
			new_end = offset;
		/* don't copy this offset */
		wr_mas->offset_end++;
	} else if (mas->last < wr_mas->r_max) {
		/* new range ends in this range */
		if (unlikely(wr_mas->r_max == ULONG_MAX))
			mas_bulk_rebalance(mas, wr_mas->node_end, wr_mas->type);

		new_end++;
	} else {
		if (wr_mas->end_piv == mas->last)
			wr_mas->offset_end++;

		new_end -= wr_mas->offset_end - offset - 1;
	}

	/* new range starts within a range */
	if (wr_mas->r_min < mas->index)
		new_end++;

	/* Not enough room */
	if (new_end >= node_slots)
		return false;

	/* Not enough data. */
	if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) &&
	    !(mas->mas_flags & MA_STATE_BULK))
		return false;

	/* set up node. */
	if (in_rcu) {
		mas_node_count(mas, 1);
		if (mas_is_err(mas))
			return false;

		newnode = mas_pop_node(mas);
	} else {
		memset(&reuse, 0, sizeof(struct maple_node));
		newnode = &reuse;
	}

	newnode->parent = mas_mn(mas)->parent;
	dst_pivots = ma_pivots(newnode, wr_mas->type);
	dst_slots = ma_slots(newnode, wr_mas->type);
	/* Copy from start to insert point */
	memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * (offset + 1));
	memcpy(dst_slots, wr_mas->slots, sizeof(void *) * (offset + 1));
	dst_offset = offset;

	/* Handle insert of new range starting after old range */
	if (wr_mas->r_min < mas->index) {
		mas->offset++;
		rcu_assign_pointer(dst_slots[dst_offset], wr_mas->content);
		dst_pivots[dst_offset++] = mas->index - 1;
	}

	/* Store the new entry and range end. */
	if (dst_offset < max_piv)
		dst_pivots[dst_offset] = mas->last;
	mas->offset = dst_offset;
	rcu_assign_pointer(dst_slots[dst_offset], wr_mas->entry);

	/*
	 * this range wrote to the end of the node or it overwrote the rest of
	 * the data
	 */
	if (wr_mas->offset_end > wr_mas->node_end || mas->last >= mas->max) {
		new_end = dst_offset;
		goto done;
	}

	dst_offset++;
	/* Copy to the end of node if necessary. */
	copy_size = wr_mas->node_end - wr_mas->offset_end + 1;
	memcpy(dst_slots + dst_offset, wr_mas->slots + wr_mas->offset_end,
	       sizeof(void *) * copy_size);
	if (dst_offset < max_piv) {
		if (copy_size > max_piv - dst_offset)
			copy_size = max_piv - dst_offset;

		memcpy(dst_pivots + dst_offset,
		       wr_mas->pivots + wr_mas->offset_end,
		       sizeof(unsigned long) * copy_size);
	}

	if ((wr_mas->node_end == node_slots - 1) && (new_end < node_slots - 1))
		dst_pivots[new_end] = mas->max;

done:
	mas_leaf_set_meta(mas, newnode, dst_pivots, maple_leaf_64, new_end);
	if (in_rcu) {
		mte_set_node_dead(mas->node);
		mas->node = mt_mk_node(newnode, wr_mas->type);
		mas_replace(mas, false);
	} else {
		memcpy(wr_mas->node, newnode, sizeof(struct maple_node));
	}
	trace_ma_write(__func__, mas, 0, wr_mas->entry);
	mas_update_gap(mas);
	return true;
}

/*
 * mas_wr_slot_store: Attempt to store a value in a slot.
 * @wr_mas: the maple write state
 *
 * Return: True if stored, false otherwise
 */
static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;
	unsigned long lmax; /* Logical max. */
	unsigned char offset = mas->offset;

	if ((wr_mas->r_max > mas->last) && ((wr_mas->r_min != mas->index) ||
				  (offset != wr_mas->node_end)))
		return false;

	if (offset == wr_mas->node_end - 1)
		lmax = mas->max;
	else
		lmax = wr_mas->pivots[offset + 1];

	/* going to overwrite too many slots. */
	if (lmax < mas->last)
		return false;

	if (wr_mas->r_min == mas->index) {
		/* overwriting two or more ranges with one. */
		if (lmax == mas->last)
			return false;

		/* Overwriting all of offset and a portion of offset + 1. */
		rcu_assign_pointer(wr_mas->slots[offset], wr_mas->entry);
		wr_mas->pivots[offset] = mas->last;
		goto done;
	}

	/* Doesn't end on the next range end. */
	if (lmax != mas->last)
		return false;

	/* Overwriting a portion of offset and all of offset + 1 */
	if ((offset + 1 < mt_pivots[wr_mas->type]) &&
	    (wr_mas->entry || wr_mas->pivots[offset + 1]))
		wr_mas->pivots[offset + 1] = mas->last;

	rcu_assign_pointer(wr_mas->slots[offset + 1], wr_mas->entry);
	wr_mas->pivots[offset] = mas->index - 1;
	mas->offset++; /* Keep mas accurate. */

done:
	trace_ma_write(__func__, mas, 0, wr_mas->entry);
	mas_update_gap(mas);
	return true;
}

static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas)
{
	while ((wr_mas->mas->last > wr_mas->end_piv) &&
	       (wr_mas->offset_end < wr_mas->node_end))
		wr_mas->end_piv = wr_mas->pivots[++wr_mas->offset_end];

	if (wr_mas->mas->last > wr_mas->end_piv)
		wr_mas->end_piv = wr_mas->mas->max;
}

static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;

	if (mas->last < wr_mas->end_piv && !wr_mas->slots[wr_mas->offset_end])
		mas->last = wr_mas->end_piv;

	/* Check next slot(s) if we are overwriting the end */
	if ((mas->last == wr_mas->end_piv) &&
	    (wr_mas->node_end != wr_mas->offset_end) &&
	    !wr_mas->slots[wr_mas->offset_end + 1]) {
		wr_mas->offset_end++;
		if (wr_mas->offset_end == wr_mas->node_end)
			mas->last = mas->max;
		else
			mas->last = wr_mas->pivots[wr_mas->offset_end];
		wr_mas->end_piv = mas->last;
	}

	if (!wr_mas->content) {
		/* If this one is null, the next and prev are not */
		mas->index = wr_mas->r_min;
	} else {
		/* Check prev slot if we are overwriting the start */
		if (mas->index == wr_mas->r_min && mas->offset &&
		    !wr_mas->slots[mas->offset - 1]) {
			mas->offset--;
			wr_mas->r_min = mas->index =
				mas_safe_min(mas, wr_mas->pivots, mas->offset);
			wr_mas->r_max = wr_mas->pivots[mas->offset];
		}
	}
}

static inline bool mas_wr_append(struct ma_wr_state *wr_mas)
{
	unsigned char end = wr_mas->node_end;
	unsigned char new_end = end + 1;
	struct ma_state *mas = wr_mas->mas;
	unsigned char node_pivots = mt_pivots[wr_mas->type];

	if ((mas->index != wr_mas->r_min) && (mas->last == wr_mas->r_max)) {
		if (new_end < node_pivots)
			wr_mas->pivots[new_end] = wr_mas->pivots[end];

		if (new_end < node_pivots)
			ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end);

		rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->entry);
		mas->offset = new_end;
		wr_mas->pivots[end] = mas->index - 1;

		return true;
	}

	if ((mas->index == wr_mas->r_min) && (mas->last < wr_mas->r_max)) {
		if (new_end < node_pivots)
			wr_mas->pivots[new_end] = wr_mas->pivots[end];

		rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->content);
		if (new_end < node_pivots)
			ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end);

		wr_mas->pivots[end] = mas->last;
		rcu_assign_pointer(wr_mas->slots[end], wr_mas->entry);
		return true;
	}

	return false;
}

/*
 * mas_wr_bnode() - Slow path for a modification.
 * @wr_mas: The write maple state
 *
 * This is where split, rebalance end up.
 */
static void mas_wr_bnode(struct ma_wr_state *wr_mas)
{
	struct maple_big_node b_node;

	trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
	memset(&b_node, 0, sizeof(struct maple_big_node));
	mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
	mas_commit_b_node(wr_mas, &b_node, wr_mas->node_end);
}

static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
{
	unsigned char node_slots;
	unsigned char node_size;
	struct ma_state *mas = wr_mas->mas;

	/* Direct replacement */
	if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) {
		rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry);
		if (!!wr_mas->entry ^ !!wr_mas->content)
			mas_update_gap(mas);
		return;
	}

	/* Attempt to append */
	node_slots = mt_slots[wr_mas->type];
	node_size = wr_mas->node_end - wr_mas->offset_end + mas->offset + 2;
	if (mas->max == ULONG_MAX)
		node_size++;

	/* slot and node store will not fit, go to the slow path */
	if (unlikely(node_size >= node_slots))
		goto slow_path;

	if (wr_mas->entry && (wr_mas->node_end < node_slots - 1) &&
	    (mas->offset == wr_mas->node_end) && mas_wr_append(wr_mas)) {
		if (!wr_mas->content || !wr_mas->entry)
			mas_update_gap(mas);
		return;
	}

	if ((wr_mas->offset_end - mas->offset <= 1) && mas_wr_slot_store(wr_mas))
		return;
	else if (mas_wr_node_store(wr_mas))
		return;

	if (mas_is_err(mas))
		return;

slow_path:
	mas_wr_bnode(wr_mas);
}

/*
 * mas_wr_store_entry() - Internal call to store a value
 * @wr_mas: The maple write state
 *
 * Return: The contents that were stored at the index.
 */
static inline void *mas_wr_store_entry(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;

	wr_mas->content = mas_start(mas);
	if (mas_is_none(mas) || mas_is_ptr(mas)) {
		mas_store_root(mas, wr_mas->entry);
		return wr_mas->content;
	}

	if (unlikely(!mas_wr_walk(wr_mas))) {
		mas_wr_spanning_store(wr_mas);
		return wr_mas->content;
	}

	/* At this point, we are at the leaf node that needs to be altered. */
	wr_mas->end_piv = wr_mas->r_max;
	mas_wr_end_piv(wr_mas);

	if (!wr_mas->entry)
		mas_wr_extend_null(wr_mas);

	/* New root for a single pointer */
	if (unlikely(!mas->index && mas->last == ULONG_MAX)) {
		mas_new_root(mas, wr_mas->entry);
		return wr_mas->content;
	}

	mas_wr_modify(wr_mas);
	return wr_mas->content;
}
/*
 * mas_insert() - Internal call to insert a value
 * @mas: The maple state
 * @entry: The entry to store
 *
 * Return: %NULL on success, otherwise the contents that already exist at the
 * requested index.  The maple state needs to be checked for error conditions.
 */
static inline void *mas_insert(struct ma_state *mas, void *entry)
{
	MA_WR_STATE(wr_mas, mas, entry);

	/*
	 * Inserting a new range inserts either 0, 1, or 2 pivots within the
	 * tree.  If the insert fits exactly into an existing gap with a value
	 * of NULL, then the slot only needs to be written with the new value.
	 * If the range being inserted is adjacent to another range, then only a
	 * single pivot needs to be inserted (as well as writing the entry).  If
	 * the new range is within a gap but does not touch any other ranges,
	 * then two pivots need to be inserted: the start - 1, and the end.  As
	 * usual, the entry must be written.  Most operations require a new node
	 * to be allocated and replace an existing node to ensure RCU safety,
	 * when in RCU mode.  The exception to requiring a newly allocated node
	 * is when inserting at the end of a node (appending).  When done
	 * carefully, appending can reuse the node in place.
	 */
	wr_mas.content = mas_start(mas);
	if (wr_mas.content)
		goto exists;

	if (mas_is_none(mas) || mas_is_ptr(mas)) {
		mas_store_root(mas, entry);
		return NULL;
	}

	/* spanning writes always overwrite something */
	if (!mas_wr_walk(&wr_mas))
		goto exists;

	/* At this point, we are at the leaf node that needs to be altered. */
	wr_mas.offset_end = mas->offset;
	wr_mas.end_piv = wr_mas.r_max;

	if (wr_mas.content || (mas->last > wr_mas.r_max))
		goto exists;

	if (!entry)
		return NULL;

	mas_wr_modify(&wr_mas);
	return wr_mas.content;

exists:
	mas_set_err(mas, -EEXIST);
	return wr_mas.content;
}
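
/*
 * Illustrative sketch of the pivot cases described above (editorial example,
 * not compiled here; the tree contents are hypothetical).  Assume a leaf
 * currently holds the NULL range 5-20:
 *
 *	insert 5-20:  0 new pivots; the NULL slot is simply overwritten.
 *	insert 5-15:  1 new pivot (15); 16-20 remains a NULL range.
 *	insert 10-15: 2 new pivots (9 and 15); 5-9 and 16-20 remain NULL.
 */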
/*
 * mas_prev_node() - Find the previous non-null entry at the same level in the
 * tree.
 * @mas: The maple state
 * @min: The lower limit to search
 *
 * The prev node value will be mas->node[mas->offset] or MAS_NONE.
 * Return: 1 if the node is dead, 0 otherwise.
 */
static inline int mas_prev_node(struct ma_state *mas, unsigned long min)
{
	enum maple_type mt;
	int offset, level;
	void __rcu **slots;
	struct maple_node *node;
	struct maple_enode *enode;
	unsigned long *pivots;

	if (mas_is_none(mas))
		return 0;

	level = 0;
	do {
		node = mas_mn(mas);
		if (ma_is_root(node))
			goto no_entry;

		/* Walk up. */
		if (unlikely(mas_ascend(mas)))
			return 1;
		offset = mas->offset;
		level++;
	} while (!offset);

	offset--;
	mt = mte_node_type(mas->node);
	node = mas_mn(mas);
	slots = ma_slots(node, mt);
	pivots = ma_pivots(node, mt);
	if (unlikely(ma_dead_node(node)))
		return 1;

	mas->max = pivots[offset];
	if (offset)
		mas->min = pivots[offset - 1] + 1;
	if (unlikely(ma_dead_node(node)))
		return 1;

	if (mas->max < min)
		goto no_entry_min;

	while (level > 1) {
		level--;
		enode = mas_slot(mas, slots, offset);
		if (unlikely(ma_dead_node(node)))
			return 1;

		mas->node = enode;
		mt = mte_node_type(mas->node);
		node = mas_mn(mas);
		slots = ma_slots(node, mt);
		pivots = ma_pivots(node, mt);
		offset = ma_data_end(node, mt, pivots, mas->max);
		if (unlikely(ma_dead_node(node)))
			return 1;

		if (offset)
			mas->min = pivots[offset - 1] + 1;

		if (offset < mt_pivots[mt])
			mas->max = pivots[offset];

		if (mas->max < min)
			goto no_entry;
	}

	mas->node = mas_slot(mas, slots, offset);
	if (unlikely(ma_dead_node(node)))
		return 1;

	mas->offset = mas_data_end(mas);
	if (unlikely(mte_dead_node(mas->node)))
		return 1;

	return 0;

no_entry_min:
	mas->offset = offset;
	if (offset)
		mas->min = pivots[offset - 1] + 1;
no_entry:
	if (unlikely(ma_dead_node(node)))
		return 1;

	mas->node = MAS_NONE;
	return 0;
}
/*
 * mas_next_node() - Get the next node at the same level in the tree.
 * @mas: The maple state
 * @node: The maple node
 * @max: The maximum pivot value to check.
 *
 * The next value will be mas->node[mas->offset] or MAS_NONE.
 * Return: 1 on dead node, 0 otherwise.
 */
static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
				unsigned long max)
{
	unsigned long min, pivot;
	unsigned long *pivots;
	struct maple_enode *enode;
	int level = 0;
	unsigned char offset;
	unsigned char node_end;
	enum maple_type mt;
	void __rcu **slots;

	if (mas->max >= max)
		goto no_entry;

	level = 0;
	do {
		if (ma_is_root(node))
			goto no_entry;

		min = mas->max + 1;
		level++;
		if (unlikely(mas_ascend(mas)))
			return 1;

		offset = mas->offset;
		node = mas_mn(mas);
		mt = mte_node_type(mas->node);
		pivots = ma_pivots(node, mt);
		node_end = ma_data_end(node, mt, pivots, mas->max);
		if (unlikely(ma_dead_node(node)))
			return 1;
	} while (unlikely(offset == node_end));

	slots = ma_slots(node, mt);
	pivot = mas_safe_pivot(mas, pivots, ++offset, mt);
	while (unlikely(level > 1)) {
		/* Descend, if necessary */
		enode = mas_slot(mas, slots, offset);
		if (unlikely(ma_dead_node(node)))
			return 1;

		mas->node = enode;
		level--;
		node = mas_mn(mas);
		mt = mte_node_type(mas->node);
		slots = ma_slots(node, mt);
		pivots = ma_pivots(node, mt);
		if (unlikely(ma_dead_node(node)))
			return 1;

		offset = 0;
		pivot = pivots[0];
	}

	enode = mas_slot(mas, slots, offset);
	if (unlikely(ma_dead_node(node)))
		return 1;

	mas->node = enode;
	mas->min = min;
	mas->max = pivot;
	return 0;

no_entry:
	if (unlikely(ma_dead_node(node)))
		return 1;

	mas->node = MAS_NONE;
	return 0;
}
/*
 * mas_next_nentry() - Get the next node entry
 * @mas: The maple state
 * @node: The maple node
 * @max: The maximum value to check
 * @type: The maple node type
 *
 * Sets @mas->offset to the offset of the next node entry, @mas->last to the
 * pivot of the entry.
 *
 * Return: The next entry, %NULL otherwise
 */
static inline void *mas_next_nentry(struct ma_state *mas,
	    struct maple_node *node, unsigned long max, enum maple_type type)
{
	unsigned char count;
	unsigned long pivot;
	unsigned long *pivots;
	void __rcu **slots;
	void *entry;

	if (mas->last == mas->max) {
		mas->index = mas->max;
		return NULL;
	}

	slots = ma_slots(node, type);
	pivots = ma_pivots(node, type);
	count = ma_data_end(node, type, pivots, mas->max);
	if (unlikely(ma_dead_node(node)))
		return NULL;

	mas->index = mas_safe_min(mas, pivots, mas->offset);
	if (unlikely(ma_dead_node(node)))
		return NULL;

	if (mas->index > max)
		return NULL;

	if (mas->offset > count)
		return NULL;

	while (mas->offset < count) {
		pivot = pivots[mas->offset];
		entry = mas_slot(mas, slots, mas->offset);
		if (ma_dead_node(node))
			return NULL;

		if (entry)
			goto found;

		if (pivot >= max)
			return NULL;

		mas->index = pivot + 1;
		mas->offset++;
	}

	if (mas->index > mas->max) {
		mas->index = mas->last;
		return NULL;
	}

	pivot = mas_safe_pivot(mas, pivots, mas->offset, type);
	entry = mas_slot(mas, slots, mas->offset);
	if (ma_dead_node(node))
		return NULL;

	if (!pivot)
		return NULL;

	if (!entry)
		return NULL;

found:
	mas->last = pivot;
	return entry;
}
static inline void mas_rewalk(struct ma_state *mas, unsigned long index)
{
retry:
	mas_set(mas, index);
	mas_state_walk(mas);
	if (mas_is_start(mas))
		goto retry;
}
/*
 * mas_next_entry() - Internal function to get the next entry.
 * @mas: The maple state
 * @limit: The maximum range start.
 *
 * Set the @mas->node to the next entry and the range_start to
 * the beginning value for the entry.  Does not check beyond @limit.
 * Sets @mas->index and @mas->last to the limit if it is hit.
 * Restarts on dead nodes.
 *
 * Return: the next entry or %NULL.
 */
static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit)
{
	void *entry = NULL;
	struct maple_enode *prev_node;
	struct maple_node *node;
	unsigned char offset;
	unsigned long last;
	enum maple_type mt;

	if (mas->index > limit) {
		mas->index = mas->last = limit;
		mas_pause(mas);
		return NULL;
	}
	last = mas->last;
retry:
	offset = mas->offset;
	prev_node = mas->node;
	node = mas_mn(mas);
	mt = mte_node_type(mas->node);
	mas->offset++;
	if (unlikely(mas->offset >= mt_slots[mt])) {
		mas->offset = mt_slots[mt] - 1;
		goto next_node;
	}

	while (!mas_is_none(mas)) {
		entry = mas_next_nentry(mas, node, limit, mt);
		if (unlikely(ma_dead_node(node))) {
			mas_rewalk(mas, last);
			goto retry;
		}

		if (likely(entry))
			return entry;

		if (unlikely((mas->index > limit)))
			break;

next_node:
		prev_node = mas->node;
		offset = mas->offset;
		if (unlikely(mas_next_node(mas, node, limit))) {
			mas_rewalk(mas, last);
			goto retry;
		}
		mas->offset = 0;
		node = mas_mn(mas);
		mt = mte_node_type(mas->node);
	}

	mas->index = mas->last = limit;
	mas->offset = offset;
	mas->node = prev_node;
	return NULL;
}
/*
 * mas_prev_nentry() - Get the previous node entry.
 * @mas: The maple state.
 * @limit: The lower limit to check for a value.
 * @index: The index to restart the walk on a dead node.
 *
 * Return: the entry, %NULL otherwise.
 */
static inline void *mas_prev_nentry(struct ma_state *mas, unsigned long limit,
				    unsigned long index)
{
	unsigned long pivot, min;
	unsigned char offset;
	struct maple_node *mn;
	enum maple_type mt;
	unsigned long *pivots;
	void __rcu **slots;
	void *entry;

retry:
	if (!mas->offset)
		return NULL;

	mn = mas_mn(mas);
	mt = mte_node_type(mas->node);
	offset = mas->offset - 1;
	if (offset >= mt_slots[mt])
		offset = mt_slots[mt] - 1;

	slots = ma_slots(mn, mt);
	pivots = ma_pivots(mn, mt);
	if (unlikely(ma_dead_node(mn))) {
		mas_rewalk(mas, index);
		goto retry;
	}

	if (offset == mt_pivots[mt])
		pivot = mas->max;
	else
		pivot = pivots[offset];

	if (unlikely(ma_dead_node(mn))) {
		mas_rewalk(mas, index);
		goto retry;
	}

	while (offset && ((!mas_slot(mas, slots, offset) && pivot >= limit) ||
	       !pivot))
		pivot = pivots[--offset];

	min = mas_safe_min(mas, pivots, offset);
	entry = mas_slot(mas, slots, offset);
	if (unlikely(ma_dead_node(mn))) {
		mas_rewalk(mas, index);
		goto retry;
	}

	if (likely(entry)) {
		mas->offset = offset;
		mas->last = pivot;
		mas->index = min;
	}
	return entry;
}
static inline void *mas_prev_entry(struct ma_state *mas, unsigned long min)
{
	void *entry;

	if (mas->index < min) {
		mas->index = mas->last = min;
		mas->node = MAS_NONE;
		return NULL;
	}
retry:
	while (likely(!mas_is_none(mas))) {
		entry = mas_prev_nentry(mas, min, mas->index);
		if (unlikely(mas->last < min))
			goto not_found;

		if (likely(entry))
			return entry;

		if (unlikely(mas_prev_node(mas, min))) {
			mas_rewalk(mas, mas->index);
			goto retry;
		}

		mas->offset++;
	}

	mas->offset--;
not_found:
	mas->index = mas->last = min;
	return NULL;
}
/*
 * mas_rev_awalk() - Internal function.  Reverse allocation walk.  Find the
 * highest gap address of a given size in a given node and descend.
 * @mas: The maple state
 * @size: The needed size.
 *
 * Return: True if found in a leaf, false otherwise.
 */
static bool mas_rev_awalk(struct ma_state *mas, unsigned long size,
		unsigned long *gap_min, unsigned long *gap_max)
{
	enum maple_type type = mte_node_type(mas->node);
	struct maple_node *node = mas_mn(mas);
	unsigned long *pivots, *gaps;
	void __rcu **slots;
	unsigned long gap = 0;
	unsigned long max, min;
	unsigned char offset;

	if (unlikely(mas_is_err(mas)))
		return true;

	if (ma_is_dense(type)) {
		/* dense nodes. */
		mas->offset = (unsigned char)(mas->index - mas->min);
		return true;
	}

	pivots = ma_pivots(node, type);
	slots = ma_slots(node, type);
	gaps = ma_gaps(node, type);
	offset = mas->offset;
	min = mas_safe_min(mas, pivots, offset);
	/* Skip out of bounds. */
	while (mas->last < min)
		min = mas_safe_min(mas, pivots, --offset);

	max = mas_safe_pivot(mas, pivots, offset, type);
	while (mas->index <= max) {
		gap = 0;
		if (gaps)
			gap = gaps[offset];
		else if (!mas_slot(mas, slots, offset))
			gap = max - min + 1;

		if (gap) {
			if ((size <= gap) && (size <= mas->last - min + 1))
				break;

			if (!gaps) {
				/* Skip the next slot, it cannot be a gap. */
				if (offset < 2)
					goto ascend;

				offset -= 2;
				max = pivots[offset];
				min = mas_safe_min(mas, pivots, offset);
				continue;
			}
		}

		if (!offset)
			goto ascend;

		offset--;
		max = min - 1;
		min = mas_safe_min(mas, pivots, offset);
	}

	if (unlikely((mas->index > max) || (size - 1 > max - mas->index)))
		goto no_space;

	if (unlikely(ma_is_leaf(type))) {
		mas->offset = offset;
		*gap_min = min;
		*gap_max = min + gap - 1;
		return true;
	}

	/* descend, only happens under lock. */
	mas->node = mas_slot(mas, slots, offset);
	mas->min = min;
	mas->max = max;
	mas->offset = mas_data_end(mas);
	return false;

ascend:
	if (!mte_is_root(mas->node))
		return false;

no_space:
	mas_set_err(mas, -EBUSY);
	return false;
}
static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
{
	enum maple_type type = mte_node_type(mas->node);
	unsigned long pivot, min, gap = 0;
	unsigned char offset, data_end;
	unsigned long *gaps, *pivots;
	void __rcu **slots;
	struct maple_node *node;
	bool found = false;

	if (ma_is_dense(type)) {
		mas->offset = (unsigned char)(mas->index - mas->min);
		return true;
	}

	node = mas_mn(mas);
	pivots = ma_pivots(node, type);
	slots = ma_slots(node, type);
	gaps = ma_gaps(node, type);
	offset = mas->offset;
	min = mas_safe_min(mas, pivots, offset);
	data_end = ma_data_end(node, type, pivots, mas->max);
	for (; offset <= data_end; offset++) {
		pivot = mas_logical_pivot(mas, pivots, offset, type);

		/* Not within lower bounds */
		if (mas->index > pivot)
			goto next_slot;

		if (gaps)
			gap = gaps[offset];
		else if (!mas_slot(mas, slots, offset))
			gap = min(pivot, mas->last) - max(mas->index, min) + 1;
		else
			goto next_slot;

		if (gap >= size) {
			if (ma_is_leaf(type)) {
				found = true;
				goto done;
			}
			if (mas->index <= pivot) {
				mas->node = mas_slot(mas, slots, offset);
				mas->min = min;
				mas->max = pivot;
				offset = 0;
				break;
			}
		}
next_slot:
		min = pivot + 1;
		if (mas->last <= pivot) {
			mas_set_err(mas, -EBUSY);
			return true;
		}
	}

	if (mte_is_root(mas->node))
		found = true;
done:
	mas->offset = offset;
	return found;
}
/*
 * mas_walk() - Search for @mas->index in the tree.
 * @mas: The maple state.
 *
 * mas->index and mas->last will be set to the range if there is a value.  If
 * mas->node is MAS_NONE, reset to MAS_START.
 *
 * Return: the entry at the location or %NULL.
 */
void *mas_walk(struct ma_state *mas)
{
	void *entry;

retry:
	entry = mas_state_walk(mas);
	if (mas_is_start(mas))
		goto retry;

	if (mas_is_ptr(mas)) {
		if (!mas->index) {
			mas->last = 0;
		} else {
			mas->index = 1;
			mas->last = ULONG_MAX;
		}
		return entry;
	}

	if (mas_is_none(mas)) {
		mas->index = 0;
		mas->last = ULONG_MAX;
	}

	return entry;
}
EXPORT_SYMBOL_GPL(mas_walk);
static inline bool mas_rewind_node(struct ma_state *mas)
{
	unsigned char slot;

	do {
		if (mte_is_root(mas->node)) {
			slot = mas->offset;
			if (!slot)
				return false;
		} else {
			mas_ascend(mas);
			slot = mas->offset;
		}
	} while (!slot);

	mas->offset = --slot;
	return true;
}
/*
 * mas_skip_node() - Internal function.  Skip over a node.
 * @mas: The maple state.
 *
 * Return: true if there is another node, false otherwise.
 */
static inline bool mas_skip_node(struct ma_state *mas)
{
	if (mas_is_err(mas))
		return false;

	do {
		if (mte_is_root(mas->node)) {
			if (mas->offset >= mas_data_end(mas)) {
				mas_set_err(mas, -EBUSY);
				return false;
			}
		} else {
			mas_ascend(mas);
		}
	} while (mas->offset >= mas_data_end(mas));

	mas->offset++;
	return true;
}
/*
 * mas_awalk() - Allocation walk.  Search from low address to high, for a gap
 * of @size.
 * @mas: The maple state
 * @size: The size of the gap required
 *
 * Search between @mas->index and @mas->last for a gap of @size.
 */
static inline void mas_awalk(struct ma_state *mas, unsigned long size)
{
	struct maple_enode *last = NULL;

	/*
	 * There are 4 options:
	 * go to child (descend)
	 * go back to parent (ascend)
	 * no gap found. (return, slot == MAPLE_NODE_SLOTS)
	 * found the gap. (return, slot != MAPLE_NODE_SLOTS)
	 */
	while (!mas_is_err(mas) && !mas_anode_descend(mas, size)) {
		if (last == mas->node)
			mas_skip_node(mas);
		else
			last = mas->node;
	}
}
/*
 * mas_fill_gap() - Fill a located gap with @entry.
 * @mas: The maple state
 * @entry: The value to store
 * @slot: The offset into the node to store the @entry
 * @size: The size of the entry
 * @index: The start location
 */
static inline void mas_fill_gap(struct ma_state *mas, void *entry,
		unsigned char slot, unsigned long size, unsigned long *index)
{
	MA_WR_STATE(wr_mas, mas, entry);
	unsigned char pslot = mte_parent_slot(mas->node);
	struct maple_enode *mn = mas->node;
	unsigned long *pivots;
	enum maple_type ptype;
	/*
	 * mas->index is the start address for the search
	 * which may no longer be needed.
	 * mas->last is the end address for the search
	 */

	*index = mas->index;
	mas->last = mas->index + size - 1;

	/*
	 * It is possible that using mas->max and mas->min to correctly
	 * calculate the index and last will cause an issue in the gap
	 * calculation, so fix the ma_state here
	 */
	mas_ascend(mas);
	ptype = mte_node_type(mas->node);
	pivots = ma_pivots(mas_mn(mas), ptype);
	mas->max = mas_safe_pivot(mas, pivots, pslot, ptype);
	mas->min = mas_safe_min(mas, pivots, pslot);
	mas->node = mn;
	mas->offset = slot;
	mas_wr_store_entry(&wr_mas);
}
/*
 * mas_sparse_area() - Internal function.  Return upper or lower limit when
 * searching for a gap in an empty tree.
 * @mas: The maple state
 * @min: the minimum range
 * @max: The maximum range
 * @size: The size of the gap
 * @fwd: Searching forward or back
 */
static inline int mas_sparse_area(struct ma_state *mas, unsigned long min,
				unsigned long max, unsigned long size, bool fwd)
{
	if (!unlikely(mas_is_none(mas)) && min == 0) {
		min++;
		/*
		 * At this time, min is increased, we need to recheck whether
		 * the size is satisfied.
		 */
		if (min > max || max - min + 1 < size)
			return -EBUSY;
	}
	/* mas_is_ptr */

	if (fwd) {
		mas->index = min;
		mas->last = min + size - 1;
	} else {
		mas->last = max;
		mas->index = max - size + 1;
	}
	return 0;
}
/*
 * mas_empty_area() - Get the lowest address within the range that is
 * sufficient for the size requested.
 * @mas: The maple state
 * @min: The lowest value of the range
 * @max: The highest value of the range
 * @size: The size needed
 *
 * Return: 0 on success, otherwise a negative errno.
 */
int mas_empty_area(struct ma_state *mas, unsigned long min,
		unsigned long max, unsigned long size)
{
	unsigned char offset;
	unsigned long *pivots;
	enum maple_type mt;

	if (min >= max)
		return -EINVAL;

	if (mas_is_start(mas))
		mas_start(mas);
	else if (mas->offset >= 2)
		mas->offset -= 2;
	else if (!mas_skip_node(mas))
		return -EBUSY;

	/* Empty set */
	if (mas_is_none(mas) || mas_is_ptr(mas))
		return mas_sparse_area(mas, min, max, size, true);

	/* The start of the window can only be within these values */
	mas->index = min;
	mas->last = max;
	mas_awalk(mas, size);

	if (unlikely(mas_is_err(mas)))
		return xa_err(mas->node);

	offset = mas->offset;
	if (unlikely(offset == MAPLE_NODE_SLOTS))
		return -EBUSY;

	mt = mte_node_type(mas->node);
	pivots = ma_pivots(mas_mn(mas), mt);
	min = mas_safe_min(mas, pivots, offset);
	if (mas->index < min)
		mas->index = min;
	mas->last = mas->index + size - 1;
	return 0;
}
EXPORT_SYMBOL_GPL(mas_empty_area);
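
/*
 * Example use of mas_empty_area() (editorial sketch; the tree, limits and
 * size are hypothetical).  On success, mas.index and mas.last delimit the
 * located gap:
 *
 *	MA_STATE(mas, &tree, 0, 0);
 *
 *	mtree_lock(&tree);
 *	if (!mas_empty_area(&mas, 0, 0xffff, 0x1000))
 *		pr_debug("gap found at %lu-%lu\n", mas.index, mas.last);
 *	mtree_unlock(&tree);
 */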
/*
 * mas_empty_area_rev() - Get the highest address within the range that is
 * sufficient for the size requested.
 * @mas: The maple state
 * @min: The lowest value of the range
 * @max: The highest value of the range
 * @size: The size needed
 *
 * Return: 0 on success, otherwise a negative errno.
 */
int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
		unsigned long max, unsigned long size)
{
	struct maple_enode *last = mas->node;

	if (min >= max)
		return -EINVAL;

	if (mas_is_start(mas)) {
		mas_start(mas);
		mas->offset = mas_data_end(mas);
	} else if (mas->offset >= 2) {
		mas->offset -= 2;
	} else if (!mas_rewind_node(mas)) {
		return -EBUSY;
	}

	/* Empty set. */
	if (mas_is_none(mas) || mas_is_ptr(mas))
		return mas_sparse_area(mas, min, max, size, false);

	/* The start of the window can only be within these values. */
	mas->index = min;
	mas->last = max;

	while (!mas_rev_awalk(mas, size, &min, &max)) {
		if (last == mas->node) {
			if (!mas_rewind_node(mas))
				return -EBUSY;
		} else {
			last = mas->node;
		}
	}

	if (mas_is_err(mas))
		return xa_err(mas->node);

	if (unlikely(mas->offset == MAPLE_NODE_SLOTS))
		return -EBUSY;

	/* Trim the upper limit to the max. */
	if (max <= mas->last)
		mas->last = max;

	mas->index = mas->last - size + 1;
	return 0;
}
EXPORT_SYMBOL_GPL(mas_empty_area_rev);
static inline int mas_alloc(struct ma_state *mas, void *entry,
		unsigned long size, unsigned long *index)
{
	unsigned long min;

	mas_start(mas);
	if (mas_is_none(mas) || mas_is_ptr(mas)) {
		mas_root_expand(mas, entry);
		if (mas_is_err(mas))
			return xa_err(mas->node);

		if (!mas->index)
			return mte_pivot(mas->node, 0);
		return mte_pivot(mas->node, 1);
	}

	/* Must be walking a tree. */
	mas_awalk(mas, size);
	if (mas_is_err(mas))
		return xa_err(mas->node);

	if (mas->offset == MAPLE_NODE_SLOTS)
		goto no_gap;

	/*
	 * At this point, mas->node points to the right node and we have an
	 * offset that has a sufficient gap.
	 */
	min = mas->min;
	if (mas->offset)
		min = mte_pivot(mas->node, mas->offset - 1) + 1;

	if (mas->index < min)
		mas->index = min;

	mas_fill_gap(mas, entry, mas->offset, size, index);
	return 0;

no_gap:
	return -EBUSY;
}
static inline int mas_rev_alloc(struct ma_state *mas, unsigned long min,
				unsigned long max, void *entry,
				unsigned long size, unsigned long *index)
{
	int ret = 0;

	ret = mas_empty_area_rev(mas, min, max, size);
	if (ret)
		return ret;

	if (mas_is_err(mas))
		return xa_err(mas->node);

	if (mas->offset == MAPLE_NODE_SLOTS)
		goto no_gap;

	mas_fill_gap(mas, entry, mas->offset, size, index);
	return 0;

no_gap:
	return -EBUSY;
}
/*
 * mte_dead_leaves() - Mark all leaves of a node as dead.
 * @enode: The encoded maple node
 * @mt: The maple tree
 * @slots: Pointer to the slot array
 *
 * Must hold the write lock.
 *
 * Return: The number of leaves marked as dead.
 */
static inline
unsigned char mte_dead_leaves(struct maple_enode *enode, struct maple_tree *mt,
			      void __rcu **slots)
{
	struct maple_node *node;
	enum maple_type type;
	void *entry;
	int offset;

	for (offset = 0; offset < mt_slot_count(enode); offset++) {
		entry = mt_slot(mt, slots, offset);
		type = mte_node_type(entry);
		node = mte_to_node(entry);
		/* Use both node and type to catch LE & BE metadata */
		if (!node || !type)
			break;

		mte_set_node_dead(entry);
		node->type = type;
		rcu_assign_pointer(slots[offset], node);
	}

	return offset;
}
/*
 * mte_dead_walk() - Walk down a dead tree to just before the leaves
 * @enode: The maple encoded node
 * @offset: The starting offset
 *
 * Note: This can only be used from the RCU callback context.
 */
static void __rcu **mte_dead_walk(struct maple_enode **enode, unsigned char offset)
{
	struct maple_node *node, *next;
	void __rcu **slots = NULL;

	next = mte_to_node(*enode);
	do {
		*enode = ma_enode_ptr(next);
		node = mte_to_node(*enode);
		slots = ma_slots(node, node->type);
		next = rcu_dereference_protected(slots[offset],
					lock_is_held(&rcu_callback_map));
		offset = 0;
	} while (!ma_is_leaf(next->type));

	return slots;
}
/*
 * mt_free_walk() - Walk & free a tree in the RCU callback context
 * @head: The RCU head that's within the node.
 *
 * Note: This can only be used from the RCU callback context.
 */
static void mt_free_walk(struct rcu_head *head)
{
	void __rcu **slots;
	struct maple_node *node, *start;
	struct maple_enode *enode;
	unsigned char offset;
	enum maple_type type;

	node = container_of(head, struct maple_node, rcu);

	if (ma_is_leaf(node->type))
		goto free_leaf;

	start = node;
	enode = mt_mk_node(node, node->type);
	slots = mte_dead_walk(&enode, 0);
	node = mte_to_node(enode);
	do {
		mt_free_bulk(node->slot_len, slots);
		offset = node->parent_slot + 1;
		enode = node->piv_parent;
		if (mte_to_node(enode) == node)
			goto free_leaf;

		type = mte_node_type(enode);
		slots = ma_slots(mte_to_node(enode), type);
		if ((offset < mt_slots[type]) &&
		    rcu_dereference_protected(slots[offset],
					      lock_is_held(&rcu_callback_map)))
			slots = mte_dead_walk(&enode, offset);
		node = mte_to_node(enode);
	} while ((node != start) || (node->slot_len < offset));

	slots = ma_slots(node, node->type);
	mt_free_bulk(node->slot_len, slots);

free_leaf:
	mt_free_rcu(&node->rcu);
}
static inline void __rcu **mte_destroy_descend(struct maple_enode **enode,
	struct maple_tree *mt, struct maple_enode *prev, unsigned char offset)
{
	struct maple_node *node;
	struct maple_enode *next = *enode;
	void __rcu **slots = NULL;
	enum maple_type type;
	unsigned char next_offset = 0;

	do {
		*enode = next;
		node = mte_to_node(*enode);
		type = mte_node_type(*enode);
		slots = ma_slots(node, type);
		next = mt_slot_locked(mt, slots, next_offset);
		if ((mte_dead_node(next)))
			next = mt_slot_locked(mt, slots, ++next_offset);

		mte_set_node_dead(*enode);
		node->type = type;
		node->piv_parent = prev;
		node->parent_slot = offset;
		offset = next_offset;
		next_offset = 0;
		prev = *enode;
	} while (!mte_is_leaf(next));

	return slots;
}
static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
			    bool free)
{
	void __rcu **slots;
	struct maple_node *node = mte_to_node(enode);
	struct maple_enode *start;

	if (mte_is_leaf(enode)) {
		node->type = mte_node_type(enode);
		goto free_leaf;
	}

	start = enode;
	slots = mte_destroy_descend(&enode, mt, start, 0);
	node = mte_to_node(enode);	// Updated in the above call.
	do {
		enum maple_type type;
		unsigned char offset;
		struct maple_enode *parent, *tmp;

		node->slot_len = mte_dead_leaves(enode, mt, slots);
		if (free)
			mt_free_bulk(node->slot_len, slots);
		offset = node->parent_slot + 1;
		enode = node->piv_parent;
		if (mte_to_node(enode) == node)
			goto free_leaf;

		type = mte_node_type(enode);
		slots = ma_slots(mte_to_node(enode), type);
		if (offset >= mt_slots[type])
			goto next;

		tmp = mt_slot_locked(mt, slots, offset);
		if (mte_node_type(tmp) && mte_to_node(tmp)) {
			parent = enode;
			enode = tmp;
			slots = mte_destroy_descend(&enode, mt, parent, offset);
		}
next:
		node = mte_to_node(enode);
	} while (start != enode);

	node = mte_to_node(enode);
	node->slot_len = mte_dead_leaves(enode, mt, slots);
	if (free)
		mt_free_bulk(node->slot_len, slots);

free_leaf:
	if (free)
		mt_free_rcu(&node->rcu);
	else
		mt_clear_meta(mt, node, node->type);
}
/*
 * mte_destroy_walk() - Free a tree or sub-tree.
 * @enode: the encoded maple node (maple_enode) to start
 * @mt: the tree to free - needed for node types.
 *
 * Must hold the write lock.
 */
static inline void mte_destroy_walk(struct maple_enode *enode,
				    struct maple_tree *mt)
{
	struct maple_node *node = mte_to_node(enode);

	if (mt_in_rcu(mt)) {
		mt_destroy_walk(enode, mt, false);
		call_rcu(&node->rcu, mt_free_walk);
	} else {
		mt_destroy_walk(enode, mt, true);
	}
}
static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
{
	if (unlikely(mas_is_paused(wr_mas->mas)))
		mas_reset(wr_mas->mas);

	if (!mas_is_start(wr_mas->mas)) {
		if (mas_is_none(wr_mas->mas)) {
			mas_reset(wr_mas->mas);
		} else {
			wr_mas->r_max = wr_mas->mas->max;
			wr_mas->type = mte_node_type(wr_mas->mas->node);
			if (mas_is_span_wr(wr_mas))
				mas_reset(wr_mas->mas);
		}
	}
}
/**
 * mas_store() - Store an @entry.
 * @mas: The maple state.
 * @entry: The entry to store.
 *
 * The @mas->index and @mas->last are used to set the range for the @entry.
 * Note: The @mas should have pre-allocated entries to ensure there is memory
 * to store the entry.  Please see mas_expected_entries()/mas_destroy() for
 * more details.
 *
 * Return: the first entry between mas->index and mas->last or %NULL.
 */
void *mas_store(struct ma_state *mas, void *entry)
{
	MA_WR_STATE(wr_mas, mas, entry);

	trace_ma_write(__func__, mas, 0, entry);
#ifdef CONFIG_DEBUG_MAPLE_TREE
	if (mas->index > mas->last)
		pr_err("Error %lu > %lu %p\n", mas->index, mas->last, entry);
	MT_BUG_ON(mas->tree, mas->index > mas->last);
	if (mas->index > mas->last) {
		mas_set_err(mas, -EINVAL);
		return NULL;
	}

#endif

	/*
	 * Storing is the same operation as insert with the added caveat that it
	 * can overwrite entries.  Although this seems simple enough, one may
	 * want to examine what happens if a single store operation was to
	 * overwrite multiple entries within a self-balancing B-Tree.
	 */
	mas_wr_store_setup(&wr_mas);
	mas_wr_store_entry(&wr_mas);
	return wr_mas.content;
}
EXPORT_SYMBOL_GPL(mas_store);
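
/*
 * Example use of mas_store() (editorial sketch; the tree, range and pointer
 * are hypothetical, and nodes are assumed to be preallocated as the note
 * above requires):
 *
 *	MA_STATE(mas, &tree, 10, 20);
 *
 *	mtree_lock(&tree);
 *	mas_store(&mas, ptr);
 *	mtree_unlock(&tree);
 *	(ptr is now stored over the range 10-20)
 */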
/**
 * mas_store_gfp() - Store a value into the tree.
 * @mas: The maple state
 * @entry: The entry to store
 * @gfp: The GFP_FLAGS to use for allocations if necessary.
 *
 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could
 * not be allocated.
 */
int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp)
{
	MA_WR_STATE(wr_mas, mas, entry);

	mas_wr_store_setup(&wr_mas);
	trace_ma_write(__func__, mas, 0, entry);
retry:
	mas_wr_store_entry(&wr_mas);
	if (unlikely(mas_nomem(mas, gfp)))
		goto retry;

	if (unlikely(mas_is_err(mas)))
		return xa_err(mas->node);

	return 0;
}
EXPORT_SYMBOL_GPL(mas_store_gfp);
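
/*
 * Example use of mas_store_gfp() (editorial sketch; tree, range and pointer
 * are hypothetical).  Note that mas_nomem() may drop and retake the internal
 * tree lock for a blocking allocation, so the internal lock must be used:
 *
 *	MA_STATE(mas, &tree, 10, 20);
 *	int ret;
 *
 *	mtree_lock(&tree);
 *	ret = mas_store_gfp(&mas, ptr, GFP_KERNEL);
 *	mtree_unlock(&tree);
 */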
/**
 * mas_store_prealloc() - Store a value into the tree using memory
 * preallocated in the maple state.
 * @mas: The maple state
 * @entry: The entry to store.
 */
void mas_store_prealloc(struct ma_state *mas, void *entry)
{
	MA_WR_STATE(wr_mas, mas, entry);

	mas_wr_store_setup(&wr_mas);
	trace_ma_write(__func__, mas, 0, entry);
	mas_wr_store_entry(&wr_mas);
	BUG_ON(mas_is_err(mas));
	mas_destroy(mas);
}
EXPORT_SYMBOL_GPL(mas_store_prealloc);
/**
 * mas_preallocate() - Preallocate enough nodes for a store operation
 * @mas: The maple state
 * @gfp: The GFP_FLAGS to use for allocations.
 *
 * Return: 0 on success, -ENOMEM if memory could not be allocated.
 */
int mas_preallocate(struct ma_state *mas, gfp_t gfp)
{
	int ret;

	mas_node_count_gfp(mas, 1 + mas_mt_height(mas) * 3, gfp);
	mas->mas_flags |= MA_STATE_PREALLOC;
	if (likely(!mas_is_err(mas)))
		return 0;

	mas_set_alloc_req(mas, 0);
	ret = xa_err(mas->node);
	mas_reset(mas);
	mas_destroy(mas);
	mas_reset(mas);
	return ret;
}
EXPORT_SYMBOL_GPL(mas_preallocate);
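
/*
 * Example pairing of mas_preallocate() with mas_store_prealloc() (editorial
 * sketch; tree, range and pointer are hypothetical).  After a successful
 * preallocation the store cannot fail, and mas_store_prealloc() releases any
 * unused nodes via mas_destroy():
 *
 *	MA_STATE(mas, &tree, index, last);
 *
 *	if (mas_preallocate(&mas, GFP_KERNEL))
 *		return -ENOMEM;
 *	mtree_lock(&tree);
 *	mas_store_prealloc(&mas, ptr);
 *	mtree_unlock(&tree);
 */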
/*
 * mas_destroy() - destroy a maple state.
 * @mas: The maple state
 *
 * Upon completion, check the left-most node and rebalance against the node to
 * the right if necessary.  Frees any allocated nodes associated with this
 * maple state.
 */
void mas_destroy(struct ma_state *mas)
{
	struct maple_alloc *node;
	unsigned long total;

	/*
	 * When using mas_for_each() to insert an expected number of elements,
	 * it is possible that the number inserted is less than the expected
	 * number.  To fix an invalid final node, a check is performed here to
	 * rebalance the previous node with the final node.
	 */
	if (mas->mas_flags & MA_STATE_REBALANCE) {
		unsigned char end;

		if (mas_is_start(mas))
			mas_start(mas);

		mtree_range_walk(mas);
		end = mas_data_end(mas) + 1;
		if (end < mt_min_slot_count(mas->node) - 1)
			mas_destroy_rebalance(mas, end);

		mas->mas_flags &= ~MA_STATE_REBALANCE;
	}
	mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC);

	total = mas_allocated(mas);
	while (total) {
		node = mas->alloc;
		mas->alloc = node->slot[0];
		if (node->node_count > 1) {
			size_t count = node->node_count - 1;

			mt_free_bulk(count, (void __rcu **)&node->slot[1]);
			total -= count;
		}
		kmem_cache_free(maple_node_cache, node);
		total--;
	}

	mas->alloc = NULL;
}
EXPORT_SYMBOL_GPL(mas_destroy);
/*
 * mas_expected_entries() - Set the expected number of entries that will be
 * inserted.
 * @mas: The maple state
 * @nr_entries: The number of expected entries.
 *
 * This will attempt to pre-allocate enough nodes to store the expected number
 * of entries.  The allocations will occur using the bulk allocator interface
 * for speed.  Please call mas_destroy() on the @mas after inserting the
 * entries to ensure any unused nodes are freed.
 *
 * Return: 0 on success, -ENOMEM if memory could not be allocated.
 */
int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
{
	int nonleaf_cap = MAPLE_ARANGE64_SLOTS - 2;
	struct maple_enode *enode = mas->node;
	int nr_nodes;
	int ret;

	/*
	 * Sometimes it is necessary to duplicate a tree to a new tree, such as
	 * forking a process and duplicating the VMAs from one tree to a new
	 * tree.  When such a situation arises, it is known that the new tree is
	 * not going to be used until the entire tree is populated.  For
	 * performance reasons, it is best to use a bulk load with RCU disabled.
	 * This allows for optimistic splitting that favours the left and reuse
	 * of nodes during the operation.
	 */

	/* Optimize splitting for bulk insert in-order */
	mas->mas_flags |= MA_STATE_BULK;

	/*
	 * Avoid overflow, assume a gap between each entry and a trailing null.
	 * If this is wrong, it just means allocation can happen during
	 * insertion of entries.
	 */
	nr_nodes = max(nr_entries, nr_entries * 2 + 1);
	if (!mt_is_alloc(mas->tree))
		nonleaf_cap = MAPLE_RANGE64_SLOTS - 2;

	/* Leaves; reduce slots to keep space for expansion */
	nr_nodes = DIV_ROUND_UP(nr_nodes, MAPLE_RANGE64_SLOTS - 2);
	/* Internal nodes */
	nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
	/* Add working room for split (2 nodes) + new parents */
	mas_node_count(mas, nr_nodes + 3);

	/* Detect if allocations run out */
	mas->mas_flags |= MA_STATE_PREALLOC;

	if (!mas_is_err(mas))
		return 0;

	ret = xa_err(mas->node);
	mas->node = enode;
	mas_destroy(mas);
	return ret;
}
EXPORT_SYMBOL_GPL(mas_expected_entries);
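
/*
 * Example bulk-load pattern (editorial sketch; the source of the copied
 * ranges is hypothetical):
 *
 *	MA_STATE(mas, &tree, 0, 0);
 *
 *	if (mas_expected_entries(&mas, count))
 *		return -ENOMEM;
 *	for each (index, last, ptr) to copy, in order:
 *		mas_set_range(&mas, index, last);
 *		mas_store(&mas, ptr);
 *	mas_destroy(&mas);
 *	(mas_destroy() frees any preallocated nodes left unused)
 */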
/**
 * mas_next() - Get the next entry.
 * @mas: The maple state
 * @max: The maximum index to check.
 *
 * Returns the next entry after @mas->index.
 * Must hold rcu_read_lock or the write lock.
 * Can return the zero entry.
 *
 * Return: The next entry or %NULL
 */
void *mas_next(struct ma_state *mas, unsigned long max)
{
	if (mas_is_none(mas) || mas_is_paused(mas))
		mas->node = MAS_START;

	if (mas_is_start(mas))
		mas_walk(mas); /* Retries on dead nodes handled by mas_walk */

	if (mas_is_ptr(mas)) {
		if (!mas->index) {
			mas->index = 1;
			mas->last = ULONG_MAX;
		}
		return NULL;
	}

	if (mas->last == ULONG_MAX)
		return NULL;

	/* Retries on dead nodes handled by mas_next_entry */
	return mas_next_entry(mas, max);
}
EXPORT_SYMBOL_GPL(mas_next);
/**
 * mt_next() - get the next value in the maple tree
 * @mt: The maple tree
 * @index: The start index
 * @max: The maximum index to check
 *
 * Return: The entry at @index or higher, or %NULL if nothing is found.
 */
void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max)
{
	void *entry = NULL;
	MA_STATE(mas, mt, index, index);

	rcu_read_lock();
	entry = mas_next(&mas, max);
	rcu_read_unlock();
	return entry;
}
EXPORT_SYMBOL_GPL(mt_next);
/**
 * mas_prev() - Get the previous entry
 * @mas: The maple state
 * @min: The minimum value to check.
 *
 * Must hold rcu_read_lock or the write lock.
 * Will reset mas to MAS_START if the node is MAS_NONE.  Will stop on not
 * searchable nodes.
 *
 * Return: the previous value or %NULL.
 */
void *mas_prev(struct ma_state *mas, unsigned long min)
{
	if (!mas->index) {
		/* Nothing comes before 0 */
		mas->last = 0;
		mas->node = MAS_NONE;
		return NULL;
	}

	if (unlikely(mas_is_ptr(mas)))
		return NULL;

	if (mas_is_none(mas) || mas_is_paused(mas))
		mas->node = MAS_START;

	if (mas_is_start(mas)) {
		mas_walk(mas);
		if (!mas->index)
			return NULL;
	}

	if (mas_is_ptr(mas)) {
		if (!mas->index) {
			mas->last = 0;
			return NULL;
		}

		mas->index = mas->last = 0;
		return mas_root_locked(mas);
	}
	return mas_prev_entry(mas, min);
}
EXPORT_SYMBOL_GPL(mas_prev);
/**
 * mt_prev() - get the previous value in the maple tree
 * @mt: The maple tree
 * @index: The start index
 * @min: The minimum index to check
 *
 * Return: The entry at @index or lower, or %NULL if nothing is found.
 */
void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min)
{
	void *entry = NULL;
	MA_STATE(mas, mt, index, index);

	rcu_read_lock();
	entry = mas_prev(&mas, min);
	rcu_read_unlock();
	return entry;
}
EXPORT_SYMBOL(mt_prev);
/**
 * mas_pause() - Pause a mas_find/mas_for_each to drop the lock.
 * @mas: The maple state to pause
 *
 * Some users need to pause a walk and drop the lock they're holding in
 * order to yield to a higher priority thread or carry out an operation
 * on an entry.  Those users should call this function before they drop
 * the lock.  It resets the @mas to be suitable for the next iteration
 * of the loop after the user has reacquired the lock.  If most entries
 * found during a walk require you to call mas_pause(), the mt_for_each()
 * iterator may be more appropriate.
 */
void mas_pause(struct ma_state *mas)
{
	mas->node = MAS_PAUSE;
}
EXPORT_SYMBOL_GPL(mas_pause);
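
/*
 * Typical pause pattern (editorial sketch; process() is a hypothetical
 * callback and the loop bound is illustrative):
 *
 *	rcu_read_lock();
 *	mas_for_each(&mas, entry, ULONG_MAX) {
 *		if (need_resched()) {
 *			mas_pause(&mas);
 *			rcu_read_unlock();
 *			cond_resched();
 *			rcu_read_lock();
 *		}
 *		process(entry);
 *	}
 *	rcu_read_unlock();
 */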
/**
 * mas_find() - On the first call, find the entry at or after mas->index up to
 * %max.  Otherwise, find the entry after mas->index.
 * @mas: The maple state
 * @max: The maximum value to check.
 *
 * Must hold rcu_read_lock or the write lock.
 * If an entry exists, last and index are updated accordingly.
 * May set @mas->node to MAS_NONE.
 *
 * Return: The entry or %NULL.
 */
void *mas_find(struct ma_state *mas, unsigned long max)
{
	if (unlikely(mas_is_paused(mas))) {
		if (unlikely(mas->last == ULONG_MAX)) {
			mas->node = MAS_NONE;
			return NULL;
		}
		mas->node = MAS_START;
		mas->index = ++mas->last;
	}

	if (unlikely(mas_is_none(mas)))
		mas->node = MAS_START;

	if (unlikely(mas_is_start(mas))) {
		/* First run or continue */
		void *entry;

		if (mas->index > max)
			return NULL;

		entry = mas_walk(mas);
		if (entry)
			return entry;
	}

	if (unlikely(!mas_searchable(mas)))
		return NULL;

	/* Retries on dead nodes handled by mas_next_entry */
	return mas_next_entry(mas, max);
}
EXPORT_SYMBOL_GPL(mas_find);
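
/*
 * Example explicit loop over mas_find() (editorial sketch; tree contents
 * are hypothetical).  The mas_for_each() iterator is a thin wrapper around
 * exactly this pattern:
 *
 *	MA_STATE(mas, &tree, 0, 0);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	while ((entry = mas_find(&mas, ULONG_MAX)) != NULL)
 *		pr_debug("%lu-%lu -> %p\n", mas.index, mas.last, entry);
 *	rcu_read_unlock();
 */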
/**
 * mas_find_rev() - On the first call, find the first non-null entry at or
 * below mas->index down to %min.  Otherwise find the first non-null entry
 * below mas->index down to %min.
 * @mas: The maple state
 * @min: The minimum value to check.
 *
 * Must hold rcu_read_lock or the write lock.
 * If an entry exists, last and index are updated accordingly.
 * May set @mas->node to MAS_NONE.
 *
 * Return: The entry or %NULL.
 */
void *mas_find_rev(struct ma_state *mas, unsigned long min)
{
	if (unlikely(mas_is_paused(mas))) {
		if (unlikely(mas->last == ULONG_MAX)) {
			mas->node = MAS_NONE;
			return NULL;
		}
		mas->node = MAS_START;
		mas->last = --mas->index;
	}

	if (unlikely(mas_is_start(mas))) {
		/* First run or continue */
		void *entry;

		if (mas->index < min)
			return NULL;

		entry = mas_walk(mas);
		if (entry)
			return entry;
	}

	if (unlikely(!mas_searchable(mas)))
		return NULL;

	if (mas->index < min)
		return NULL;

	/* Retries on dead nodes handled by mas_prev_entry */
	return mas_prev_entry(mas, min);
}
EXPORT_SYMBOL_GPL(mas_find_rev);
/**
 * mas_erase() - Find the range in which index resides and erase the entire
 * range.
 * @mas: The maple state
 *
 * Must hold the write lock.
 * Searches for @mas->index, sets @mas->index and @mas->last to the range and
 * erases that range.
 *
 * Return: the entry that was erased or %NULL, @mas->index and @mas->last are
 * updated.
 */
void *mas_erase(struct ma_state *mas)
{
	void *entry;
	MA_WR_STATE(wr_mas, mas, NULL);

	if (mas_is_none(mas) || mas_is_paused(mas))
		mas->node = MAS_START;

	/* Retry unnecessary when holding the write lock. */
	entry = mas_state_walk(mas);
	if (!entry)
		return NULL;

write_retry:
	/* Must reset to ensure spanning writes of last slot are detected */
	mas_reset(mas);
	mas_wr_store_setup(&wr_mas);
	mas_wr_store_entry(&wr_mas);
	if (mas_nomem(mas, GFP_KERNEL))
		goto write_retry;

	return entry;
}
EXPORT_SYMBOL_GPL(mas_erase);
/**
 * mas_nomem() - Check if there was an error allocating and do the allocation
 * if necessary.  If there are allocations, then free them.
 * @mas: The maple state
 * @gfp: The GFP_FLAGS to use for allocations
 * Return: true on allocation, false otherwise.
 */
bool mas_nomem(struct ma_state *mas, gfp_t gfp)
	__must_hold(mas->tree->lock)
{
	if (likely(mas->node != MA_ERROR(-ENOMEM))) {
		mas_destroy(mas);
		return false;
	}

	if (gfpflags_allow_blocking(gfp) && !mt_external_lock(mas->tree)) {
		mtree_unlock(mas->tree);
		mas_alloc_nodes(mas, gfp);
		mtree_lock(mas->tree);
	} else {
		mas_alloc_nodes(mas, gfp);
	}

	if (!mas_allocated(mas))
		return false;

	mas->node = MAS_START;
	return true;
}
void __init maple_tree_init(void)
{
	maple_node_cache = kmem_cache_create("maple_node",
			sizeof(struct maple_node), sizeof(struct maple_node),
			SLAB_PANIC, NULL);
}
/**
 * mtree_load() - Load a value stored in a maple tree
 * @mt: The maple tree
 * @index: The index to load
 *
 * Return: the entry or %NULL
 */
void *mtree_load(struct maple_tree *mt, unsigned long index)
{
	MA_STATE(mas, mt, index, index);
	void *entry;

	trace_ma_read(__func__, &mas);
	rcu_read_lock();
retry:
	entry = mas_start(&mas);
	if (unlikely(mas_is_none(&mas)))
		goto unlock;

	if (unlikely(mas_is_ptr(&mas))) {
		if (index)
			entry = NULL;

		goto unlock;
	}

	entry = mtree_lookup_walk(&mas);
	if (!entry && unlikely(mas_is_start(&mas)))
		goto retry;
unlock:
	rcu_read_unlock();
	if (xa_is_zero(entry))
		return NULL;

	return entry;
}
EXPORT_SYMBOL(mtree_load);
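
/*
 * Example lookup (editorial sketch; the tree and index are hypothetical).
 * The normal API handles the RCU read lock internally, so no external
 * locking is needed:
 *
 *	void *entry = mtree_load(&tree, 12);
 */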
/**
 * mtree_store_range() - Store an entry at a given range.
 * @mt: The maple tree
 * @index: The start of the range
 * @last: The end of the range
 * @entry: The entry to store
 * @gfp: The GFP_FLAGS to use for allocations
 *
 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could
 * not be allocated.
 */
int mtree_store_range(struct maple_tree *mt, unsigned long index,
		unsigned long last, void *entry, gfp_t gfp)
{
	MA_STATE(mas, mt, index, last);
	MA_WR_STATE(wr_mas, &mas, entry);

	trace_ma_write(__func__, &mas, 0, entry);
	if (WARN_ON_ONCE(xa_is_advanced(entry)))
		return -EINVAL;

	if (index > last)
		return -EINVAL;

	mtree_lock(mt);
retry:
	mas_wr_store_entry(&wr_mas);
	if (mas_nomem(&mas, gfp))
		goto retry;

	mtree_unlock(mt);
	if (mas_is_err(&mas))
		return xa_err(mas.node);

	return 0;
}
EXPORT_SYMBOL(mtree_store_range);
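
/*
 * Example use of mtree_store_range() (editorial sketch; the tree and values
 * are hypothetical).  Locking is handled internally, and any prior entries
 * in the range are overwritten:
 *
 *	DEFINE_MTREE(tree);
 *	int ret;
 *
 *	ret = mtree_store_range(&tree, 10, 20, ptr, GFP_KERNEL);
 */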
/**
 * mtree_store() - Store an entry at a given index.
 * @mt: The maple tree
 * @index: The index to store the value
 * @entry: The entry to store
 * @gfp: The GFP_FLAGS to use for allocations
 *
 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could
 * not be allocated.
 */
int mtree_store(struct maple_tree *mt, unsigned long index, void *entry,
		 gfp_t gfp)
{
	return mtree_store_range(mt, index, index, entry, gfp);
}
EXPORT_SYMBOL(mtree_store);
/**
 * mtree_insert_range() - Insert an entry at a given range if there is no
 * value.
 * @mt: The maple tree
 * @first: The start of the range
 * @last: The end of the range
 * @entry: The entry to store
 * @gfp: The GFP_FLAGS to use for allocations.
 *
 * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
 * request, -ENOMEM if memory could not be allocated.
 */
int mtree_insert_range(struct maple_tree *mt, unsigned long first,
		unsigned long last, void *entry, gfp_t gfp)
{
	MA_STATE(ms, mt, first, last);

	if (WARN_ON_ONCE(xa_is_advanced(entry)))
		return -EINVAL;

	if (first > last)
		return -EINVAL;

	mtree_lock(mt);
retry:
	mas_insert(&ms, entry);
	if (mas_nomem(&ms, gfp))
		goto retry;

	mtree_unlock(mt);
	if (mas_is_err(&ms))
		return xa_err(ms.node);

	return 0;
}
EXPORT_SYMBOL(mtree_insert_range);
/**
 * mtree_insert() - Insert an entry at a given index if there is no value.
 * @mt: The maple tree
 * @index : The index to store the value
 * @entry: The entry to store
 * @gfp: The GFP_FLAGS to use for allocations.
 *
 * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
 * request, -ENOMEM if memory could not be allocated.
 */
int mtree_insert(struct maple_tree *mt, unsigned long index, void *entry,
		 gfp_t gfp)
{
	return mtree_insert_range(mt, index, index, entry, gfp);
}
EXPORT_SYMBOL(mtree_insert);
int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
		void *entry, unsigned long size, unsigned long min,
		unsigned long max, gfp_t gfp)
{
	int ret = 0;

	MA_STATE(mas, mt, min, max - size);
	if (!mt_is_alloc(mt))
		return -EINVAL;

	if (WARN_ON_ONCE(mt_is_reserved(entry)))
		return -EINVAL;

	if (min > max)
		return -EINVAL;

	if (max < size)
		return -EINVAL;

	if (!size)
		return -EINVAL;

	mtree_lock(mt);
retry:
	mas.offset = 0;
	mas.index = min;
	mas.last = max - size;
	ret = mas_alloc(&mas, entry, size, startp);
	if (mas_nomem(&mas, gfp))
		goto retry;

	mtree_unlock(mt);
	return ret;
}
EXPORT_SYMBOL(mtree_alloc_range);
int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
		void *entry, unsigned long size, unsigned long min,
		unsigned long max, gfp_t gfp)
{
	int ret = 0;

	MA_STATE(mas, mt, min, max - size);
	if (!mt_is_alloc(mt))
		return -EINVAL;

	if (WARN_ON_ONCE(mt_is_reserved(entry)))
		return -EINVAL;

	if (min > max)
		return -EINVAL;

	if (max < size - 1)
		return -EINVAL;

	if (!size)
		return -EINVAL;

	mtree_lock(mt);
retry:
	ret = mas_rev_alloc(&mas, min, max, entry, size, startp);
	if (mas_nomem(&mas, gfp))
		goto retry;

	mtree_unlock(mt);
	return ret;
}
EXPORT_SYMBOL(mtree_alloc_rrange);
/**
 * mtree_erase() - Find an index and erase the entire range.
 * @mt: The maple tree
 * @index: The index to erase
 *
 * Erasing is the same as a walk to an entry then a store of a NULL to that
 * ENTIRE range.  In fact, it is implemented as such using the advanced API.
 *
 * Return: The entry stored at the @index or %NULL
 */
void *mtree_erase(struct maple_tree *mt, unsigned long index)
{
	void *entry = NULL;

	MA_STATE(mas, mt, index, index);
	trace_ma_op(__func__, &mas);

	mtree_lock(mt);
	entry = mas_erase(&mas);
	mtree_unlock(mt);

	return entry;
}
EXPORT_SYMBOL(mtree_erase);
/**
 * __mt_destroy() - Walk and free all nodes of a locked maple tree.
 * @mt: The maple tree
 *
 * Note: Does not handle locking.
 */
void __mt_destroy(struct maple_tree *mt)
{
	void *root = mt_root_locked(mt);

	rcu_assign_pointer(mt->ma_root, NULL);
	if (xa_is_node(root))
		mte_destroy_walk(root, mt);

	mt->ma_flags = 0;
}
EXPORT_SYMBOL_GPL(__mt_destroy);

/**
 * mtree_destroy() - Destroy a maple tree
 * @mt: The maple tree
 *
 * Frees all resources used by the tree.  Handles locking.
 */
void mtree_destroy(struct maple_tree *mt)
{
	mtree_lock(mt);
	__mt_destroy(mt);
	mtree_unlock(mt);
}
EXPORT_SYMBOL(mtree_destroy);
/**
 * mt_find() - Search from the start up until an entry is found.
 * @mt: The maple tree
 * @index: Pointer which contains the start location of the search
 * @max: The maximum value to check
 *
 * Handles locking.  @index will be incremented to one beyond the range.
 *
 * Return: The entry at or after the @index or %NULL
 */
void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max)
{
	MA_STATE(mas, mt, *index, *index);
	void *entry;
#ifdef CONFIG_DEBUG_MAPLE_TREE
	unsigned long copy = *index;
#endif

	trace_ma_read(__func__, &mas);

	if ((*index) > max)
		return NULL;

	rcu_read_lock();
retry:
	entry = mas_state_walk(&mas);
	if (mas_is_start(&mas))
		goto retry;

	if (unlikely(xa_is_zero(entry)))
		entry = NULL;

	if (entry)
		goto unlock;

	while (mas_searchable(&mas) && (mas.index < max)) {
		entry = mas_next_entry(&mas, max);
		if (likely(entry && !xa_is_zero(entry)))
			break;
	}

	if (unlikely(xa_is_zero(entry)))
		entry = NULL;
unlock:
	rcu_read_unlock();
	if (likely(entry)) {
		*index = mas.last + 1;
#ifdef CONFIG_DEBUG_MAPLE_TREE
		if ((*index) && (*index) <= copy)
			pr_err("index not increased! %lx <= %lx\n",
			       *index, copy);
		MT_BUG_ON(mt, (*index) && ((*index) <= copy));
#endif
	}

	return entry;
}
EXPORT_SYMBOL(mt_find);
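
/*
 * Example iteration with mt_find() (editorial sketch; the tree is
 * hypothetical).  Because @index is advanced one beyond each returned
 * range, the loop visits every entry exactly once:
 *
 *	unsigned long index = 0;
 *	void *entry;
 *
 *	while ((entry = mt_find(&tree, &index, ULONG_MAX)) != NULL)
 *		pr_debug("found %p, next search starts at %lu\n",
 *			 entry, index);
 */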
/**
 * mt_find_after() - Search from the start up until an entry is found.
 * @mt: The maple tree
 * @index: Pointer which contains the start location of the search
 * @max: The maximum value to check
 *
 * Handles locking, detects wrapping on index == 0
 *
 * Return: The entry at or after the @index or %NULL
 */
void *mt_find_after(struct maple_tree *mt, unsigned long *index,
		    unsigned long max)
{
	if (!(*index))
		return NULL;

	return mt_find(mt, index, max);
}
EXPORT_SYMBOL(mt_find_after);
#ifdef CONFIG_DEBUG_MAPLE_TREE
atomic_t maple_tree_tests_run;
EXPORT_SYMBOL_GPL(maple_tree_tests_run);
atomic_t maple_tree_tests_passed;
EXPORT_SYMBOL_GPL(maple_tree_tests_passed);

#ifndef __KERNEL__
extern void kmem_cache_set_non_kernel(struct kmem_cache *, unsigned int);
void mt_set_non_kernel(unsigned int val)
{
	kmem_cache_set_non_kernel(maple_node_cache, val);
}

extern unsigned long kmem_cache_get_alloc(struct kmem_cache *);
unsigned long mt_get_alloc_size(void)
{
	return kmem_cache_get_alloc(maple_node_cache);
}

extern void kmem_cache_zero_nr_tallocated(struct kmem_cache *);
void mt_zero_nr_tallocated(void)
{
	kmem_cache_zero_nr_tallocated(maple_node_cache);
}

extern unsigned int kmem_cache_nr_tallocated(struct kmem_cache *);
unsigned int mt_nr_tallocated(void)
{
	return kmem_cache_nr_tallocated(maple_node_cache);
}

extern unsigned int kmem_cache_nr_allocated(struct kmem_cache *);
unsigned int mt_nr_allocated(void)
{
	return kmem_cache_nr_allocated(maple_node_cache);
}

/*
 * mas_dead_node() - Check if the maple state is pointing to a dead node.
 * @mas: The maple state
 * @index: The index to restore in @mas.
 *
 * Used in test code.
 * Return: 1 if @mas has been reset to MAS_START, 0 otherwise.
 */
static inline int mas_dead_node(struct ma_state *mas, unsigned long index)
{
	if (unlikely(!mas_searchable(mas) || mas_is_start(mas)))
		return 0;

	if (likely(!mte_dead_node(mas->node)))
		return 0;

	mas_rewalk(mas, index);
	return 1;
}

void mt_cache_shrink(void)
{
}
#else
/*
 * mt_cache_shrink() - For testing, don't use this.
 *
 * Certain testcases can trigger an OOM when combined with other memory
 * debugging configuration options.  This function is used to reduce the
 * possibility of an out of memory event due to kmem_cache objects remaining
 * around for longer than usual.
 */
void mt_cache_shrink(void)
{
	kmem_cache_shrink(maple_node_cache);
}
EXPORT_SYMBOL_GPL(mt_cache_shrink);
#endif /* not defined __KERNEL__ */
/*
 * mas_get_slot() - Get the entry in the maple state node stored at @offset.
 * @mas: The maple state
 * @offset: The offset into the slot array to fetch.
 *
 * Return: The entry stored at @offset.
 */
static inline struct maple_enode *mas_get_slot(struct ma_state *mas,
		unsigned char offset)
{
	return mas_slot(mas, ma_slots(mas_mn(mas), mte_node_type(mas->node)),
			offset);
}
/*
 * mas_first_entry() - Go to the first leaf and find the first entry.
 * @mas: the maple state.
 * @mn: the maple node.
 * @limit: the maximum index to check.
 * @mt: the maple node type.
 *
 * Sets mas->offset to the offset of the entry, mas->index to the range
 * minimum.
 *
 * Return: The first entry or MAS_NONE.
 */
static inline void *mas_first_entry(struct ma_state *mas, struct maple_node *mn,
		unsigned long limit, enum maple_type mt)
{
	unsigned long max;
	unsigned long *pivots;
	void __rcu **slots;
	void *entry = NULL;

	mas->index = mas->min;
	if (mas->index > limit)
		goto none;

	max = mas->max;
	mas->offset = 0;
	while (likely(!ma_is_leaf(mt))) {
		MT_BUG_ON(mas->tree, mte_dead_node(mas->node));
		slots = ma_slots(mn, mt);
		entry = mas_slot(mas, slots, 0);
		pivots = ma_pivots(mn, mt);
		if (unlikely(ma_dead_node(mn)))
			return NULL;
		max = pivots[0];
		mas->node = entry;
		mn = mas_mn(mas);
		mt = mte_node_type(mas->node);
	}
	MT_BUG_ON(mas->tree, mte_dead_node(mas->node));

	mas->max = max;
	slots = ma_slots(mn, mt);
	entry = mas_slot(mas, slots, 0);
	if (unlikely(ma_dead_node(mn)))
		return NULL;

	/* Slot 0 or 1 must be set */
	if (mas->index > limit)
		goto none;

	if (likely(entry))
		return entry;

	mas->offset = 1;
	entry = mas_slot(mas, slots, 1);
	pivots = ma_pivots(mn, mt);
	if (unlikely(ma_dead_node(mn)))
		return NULL;

	mas->index = pivots[0] + 1;
	if (mas->index > limit)
		goto none;

	if (likely(entry))
		return entry;

none:
	if (likely(!ma_dead_node(mn)))
		mas->node = MAS_NONE;
	return NULL;
}
/* Depth first search, post-order */
static void mas_dfs_postorder(struct ma_state *mas, unsigned long max)
{

	struct maple_enode *p = MAS_NONE, *mn = mas->node;
	unsigned long p_min, p_max;

	mas_next_node(mas, mas_mn(mas), max);
	if (!mas_is_none(mas))
		return;

	if (mte_is_root(mn))
		return;

	mas->node = mn;
	mas_ascend(mas);
	while (mas->node != MAS_NONE) {
		p = mas->node;
		p_min = mas->min;
		p_max = mas->max;
		mas_prev_node(mas, 0);
	}

	if (p == MAS_NONE)
		return;

	mas->node = p;
	mas->max = p_max;
	mas->min = p_min;
}
/* Tree validations */
static void mt_dump_node(const struct maple_tree *mt, void *entry,
		unsigned long min, unsigned long max, unsigned int depth);
static void mt_dump_range(unsigned long min, unsigned long max,
			  unsigned int depth)
{
	static const char spaces[] = "                                ";

	if (min == max)
		pr_info("%.*s%lu: ", depth * 2, spaces, min);
	else
		pr_info("%.*s%lu-%lu: ", depth * 2, spaces, min, max);
}

static void mt_dump_entry(void *entry, unsigned long min, unsigned long max,
			  unsigned int depth)
{
	mt_dump_range(min, max, depth);

	if (xa_is_value(entry))
		pr_cont("value %ld (0x%lx) [%p]\n", xa_to_value(entry),
				xa_to_value(entry), entry);
	else if (xa_is_zero(entry))
		pr_cont("zero (%ld)\n", xa_to_internal(entry));
	else if (mt_is_reserved(entry))
		pr_cont("UNKNOWN ENTRY (%p)\n", entry);
	else
		pr_cont("%p\n", entry);
}
static void mt_dump_range64(const struct maple_tree *mt, void *entry,
			unsigned long min, unsigned long max, unsigned int depth)
{
	struct maple_range_64 *node = &mte_to_node(entry)->mr64;
	bool leaf = mte_is_leaf(entry);
	unsigned long first = min;
	int i;

	pr_cont(" contents: ");
	for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++)
		pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
	pr_cont("%p\n", node->slot[i]);
	for (i = 0; i < MAPLE_RANGE64_SLOTS; i++) {
		unsigned long last = max;

		if (i < (MAPLE_RANGE64_SLOTS - 1))
			last = node->pivot[i];
		else if (!node->slot[i] && max != mt_node_max(entry))
			break;
		if (last == 0 && i > 0)
			break;
		if (leaf)
			mt_dump_entry(mt_slot(mt, node->slot, i),
					first, last, depth + 1);
		else if (node->slot[i])
			mt_dump_node(mt, mt_slot(mt, node->slot, i),
					first, last, depth + 1);

		if (last == max)
			break;
		if (last > max) {
			pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
					node, last, max, i);
			break;
		}
		first = last + 1;
	}
}
static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
			unsigned long min, unsigned long max, unsigned int depth)
{
	struct maple_arange_64 *node = &mte_to_node(entry)->ma64;
	bool leaf = mte_is_leaf(entry);
	unsigned long first = min;
	int i;

	pr_cont(" contents: ");
	for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++)
		pr_cont("%lu ", node->gap[i]);
	pr_cont("| %02X %02X| ", node->meta.end, node->meta.gap);
	for (i = 0; i < MAPLE_ARANGE64_SLOTS - 1; i++)
		pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
	pr_cont("%p\n", node->slot[i]);
	for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) {
		unsigned long last = max;

		if (i < (MAPLE_ARANGE64_SLOTS - 1))
			last = node->pivot[i];
		else if (!node->slot[i])
			break;
		if (last == 0 && i > 0)
			break;
		if (leaf)
			mt_dump_entry(mt_slot(mt, node->slot, i),
					first, last, depth + 1);
		else if (node->slot[i])
			mt_dump_node(mt, mt_slot(mt, node->slot, i),
					first, last, depth + 1);

		if (last == max)
			break;
		if (last > max) {
			pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
					node, last, max, i);
			break;
		}
		first = last + 1;
	}
}
static void mt_dump_node(const struct maple_tree *mt, void *entry,
		unsigned long min, unsigned long max, unsigned int depth)
{
	struct maple_node *node = mte_to_node(entry);
	unsigned int type = mte_node_type(entry);
	unsigned int i;

	mt_dump_range(min, max, depth);

	pr_cont("node %p depth %d type %d parent %p", node, depth, type,
			node ? node->parent : NULL);
	switch (type) {
	case maple_dense:
		pr_cont("\n");
		for (i = 0; i < MAPLE_NODE_SLOTS; i++) {
			if (min + i > max)
				pr_cont("OUT OF RANGE: ");
			mt_dump_entry(mt_slot(mt, node->slot, i),
					min + i, min + i, depth);
		}
		break;
	case maple_leaf_64:
	case maple_range_64:
		mt_dump_range64(mt, entry, min, max, depth);
		break;
	case maple_arange_64:
		mt_dump_arange64(mt, entry, min, max, depth);
		break;

	default:
		pr_cont(" UNKNOWN TYPE\n");
	}
}
void mt_dump(const struct maple_tree *mt)
{
	void *entry = rcu_dereference_check(mt->ma_root, mt_locked(mt));

	pr_info("maple_tree(%p) flags %X, height %u root %p\n",
		 mt, mt->ma_flags, mt_height(mt), entry);
	if (!xa_is_node(entry))
		mt_dump_entry(entry, 0, 0, 0);
	else if (entry)
		mt_dump_node(mt, entry, 0, mt_node_max(entry), 0);
}
EXPORT_SYMBOL_GPL(mt_dump);
/*
 * Calculate the maximum gap in a node and check if that's what is reported in
 * the parent (unless root).
 */
static void mas_validate_gaps(struct ma_state *mas)
{
	struct maple_enode *mte = mas->node;
	struct maple_node *p_mn;
	unsigned long gap = 0, max_gap = 0;
	unsigned long p_end, p_start = mas->min;
	unsigned char p_slot;
	unsigned long *gaps = NULL;
	unsigned long *pivots = ma_pivots(mte_to_node(mte), mte_node_type(mte));
	int i;

	if (ma_is_dense(mte_node_type(mte))) {
		for (i = 0; i < mt_slot_count(mte); i++) {
			if (mas_get_slot(mas, i)) {
				if (gap > max_gap)
					max_gap = gap;
				gap = 0;
				continue;
			}
			gap++;
		}
		goto counted;
	}

	gaps = ma_gaps(mte_to_node(mte), mte_node_type(mte));
	for (i = 0; i < mt_slot_count(mte); i++) {
		p_end = mas_logical_pivot(mas, pivots, i, mte_node_type(mte));

		if (!gaps) {
			if (mas_get_slot(mas, i)) {
				gap = 0;
				goto not_empty;
			}

			gap += p_end - p_start + 1;
		} else {
			void *entry = mas_get_slot(mas, i);

			gap = gaps[i];
			if (!entry) {
				if (gap != p_end - p_start + 1) {
					pr_err("%p[%u] -> %p %lu != %lu - %lu + 1\n",
						mas_mn(mas), i,
						mas_get_slot(mas, i), gap,
						p_end, p_start);
					mt_dump(mas->tree);

					MT_BUG_ON(mas->tree,
						gap != p_end - p_start + 1);
				}
			} else {
				if (gap > p_end - p_start + 1) {
					pr_err("%p[%u] %lu >= %lu - %lu + 1 (%lu)\n",
					mas_mn(mas), i, gap, p_end, p_start,
					p_end - p_start + 1);
					MT_BUG_ON(mas->tree,
						gap > p_end - p_start + 1);
				}
			}
		}

		if (gap > max_gap)
			max_gap = gap;
not_empty:
		p_start = p_end + 1;
		if (p_end >= mas->max)
			break;
	}

counted:
	if (mte_is_root(mte))
		return;

	p_slot = mte_parent_slot(mas->node);
	p_mn = mte_parent(mte);
	MT_BUG_ON(mas->tree, max_gap > mas->max);
	if (ma_gaps(p_mn, mas_parent_enum(mas, mte))[p_slot] != max_gap) {
		pr_err("gap %p[%u] != %lu\n", p_mn, p_slot, max_gap);
		mt_dump(mas->tree);
	}

	MT_BUG_ON(mas->tree,
		  ma_gaps(p_mn, mas_parent_enum(mas, mte))[p_slot] != max_gap);
}
static void mas_validate_parent_slot(struct ma_state *mas)
{
	struct maple_node *parent;
	struct maple_enode *node;
	enum maple_type p_type = mas_parent_enum(mas, mas->node);
	unsigned char p_slot = mte_parent_slot(mas->node);
	void __rcu **slots;
	int i;

	if (mte_is_root(mas->node))
		return;

	parent = mte_parent(mas->node);
	slots = ma_slots(parent, p_type);
	MT_BUG_ON(mas->tree, mas_mn(mas) == parent);

	/* Check prev/next parent slot for duplicate node entry */

	for (i = 0; i < mt_slots[p_type]; i++) {
		node = mas_slot(mas, slots, i);
		if (i == p_slot) {
			if (node != mas->node)
				pr_err("parent %p[%u] does not have %p\n",
					parent, i, mas_mn(mas));
			MT_BUG_ON(mas->tree, node != mas->node);
		} else if (node == mas->node) {
			pr_err("Invalid child %p at parent %p[%u] p_slot %u\n",
			       mas_mn(mas), parent, i, p_slot);
			MT_BUG_ON(mas->tree, node == mas->node);
		}
	}
}
static void mas_validate_child_slot(struct ma_state *mas)
{
	enum maple_type type = mte_node_type(mas->node);
	void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
	unsigned long *pivots = ma_pivots(mte_to_node(mas->node), type);
	struct maple_enode *child;
	unsigned char i;

	if (mte_is_leaf(mas->node))
		return;

	for (i = 0; i < mt_slots[type]; i++) {
		child = mas_slot(mas, slots, i);
		if (!pivots[i] || pivots[i] == mas->max)
			break;

		if (!child)
			break;

		if (mte_parent_slot(child) != i) {
			pr_err("Slot error at %p[%u]: child %p has pslot %u\n",
				mas_mn(mas), i, mte_to_node(child),
				mte_parent_slot(child));
			MT_BUG_ON(mas->tree, 1);
		}

		if (mte_parent(child) != mte_to_node(mas->node)) {
			pr_err("child %p has parent %p not %p\n",
				mte_to_node(child), mte_parent(child),
				mte_to_node(mas->node));
			MT_BUG_ON(mas->tree, 1);
		}
	}
}
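
/*
 * Note: the two checks above verify both directions of the linkage.  A
 * child's encoded parent pointer stores both the parent node and the
 * slot offset, so mte_parent_slot(child) must match the slot being
 * scanned and mte_parent(child) must point back at this node.
 */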
/*
 * Validate all pivots are within mas->min and mas->max.
 */
static void mas_validate_limits(struct ma_state *mas)
{
	int i;
	unsigned long prev_piv = 0;
	enum maple_type type = mte_node_type(mas->node);
	void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
	unsigned long *pivots = ma_pivots(mas_mn(mas), type);

	/* all limits are fine here. */
	if (mte_is_root(mas->node))
		return;

	for (i = 0; i < mt_slots[type]; i++) {
		unsigned long piv;

		piv = mas_safe_pivot(mas, pivots, i, type);

		if (!piv && (i != 0))
			break;

		if (!mte_is_leaf(mas->node)) {
			void *entry = mas_slot(mas, slots, i);

			if (!entry)
				pr_err("%p[%u] cannot be null\n",
					mas_mn(mas), i);

			MT_BUG_ON(mas->tree, !entry);
		}

		if (prev_piv > piv) {
			pr_err("%p[%u] piv %lu < prev_piv %lu\n",
				mas_mn(mas), i, piv, prev_piv);
			MT_BUG_ON(mas->tree, piv < prev_piv);
		}

		if (piv < mas->min) {
			pr_err("%p[%u] %lu < %lu\n", mas_mn(mas), i,
				piv, mas->min);
			MT_BUG_ON(mas->tree, piv < mas->min);
		}
		if (piv > mas->max) {
			pr_err("%p[%u] %lu > %lu\n", mas_mn(mas), i,
				piv, mas->max);
			MT_BUG_ON(mas->tree, piv > mas->max);
		}
		prev_piv = piv;
		if (piv == mas->max)
			break;
	}

	/* Slots after the last pivot must be empty. */
	for (i += 1; i < mt_slots[type]; i++) {
		void *entry = mas_slot(mas, slots, i);

		if (entry && (i != mt_slots[type] - 1)) {
			pr_err("%p[%u] should not have entry %p\n",
				mas_mn(mas), i, entry);
			MT_BUG_ON(mas->tree, entry != NULL);
		}

		if (i < mt_pivots[type]) {
			unsigned long piv = pivots[i];

			if (!piv)
				continue;

			pr_err("%p[%u] should not have piv %lu\n",
				mas_mn(mas), i, piv);
			MT_BUG_ON(mas->tree, i < mt_pivots[type] - 1);
		}
	}
}
static void mt_validate_nulls(struct maple_tree *mt)
{
	void *entry, *last = (void *)1;
	unsigned char offset = 0;
	void __rcu **slots;
	MA_STATE(mas, mt, 0, 0);

	mas_start(&mas);
	if (mas_is_none(&mas) || (mas.node == MAS_ROOT))
		return;

	while (!mte_is_leaf(mas.node))
		mas_descend(&mas);

	/* Walk every leaf slot; two NULLs in a row means a missed coalesce. */
	slots = ma_slots(mte_to_node(mas.node), mte_node_type(mas.node));
	do {
		entry = mas_slot(&mas, slots, offset);
		if (!last && !entry) {
			pr_err("Sequential nulls end at %p[%u]\n",
				mas_mn(&mas), offset);
		}
		MT_BUG_ON(mt, !last && !entry);
		last = entry;
		if (offset == mas_data_end(&mas)) {
			mas_next_node(&mas, mas_mn(&mas), ULONG_MAX);
			if (mas_is_none(&mas))
				return;
			offset = 0;
			slots = ma_slots(mte_to_node(mas.node),
					mte_node_type(mas.node));
		} else {
			offset++;
		}

	} while (!mas_is_none(&mas));
}
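
/*
 * Illustrative sketch of the invariant mt_validate_nulls() enforces:
 * adjacent NULL ranges in leaves must be coalesced into a single slot,
 * so a full leaf scan must never see two NULL entries in a row.  The
 * helper and store sequence below are hypothetical; after the two NULL
 * stores a single NULL slot is expected to span 5-20.
 */
#if 0
static void mt_nulls_example(void)
{
	DEFINE_MTREE(tree);

	mtree_store_range(&tree, 5, 10, xa_mk_value(1), GFP_KERNEL);
	mtree_store_range(&tree, 11, 20, xa_mk_value(2), GFP_KERNEL);
	mtree_store_range(&tree, 5, 10, NULL, GFP_KERNEL);
	mtree_store_range(&tree, 11, 20, NULL, GFP_KERNEL);
	mt_validate(&tree);	/* Would flag sequential NULLs if not merged. */
	mtree_destroy(&tree);
}
#endif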
/*
 * Validate a maple tree by checking:
 * 1. The limits (pivots are within mas->min to mas->max).
 * 2. The gap is correctly set in the parents.
 */
void mt_validate(struct maple_tree *mt)
{
	unsigned char end;

	MA_STATE(mas, mt, 0, 0);
	rcu_read_lock();
	mas_start(&mas);
	if (!mas_searchable(&mas))
		goto done;

	mas_first_entry(&mas, mas_mn(&mas), ULONG_MAX, mte_node_type(mas.node));
	while (!mas_is_none(&mas)) {
		MT_BUG_ON(mas.tree, mte_dead_node(mas.node));
		if (!mte_is_root(mas.node)) {
			end = mas_data_end(&mas);
			/* Non-root nodes must meet the minimum occupancy. */
			if ((end < mt_min_slot_count(mas.node)) &&
			    (mas.max != ULONG_MAX)) {
				pr_err("Invalid size %u of %p\n", end,
					mas_mn(&mas));
				MT_BUG_ON(mas.tree, 1);
			}
		}
		mas_validate_parent_slot(&mas);
		mas_validate_child_slot(&mas);
		mas_validate_limits(&mas);
		if (mt_is_alloc(mt))
			mas_validate_gaps(&mas);
		mas_dfs_postorder(&mas, ULONG_MAX);
	}
	mt_validate_nulls(mt);
done:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt_validate);
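
/*
 * Illustrative sketch (not part of the original source): a debug build
 * might call mt_validate() after a batch of stores to catch invariant
 * violations early.  The helper name and workload are hypothetical.
 */
#if 0
static void mt_validate_example(struct maple_tree *mt)
{
	unsigned long i;

	for (i = 0; i < 100; i++)
		mtree_store_range(mt, i * 10, i * 10 + 5,
				xa_mk_value(i), GFP_KERNEL);
	mt_validate(mt);	/* MT_BUG_ON() fires on any broken invariant. */
}
#endif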
#endif /* CONFIG_DEBUG_MAPLE_TREE */