Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0+
2 : /*
3 : * XArray implementation
4 : * Copyright (c) 2017-2018 Microsoft Corporation
5 : * Copyright (c) 2018-2020 Oracle
6 : * Author: Matthew Wilcox <willy@infradead.org>
7 : */
8 :
9 : #include <linux/bitmap.h>
10 : #include <linux/export.h>
11 : #include <linux/list.h>
12 : #include <linux/slab.h>
13 : #include <linux/xarray.h>
14 :
15 : /*
16 : * Coding conventions in this file:
17 : *
18 : * @xa is used to refer to the entire xarray.
19 : * @xas is the 'xarray operation state'. It may be either a pointer to
20 : * an xa_state, or an xa_state stored on the stack. This is an unfortunate
21 : * ambiguity.
22 : * @index is the index of the entry being operated on
23 : * @mark is an xa_mark_t; a small number indicating one of the mark bits.
24 : * @node refers to an xa_node; usually the primary one being operated on by
25 : * this function.
26 : * @offset is the index into the slots array inside an xa_node.
27 : * @parent refers to the @xa_node closer to the head than @node.
28 : * @entry refers to something stored in a slot in the xarray
29 : */
30 :
31 : static inline unsigned int xa_lock_type(const struct xarray *xa)
32 : {
33 2 : return (__force unsigned int)xa->xa_flags & 3;
34 : }
35 :
36 : static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type)
37 : {
38 0 : if (lock_type == XA_LOCK_IRQ)
39 0 : xas_lock_irq(xas);
40 0 : else if (lock_type == XA_LOCK_BH)
41 0 : xas_lock_bh(xas);
42 : else
43 0 : xas_lock(xas);
44 : }
45 :
46 0 : static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type)
47 : {
48 0 : if (lock_type == XA_LOCK_IRQ)
49 0 : xas_unlock_irq(xas);
50 0 : else if (lock_type == XA_LOCK_BH)
51 0 : xas_unlock_bh(xas);
52 : else
53 0 : xas_unlock(xas);
54 0 : }
55 :
56 : static inline bool xa_track_free(const struct xarray *xa)
57 : {
58 6 : return xa->xa_flags & XA_FLAGS_TRACK_FREE;
59 : }
60 :
61 : static inline bool xa_zero_busy(const struct xarray *xa)
62 : {
63 7 : return xa->xa_flags & XA_FLAGS_ZERO_BUSY;
64 : }
65 :
66 : static inline void xa_mark_set(struct xarray *xa, xa_mark_t mark)
67 : {
68 0 : if (!(xa->xa_flags & XA_FLAGS_MARK(mark)))
69 0 : xa->xa_flags |= XA_FLAGS_MARK(mark);
70 : }
71 :
72 : static inline void xa_mark_clear(struct xarray *xa, xa_mark_t mark)
73 : {
74 0 : if (xa->xa_flags & XA_FLAGS_MARK(mark))
75 0 : xa->xa_flags &= ~(XA_FLAGS_MARK(mark));
76 : }
77 :
78 : static inline unsigned long *node_marks(struct xa_node *node, xa_mark_t mark)
79 : {
80 0 : return node->marks[(__force unsigned)mark];
81 : }
82 :
83 : static inline bool node_get_mark(struct xa_node *node,
84 : unsigned int offset, xa_mark_t mark)
85 : {
86 0 : return test_bit(offset, node_marks(node, mark));
87 : }
88 :
89 : /* returns true if the bit was already set */
90 : static inline bool node_set_mark(struct xa_node *node, unsigned int offset,
91 : xa_mark_t mark)
92 : {
93 0 : return __test_and_set_bit(offset, node_marks(node, mark));
94 : }
95 :
96 : /* returns true if the bit was set */
97 : static inline bool node_clear_mark(struct xa_node *node, unsigned int offset,
98 : xa_mark_t mark)
99 : {
100 0 : return __test_and_clear_bit(offset, node_marks(node, mark));
101 : }
102 :
103 : static inline bool node_any_mark(struct xa_node *node, xa_mark_t mark)
104 : {
105 0 : return !bitmap_empty(node_marks(node, mark), XA_CHUNK_SIZE);
106 : }
107 :
108 : static inline void node_mark_all(struct xa_node *node, xa_mark_t mark)
109 : {
110 0 : bitmap_fill(node_marks(node, mark), XA_CHUNK_SIZE);
111 : }
112 :
113 : #define mark_inc(mark) do { \
114 : mark = (__force xa_mark_t)((__force unsigned)(mark) + 1); \
115 : } while (0)
116 :
117 : /*
118 : * xas_squash_marks() - Merge all marks to the first entry
119 : * @xas: Array operation state.
120 : *
121 : * Set a mark on the first entry if any entry has it set. Clear marks on
122 : * all sibling entries.
123 : */
124 0 : static void xas_squash_marks(const struct xa_state *xas)
125 : {
126 0 : unsigned int mark = 0;
127 0 : unsigned int limit = xas->xa_offset + xas->xa_sibs + 1;
128 :
129 0 : if (!xas->xa_sibs)
130 : return;
131 :
132 : do {
133 0 : unsigned long *marks = xas->xa_node->marks[mark];
134 0 : if (find_next_bit(marks, limit, xas->xa_offset + 1) == limit)
135 0 : continue;
136 0 : __set_bit(xas->xa_offset, marks);
137 0 : bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs);
138 0 : } while (mark++ != (__force unsigned)XA_MARK_MAX);
139 : }
140 :
141 : /* extracts the offset within this node from the index */
142 : static unsigned int get_offset(unsigned long index, struct xa_node *node)
143 : {
144 6 : return (index >> node->shift) & XA_CHUNK_MASK;
145 : }
146 :
147 : static void xas_set_offset(struct xa_state *xas)
148 : {
149 0 : xas->xa_offset = get_offset(xas->xa_index, xas->xa_node);
150 : }
151 :
152 : /* move the index either forwards (find) or backwards (sibling slot) */
153 : static void xas_move_index(struct xa_state *xas, unsigned long offset)
154 : {
155 0 : unsigned int shift = xas->xa_node->shift;
156 0 : xas->xa_index &= ~XA_CHUNK_MASK << shift;
157 0 : xas->xa_index += offset << shift;
158 : }
159 :
160 : static void xas_next_offset(struct xa_state *xas)
161 : {
162 0 : xas->xa_offset++;
163 0 : xas_move_index(xas, xas->xa_offset);
164 : }
165 :
166 : static void *set_bounds(struct xa_state *xas)
167 : {
168 1 : xas->xa_node = XAS_BOUNDS;
169 : return NULL;
170 : }
171 :
172 : /*
173 : * Starts a walk. If the @xas is already valid, we assume that it's on
174 : * the right path and just return where we've got to. If we're in an
175 : * error state, return NULL. If the index is outside the current scope
176 : * of the xarray, return NULL without changing @xas->xa_node. Otherwise
177 : * set @xas->xa_node to NULL and return the current head of the array.
178 : */
179 2 : static void *xas_start(struct xa_state *xas)
180 : {
181 : void *entry;
182 :
183 2 : if (xas_valid(xas))
184 : return xas_reload(xas);
185 4 : if (xas_error(xas))
186 : return NULL;
187 :
188 4 : entry = xa_head(xas->xa);
189 2 : if (!xa_is_node(entry)) {
190 1 : if (xas->xa_index)
191 2 : return set_bounds(xas);
192 : } else {
193 2 : if ((xas->xa_index >> xa_to_node(entry)->shift) > XA_CHUNK_MASK)
194 0 : return set_bounds(xas);
195 : }
196 :
197 1 : xas->xa_node = NULL;
198 1 : return entry;
199 : }
200 :
201 : static void *xas_descend(struct xa_state *xas, struct xa_node *node)
202 : {
203 12 : unsigned int offset = get_offset(xas->xa_index, node);
204 12 : void *entry = xa_entry(xas->xa, node, offset);
205 :
206 6 : xas->xa_node = node;
207 : if (xa_is_sibling(entry)) {
208 : offset = xa_to_sibling(entry);
209 : entry = xa_entry(xas->xa, node, offset);
210 : if (node->shift && xa_is_node(entry))
211 : entry = XA_RETRY_ENTRY;
212 : }
213 :
214 6 : xas->xa_offset = offset;
215 : return entry;
216 : }
217 :
218 : /**
219 : * xas_load() - Load an entry from the XArray (advanced).
220 : * @xas: XArray operation state.
221 : *
222 : * Usually walks the @xas to the appropriate state to load the entry
223 : * stored at xa_index. However, it will do nothing and return %NULL if
224 : * @xas is in an error state. xas_load() will never expand the tree.
225 : *
226 : * If the xa_state is set up to operate on a multi-index entry, xas_load()
227 : * may return %NULL or an internal entry, even if there are entries
228 : * present within the range specified by @xas.
229 : *
230 : * Context: Any context. The caller should hold the xa_lock or the RCU lock.
231 : * Return: Usually an entry in the XArray, but see description for exceptions.
232 : */
233 2 : void *xas_load(struct xa_state *xas)
234 : {
235 2 : void *entry = xas_start(xas);
236 :
237 5 : while (xa_is_node(entry)) {
238 2 : struct xa_node *node = xa_to_node(entry);
239 :
240 2 : if (xas->xa_shift > node->shift)
241 : break;
242 2 : entry = xas_descend(xas, node);
243 2 : if (node->shift == 0)
244 : break;
245 : }
246 2 : return entry;
247 : }
248 : EXPORT_SYMBOL_GPL(xas_load);
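/*
 * Example (illustrative sketch, not part of this file): a typical
 * RCU-protected lookup using the advanced API. "my_xa" and "index"
 * are hypothetical; xas_retry() restarts the walk if it finds an
 * XA_RETRY_ENTRY left behind by a concurrent modification.
 *
 *	XA_STATE(xas, &my_xa, index);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	do {
 *		entry = xas_load(&xas);
 *	} while (xas_retry(&xas, entry));
 *	rcu_read_unlock();
 */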
249 :
250 : /* Move the radix tree node cache here */
251 : extern struct kmem_cache *radix_tree_node_cachep;
252 : extern void radix_tree_node_rcu_free(struct rcu_head *head);
253 :
254 : #define XA_RCU_FREE ((struct xarray *)1)
255 :
256 : static void xa_node_free(struct xa_node *node)
257 : {
258 : XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
259 0 : node->array = XA_RCU_FREE;
260 0 : call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
261 : }
262 :
263 : /*
264 : * xas_destroy() - Free any resources allocated during the XArray operation.
265 : * @xas: XArray operation state.
266 : *
267 : * This function is now internal-only.
268 : */
269 : static void xas_destroy(struct xa_state *xas)
270 : {
271 209 : struct xa_node *next, *node = xas->xa_alloc;
272 :
273 209 : while (node) {
274 : XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
275 0 : next = rcu_dereference_raw(node->parent);
276 0 : radix_tree_node_rcu_free(&node->rcu_head);
277 0 : xas->xa_alloc = node = next;
278 : }
279 : }
280 :
281 : /**
282 : * xas_nomem() - Allocate memory if needed.
283 : * @xas: XArray operation state.
284 : * @gfp: Memory allocation flags.
285 : *
286 : * If we need to add new nodes to the XArray, we try to allocate memory
287 : * with GFP_NOWAIT while holding the lock, which will usually succeed.
288 : * If it fails, @xas is flagged as needing memory to continue. The caller
289 : * should drop the lock and call xas_nomem(). If xas_nomem() succeeds,
290 : * the caller should retry the operation.
291 : *
292 : * Forward progress is guaranteed as one node is allocated here and
293 : * stored in the xa_state where it will be found by xas_alloc(). More
294 : * nodes will likely be found in the slab allocator, but we do not tie
295 : * them up here.
296 : *
297 : * Return: true if memory was needed, and was successfully allocated.
298 : */
299 207 : bool xas_nomem(struct xa_state *xas, gfp_t gfp)
300 : {
301 207 : if (xas->xa_node != XA_ERROR(-ENOMEM)) {
302 207 : xas_destroy(xas);
303 : return false;
304 : }
305 0 : if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
306 0 : gfp |= __GFP_ACCOUNT;
307 0 : xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
308 0 : if (!xas->xa_alloc)
309 : return false;
310 0 : xas->xa_alloc->parent = NULL;
311 : XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
312 0 : xas->xa_node = XAS_RESTART;
313 0 : return true;
314 : }
315 : EXPORT_SYMBOL_GPL(xas_nomem);
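/*
 * Example (sketch, assuming process context): the allocate-and-retry
 * pattern described above. If xas_store() fails with -ENOMEM while
 * the lock is held, xas_nomem() allocates a node with the caller's
 * gfp flags and asks us to retry. "my_xa", "index" and "item" are
 * hypothetical.
 *
 *	XA_STATE(xas, &my_xa, index);
 *
 *	do {
 *		xas_lock(&xas);
 *		xas_store(&xas, item);
 *		xas_unlock(&xas);
 *	} while (xas_nomem(&xas, GFP_KERNEL));
 *	return xas_error(&xas);
 */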
316 :
317 : /*
318 : * __xas_nomem() - Drop locks and allocate memory if needed.
319 : * @xas: XArray operation state.
320 : * @gfp: Memory allocation flags.
321 : *
322 : * Internal variant of xas_nomem().
323 : *
324 : * Return: true if memory was needed, and was successfully allocated.
325 : */
326 2 : static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
327 : __must_hold(xas->xa->xa_lock)
328 : {
329 4 : unsigned int lock_type = xa_lock_type(xas->xa);
330 :
331 2 : if (xas->xa_node != XA_ERROR(-ENOMEM)) {
332 2 : xas_destroy(xas);
333 : return false;
334 : }
335 0 : if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
336 0 : gfp |= __GFP_ACCOUNT;
337 0 : if (gfpflags_allow_blocking(gfp)) {
338 0 : xas_unlock_type(xas, lock_type);
339 0 : xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
340 0 : xas_lock_type(xas, lock_type);
341 : } else {
342 0 : xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
343 : }
344 0 : if (!xas->xa_alloc)
345 : return false;
346 0 : xas->xa_alloc->parent = NULL;
347 : XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
348 0 : xas->xa_node = XAS_RESTART;
349 0 : return true;
350 : }
351 :
352 : static void xas_update(struct xa_state *xas, struct xa_node *node)
353 : {
354 3 : if (xas->xa_update)
355 0 : xas->xa_update(node);
356 : else
357 : XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
358 : }
359 :
360 2 : static void *xas_alloc(struct xa_state *xas, unsigned int shift)
361 : {
362 2 : struct xa_node *parent = xas->xa_node;
363 2 : struct xa_node *node = xas->xa_alloc;
364 :
365 4 : if (xas_invalid(xas))
366 : return NULL;
367 :
368 2 : if (node) {
369 0 : xas->xa_alloc = NULL;
370 : } else {
371 2 : gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN;
372 :
373 2 : if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
374 0 : gfp |= __GFP_ACCOUNT;
375 :
376 2 : node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
377 2 : if (!node) {
378 0 : xas_set_err(xas, -ENOMEM);
379 0 : return NULL;
380 : }
381 : }
382 :
383 2 : if (parent) {
384 1 : node->offset = xas->xa_offset;
385 1 : parent->count++;
386 : XA_NODE_BUG_ON(node, parent->count > XA_CHUNK_SIZE);
387 1 : xas_update(xas, parent);
388 : }
389 : XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
390 : XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
391 2 : node->shift = shift;
392 2 : node->count = 0;
393 2 : node->nr_values = 0;
394 2 : RCU_INIT_POINTER(node->parent, xas->xa_node);
395 2 : node->array = xas->xa;
396 :
397 2 : return node;
398 : }
399 :
400 : #ifdef CONFIG_XARRAY_MULTI
401 : /* Returns the number of indices covered by a given xa_state */
402 : static unsigned long xas_size(const struct xa_state *xas)
403 : {
404 : return (xas->xa_sibs + 1UL) << xas->xa_shift;
405 : }
406 : #endif
407 :
408 : /*
409 : * Use this to calculate the maximum index that will need to be created
410 : * in order to add the entry described by @xas. Because we cannot store a
411 : * multi-index entry at index 0, the calculation is a little more complex
412 : * than you might expect.
413 : */
414 : static unsigned long xas_max(struct xa_state *xas)
415 : {
416 91 : unsigned long max = xas->xa_index;
417 :
418 : #ifdef CONFIG_XARRAY_MULTI
419 : if (xas->xa_shift || xas->xa_sibs) {
420 : unsigned long mask = xas_size(xas) - 1;
421 : max |= mask;
422 : if (mask == max)
423 : max++;
424 : }
425 : #endif
426 :
427 : return max;
428 : }
429 :
430 : /* The maximum index that can be contained in the array without expanding it */
431 : static unsigned long max_index(void *entry)
432 : {
433 291 : if (!xa_is_node(entry))
434 : return 0;
435 1 : return (XA_CHUNK_SIZE << xa_to_node(entry)->shift) - 1;
436 : }
437 :
438 0 : static void xas_shrink(struct xa_state *xas)
439 : {
440 0 : struct xarray *xa = xas->xa;
441 0 : struct xa_node *node = xas->xa_node;
442 :
443 0 : for (;;) {
444 : void *entry;
445 :
446 : XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
447 0 : if (node->count != 1)
448 : break;
449 0 : entry = xa_entry_locked(xa, node, 0);
450 0 : if (!entry)
451 : break;
452 0 : if (!xa_is_node(entry) && node->shift)
453 : break;
454 0 : if (xa_is_zero(entry) && xa_zero_busy(xa))
455 0 : entry = NULL;
456 0 : xas->xa_node = XAS_BOUNDS;
457 :
458 0 : RCU_INIT_POINTER(xa->xa_head, entry);
459 0 : if (xa_track_free(xa) && !node_get_mark(node, 0, XA_FREE_MARK))
460 0 : xa_mark_clear(xa, XA_FREE_MARK);
461 :
462 0 : node->count = 0;
463 0 : node->nr_values = 0;
464 0 : if (!xa_is_node(entry))
465 0 : RCU_INIT_POINTER(node->slots[0], XA_RETRY_ENTRY);
466 0 : xas_update(xas, node);
467 0 : xa_node_free(node);
468 0 : if (!xa_is_node(entry))
469 : break;
470 0 : node = xa_to_node(entry);
471 0 : node->parent = NULL;
472 : }
473 0 : }
474 :
475 : /*
476 : * xas_delete_node() - Attempt to delete an xa_node
477 : * @xas: Array operation state.
478 : *
479 : * Attempts to delete the @xas->xa_node. This will fail if @xas->xa_node has
480 : * a non-zero reference count.
481 : */
482 0 : static void xas_delete_node(struct xa_state *xas)
483 : {
484 0 : struct xa_node *node = xas->xa_node;
485 :
486 : for (;;) {
487 : struct xa_node *parent;
488 :
489 : XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
490 0 : if (node->count)
491 : break;
492 :
493 0 : parent = xa_parent_locked(xas->xa, node);
494 0 : xas->xa_node = parent;
495 0 : xas->xa_offset = node->offset;
496 0 : xa_node_free(node);
497 :
498 0 : if (!parent) {
499 0 : xas->xa->xa_head = NULL;
500 0 : xas->xa_node = XAS_BOUNDS;
501 0 : return;
502 : }
503 :
504 0 : parent->slots[xas->xa_offset] = NULL;
505 0 : parent->count--;
506 : XA_NODE_BUG_ON(parent, parent->count > XA_CHUNK_SIZE);
507 0 : node = parent;
508 0 : xas_update(xas, node);
509 : }
510 :
511 0 : if (!node->parent)
512 0 : xas_shrink(xas);
513 : }
514 :
515 : /**
516 : * xas_free_nodes() - Free this node and all nodes that it references
517 : * @xas: Array operation state.
518 : * @top: Node to free
519 : *
520 : * This node has been removed from the tree. We must now free it and all
521 : * of its subnodes. There may be RCU walkers with references into the tree,
522 : * so we must replace all entries with retry markers.
523 : */
524 0 : static void xas_free_nodes(struct xa_state *xas, struct xa_node *top)
525 : {
526 0 : unsigned int offset = 0;
527 0 : struct xa_node *node = top;
528 :
529 : for (;;) {
530 0 : void *entry = xa_entry_locked(xas->xa, node, offset);
531 :
532 0 : if (node->shift && xa_is_node(entry)) {
533 0 : node = xa_to_node(entry);
534 0 : offset = 0;
535 0 : continue;
536 : }
537 0 : if (entry)
538 0 : RCU_INIT_POINTER(node->slots[offset], XA_RETRY_ENTRY);
539 0 : offset++;
540 0 : while (offset == XA_CHUNK_SIZE) {
541 : struct xa_node *parent;
542 :
543 0 : parent = xa_parent_locked(xas->xa, node);
544 0 : offset = node->offset + 1;
545 0 : node->count = 0;
546 0 : node->nr_values = 0;
547 0 : xas_update(xas, node);
548 0 : xa_node_free(node);
549 0 : if (node == top)
550 0 : return;
551 : node = parent;
552 : }
553 : }
554 : }
555 :
556 : /*
557 : * xas_expand() adds nodes to the head of the tree until it has reached
558 : * sufficient height to be able to contain @xas->xa_index.
559 : */
560 91 : static int xas_expand(struct xa_state *xas, void *head)
561 : {
562 91 : struct xarray *xa = xas->xa;
563 91 : struct xa_node *node = NULL;
564 91 : unsigned int shift = 0;
565 182 : unsigned long max = xas_max(xas);
566 :
567 91 : if (!head) {
568 7 : if (max == 0)
569 : return 0;
570 2 : while ((max >> shift) >= XA_CHUNK_SIZE)
571 1 : shift += XA_CHUNK_SHIFT;
572 1 : return shift + XA_CHUNK_SHIFT;
573 84 : } else if (xa_is_node(head)) {
574 1 : node = xa_to_node(head);
575 1 : shift = node->shift + XA_CHUNK_SHIFT;
576 : }
577 84 : xas->xa_node = NULL;
578 :
579 168 : while (max > max_index(head)) {
580 0 : xa_mark_t mark = 0;
581 :
582 : XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
583 0 : node = xas_alloc(xas, shift);
584 0 : if (!node)
585 : return -ENOMEM;
586 :
587 0 : node->count = 1;
588 0 : if (xa_is_value(head))
589 0 : node->nr_values = 1;
590 0 : RCU_INIT_POINTER(node->slots[0], head);
591 :
592 : /* Propagate the aggregated mark info to the new child */
593 : for (;;) {
594 0 : if (xa_track_free(xa) && mark == XA_FREE_MARK) {
595 0 : node_mark_all(node, XA_FREE_MARK);
596 0 : if (!xa_marked(xa, XA_FREE_MARK)) {
597 0 : node_clear_mark(node, 0, XA_FREE_MARK);
598 0 : xa_mark_set(xa, XA_FREE_MARK);
599 : }
600 0 : } else if (xa_marked(xa, mark)) {
601 : node_set_mark(node, 0, mark);
602 : }
603 0 : if (mark == XA_MARK_MAX)
604 : break;
605 0 : mark_inc(mark);
606 : }
607 :
608 : /*
609 : * Now that the new node is fully initialised, we can add
610 : * it to the tree
611 : */
612 0 : if (xa_is_node(head)) {
613 0 : xa_to_node(head)->offset = 0;
614 0 : rcu_assign_pointer(xa_to_node(head)->parent, node);
615 : }
616 0 : head = xa_mk_node(node);
617 0 : rcu_assign_pointer(xa->xa_head, head);
618 0 : xas_update(xas, node);
619 :
620 0 : shift += XA_CHUNK_SHIFT;
621 : }
622 :
623 84 : xas->xa_node = node;
624 84 : return shift;
625 : }
626 :
627 : /*
628 : * xas_create() - Create a slot to store an entry in.
629 : * @xas: XArray operation state.
630 : * @allow_root: %true if we can store the entry in the root directly
631 : *
632 : * Most users will not need to call this function directly, as it is called
633 : * by xas_store(). It is useful for doing conditional store operations
634 : * (see the xa_cmpxchg() implementation for an example).
635 : *
636 : * Return: If the slot already existed, returns the contents of this slot.
637 : * If the slot was newly created, returns %NULL. If it failed to create the
638 : * slot, returns %NULL and indicates the error in @xas.
639 : */
640 91 : static void *xas_create(struct xa_state *xas, bool allow_root)
641 : {
642 91 : struct xarray *xa = xas->xa;
643 : void *entry;
644 : void __rcu **slot;
645 91 : struct xa_node *node = xas->xa_node;
646 : int shift;
647 91 : unsigned int order = xas->xa_shift;
648 :
649 91 : if (xas_top(node)) {
650 91 : entry = xa_head_locked(xa);
651 91 : xas->xa_node = NULL;
652 98 : if (!entry && xa_zero_busy(xa))
653 0 : entry = XA_ZERO_ENTRY;
654 91 : shift = xas_expand(xas, entry);
655 91 : if (shift < 0)
656 : return NULL;
657 91 : if (!shift && !allow_root)
658 0 : shift = XA_CHUNK_SHIFT;
659 91 : entry = xa_head_locked(xa);
660 91 : slot = &xa->xa_head;
661 0 : } else if (xas_error(xas)) {
662 : return NULL;
663 0 : } else if (node) {
664 0 : unsigned int offset = xas->xa_offset;
665 :
666 0 : shift = node->shift;
667 0 : entry = xa_entry_locked(xa, node, offset);
668 0 : slot = &node->slots[offset];
669 : } else {
670 0 : shift = 0;
671 0 : entry = xa_head_locked(xa);
672 0 : slot = &xa->xa_head;
673 : }
674 :
675 95 : while (shift > order) {
676 4 : shift -= XA_CHUNK_SHIFT;
677 4 : if (!entry) {
678 2 : node = xas_alloc(xas, shift);
679 2 : if (!node)
680 : break;
681 4 : if (xa_track_free(xa))
682 : node_mark_all(node, XA_FREE_MARK);
683 2 : rcu_assign_pointer(*slot, xa_mk_node(node));
684 2 : } else if (xa_is_node(entry)) {
685 2 : node = xa_to_node(entry);
686 : } else {
687 : break;
688 : }
689 4 : entry = xas_descend(xas, node);
690 4 : slot = &node->slots[xas->xa_offset];
691 : }
692 :
693 : return entry;
694 : }
695 :
696 : /**
697 : * xas_create_range() - Ensure that stores to this range will succeed
698 : * @xas: XArray operation state.
699 : *
700 : * Creates all of the slots in the range covered by @xas. Sets @xas to
701 : * create single-index entries and positions it at the beginning of the
702 : * range. This is for the benefit of users which have not yet been
703 : * converted to use multi-index entries.
704 : */
705 0 : void xas_create_range(struct xa_state *xas)
706 : {
707 0 : unsigned long index = xas->xa_index;
708 0 : unsigned char shift = xas->xa_shift;
709 0 : unsigned char sibs = xas->xa_sibs;
710 :
711 0 : xas->xa_index |= ((sibs + 1UL) << shift) - 1;
712 0 : if (xas_is_node(xas) && xas->xa_node->shift == xas->xa_shift)
713 0 : xas->xa_offset |= sibs;
714 0 : xas->xa_shift = 0;
715 0 : xas->xa_sibs = 0;
716 :
717 : for (;;) {
718 0 : xas_create(xas, true);
719 0 : if (xas_error(xas))
720 : goto restore;
721 0 : if (xas->xa_index <= (index | XA_CHUNK_MASK))
722 : goto success;
723 0 : xas->xa_index -= XA_CHUNK_SIZE;
724 :
725 : for (;;) {
726 0 : struct xa_node *node = xas->xa_node;
727 0 : if (node->shift >= shift)
728 : break;
729 0 : xas->xa_node = xa_parent_locked(xas->xa, node);
730 0 : xas->xa_offset = node->offset - 1;
731 0 : if (node->offset != 0)
732 : break;
733 : }
734 : }
735 :
736 : restore:
737 0 : xas->xa_shift = shift;
738 0 : xas->xa_sibs = sibs;
739 0 : xas->xa_index = index;
740 0 : return;
741 : success:
742 0 : xas->xa_index = index;
743 0 : if (xas->xa_node)
744 : xas_set_offset(xas);
745 : }
746 : EXPORT_SYMBOL_GPL(xas_create_range);
747 :
748 91 : static void update_node(struct xa_state *xas, struct xa_node *node,
749 : int count, int values)
750 : {
751 91 : if (!node || (!count && !values))
752 : return;
753 :
754 2 : node->count += count;
755 2 : node->nr_values += values;
756 : XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
757 : XA_NODE_BUG_ON(node, node->nr_values > XA_CHUNK_SIZE);
758 4 : xas_update(xas, node);
759 2 : if (count < 0)
760 0 : xas_delete_node(xas);
761 : }
762 :
763 : /**
764 : * xas_store() - Store this entry in the XArray.
765 : * @xas: XArray operation state.
766 : * @entry: New entry.
767 : *
768 : * If @xas is operating on a multi-index entry, the entry returned by this
769 : * function is essentially meaningless (it may be an internal entry or it
770 : * may be %NULL, even if there are non-NULL entries at some of the indices
771 : * covered by the range). This is not a problem for any current users,
772 : * and can be changed if needed.
773 : *
774 : * Return: The old entry at this index.
775 : */
776 91 : void *xas_store(struct xa_state *xas, void *entry)
777 : {
778 : struct xa_node *node;
779 91 : void __rcu **slot = &xas->xa->xa_head;
780 : unsigned int offset, max;
781 91 : int count = 0;
782 91 : int values = 0;
783 : void *first, *next;
784 91 : bool value = xa_is_value(entry);
785 :
786 91 : if (entry) {
787 182 : bool allow_root = !xa_is_node(entry) && !xa_is_zero(entry);
788 91 : first = xas_create(xas, allow_root);
789 : } else {
790 0 : first = xas_load(xas);
791 : }
792 :
793 182 : if (xas_invalid(xas))
794 : return first;
795 91 : node = xas->xa_node;
796 91 : if (node && (xas->xa_shift < node->shift))
797 0 : xas->xa_sibs = 0;
798 91 : if ((first == entry) && !xas->xa_sibs)
799 : return first;
800 :
801 91 : next = first;
802 91 : offset = xas->xa_offset;
803 91 : max = xas->xa_offset + xas->xa_sibs;
804 91 : if (node) {
805 2 : slot = &node->slots[offset];
806 2 : if (xas->xa_sibs)
807 0 : xas_squash_marks(xas);
808 : }
809 91 : if (!entry)
810 0 : xas_init_marks(xas);
811 :
812 : for (;;) {
813 : /*
814 : * Must clear the marks before setting the entry to NULL,
815 : * otherwise xas_for_each_marked may find a NULL entry and
816 : * stop early. rcu_assign_pointer contains a release barrier
817 : * so the mark clearing will appear to happen before the
818 : * entry is set to NULL.
819 : */
820 91 : rcu_assign_pointer(*slot, entry);
821 91 : if (xa_is_node(next) && (!node || node->shift))
822 0 : xas_free_nodes(xas, xa_to_node(next));
823 91 : if (!node)
824 : break;
825 2 : count += !next - !entry;
826 2 : values += !xa_is_value(first) - !value;
827 2 : if (entry) {
828 2 : if (offset == max)
829 : break;
830 0 : if (!xa_is_sibling(entry))
831 0 : entry = xa_mk_sibling(xas->xa_offset);
832 : } else {
833 0 : if (offset == XA_CHUNK_MASK)
834 : break;
835 : }
836 0 : next = xa_entry_locked(xas->xa, node, ++offset);
837 : if (!xa_is_sibling(next)) {
838 0 : if (!entry && (offset > max))
839 : break;
840 0 : first = next;
841 : }
842 0 : slot++;
843 : }
844 :
845 91 : update_node(xas, node, count, values);
846 91 : return first;
847 : }
848 : EXPORT_SYMBOL_GPL(xas_store);
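/*
 * Example (sketch): using xas_store() during a locked walk to erase
 * entries matching a hypothetical predicate should_erase(). Storing
 * NULL deletes the entry and lets xas_store() prune nodes that
 * become empty.
 *
 *	XA_STATE(xas, &my_xa, 0);
 *	void *entry;
 *
 *	xas_lock(&xas);
 *	xas_for_each(&xas, entry, ULONG_MAX) {
 *		if (should_erase(entry))
 *			xas_store(&xas, NULL);
 *	}
 *	xas_unlock(&xas);
 */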
849 :
850 : /**
851 : * xas_get_mark() - Returns the state of this mark.
852 : * @xas: XArray operation state.
853 : * @mark: Mark number.
854 : *
855 : * Return: true if the mark is set, false if the mark is clear or @xas
856 : * is in an error state.
857 : */
858 0 : bool xas_get_mark(const struct xa_state *xas, xa_mark_t mark)
859 : {
860 0 : if (xas_invalid(xas))
861 : return false;
862 0 : if (!xas->xa_node)
863 0 : return xa_marked(xas->xa, mark);
864 0 : return node_get_mark(xas->xa_node, xas->xa_offset, mark);
865 : }
866 : EXPORT_SYMBOL_GPL(xas_get_mark);
867 :
868 : /**
869 : * xas_set_mark() - Sets the mark on this entry and its parents.
870 : * @xas: XArray operation state.
871 : * @mark: Mark number.
872 : *
873 : * Sets the specified mark on this entry, and walks up the tree setting it
874 : * on all the ancestor entries. Does nothing if @xas has not been walked to
875 : * an entry, or is in an error state.
876 : */
877 0 : void xas_set_mark(const struct xa_state *xas, xa_mark_t mark)
878 : {
879 0 : struct xa_node *node = xas->xa_node;
880 0 : unsigned int offset = xas->xa_offset;
881 :
882 0 : if (xas_invalid(xas))
883 : return;
884 :
885 0 : while (node) {
886 0 : if (node_set_mark(node, offset, mark))
887 : return;
888 0 : offset = node->offset;
889 0 : node = xa_parent_locked(xas->xa, node);
890 : }
891 :
892 0 : if (!xa_marked(xas->xa, mark))
893 0 : xa_mark_set(xas->xa, mark);
894 : }
895 : EXPORT_SYMBOL_GPL(xas_set_mark);
896 :
897 : /**
898 : * xas_clear_mark() - Clears the mark on this entry and its parents.
899 : * @xas: XArray operation state.
900 : * @mark: Mark number.
901 : *
902 : * Clears the specified mark on this entry, and walks back to the head
903 : * attempting to clear it on all the ancestor entries. Does nothing if
904 : * @xas has not been walked to an entry, or is in an error state.
905 : */
906 0 : void xas_clear_mark(const struct xa_state *xas, xa_mark_t mark)
907 : {
908 0 : struct xa_node *node = xas->xa_node;
909 0 : unsigned int offset = xas->xa_offset;
910 :
911 0 : if (xas_invalid(xas))
912 : return;
913 :
914 0 : while (node) {
915 0 : if (!node_clear_mark(node, offset, mark))
916 : return;
917 0 : if (node_any_mark(node, mark))
918 : return;
919 :
920 0 : offset = node->offset;
921 0 : node = xa_parent_locked(xas->xa, node);
922 : }
923 :
924 0 : if (xa_marked(xas->xa, mark))
925 0 : xa_mark_clear(xas->xa, mark);
926 : }
927 : EXPORT_SYMBOL_GPL(xas_clear_mark);
928 :
929 : /**
930 : * xas_init_marks() - Initialise all marks for the entry
931 : * @xas: Array operations state.
932 : *
933 : * Initialise all marks for the entry specified by @xas. If we're tracking
934 : * free entries with a mark, we need to set it on all entries. All other
935 : * marks are cleared.
936 : *
937 : * This implementation is not as efficient as it could be; we may walk
938 : * up the tree multiple times.
939 : */
940 0 : void xas_init_marks(const struct xa_state *xas)
941 : {
942 0 : xa_mark_t mark = 0;
943 :
944 : for (;;) {
945 0 : if (xa_track_free(xas->xa) && mark == XA_FREE_MARK)
946 0 : xas_set_mark(xas, mark);
947 : else
948 0 : xas_clear_mark(xas, mark);
949 0 : if (mark == XA_MARK_MAX)
950 : break;
951 0 : mark_inc(mark);
952 : }
953 0 : }
954 : EXPORT_SYMBOL_GPL(xas_init_marks);
955 :
956 : #ifdef CONFIG_XARRAY_MULTI
957 : static unsigned int node_get_marks(struct xa_node *node, unsigned int offset)
958 : {
959 : unsigned int marks = 0;
960 : xa_mark_t mark = XA_MARK_0;
961 :
962 : for (;;) {
963 : if (node_get_mark(node, offset, mark))
964 : marks |= 1 << (__force unsigned int)mark;
965 : if (mark == XA_MARK_MAX)
966 : break;
967 : mark_inc(mark);
968 : }
969 :
970 : return marks;
971 : }
972 :
973 : static void node_set_marks(struct xa_node *node, unsigned int offset,
974 : struct xa_node *child, unsigned int marks)
975 : {
976 : xa_mark_t mark = XA_MARK_0;
977 :
978 : for (;;) {
979 : if (marks & (1 << (__force unsigned int)mark)) {
980 : node_set_mark(node, offset, mark);
981 : if (child)
982 : node_mark_all(child, mark);
983 : }
984 : if (mark == XA_MARK_MAX)
985 : break;
986 : mark_inc(mark);
987 : }
988 : }
989 :
990 : /**
991 : * xas_split_alloc() - Allocate memory for splitting an entry.
992 : * @xas: XArray operation state.
993 : * @entry: New entry which will be stored in the array.
994 : * @order: Current entry order.
995 : * @gfp: Memory allocation flags.
996 : *
997 : * This function should be called before calling xas_split().
998 : * If necessary, it will allocate new nodes (and fill them with @entry)
999 : * to prepare for the upcoming split of an entry of @order size into
1000 : * entries of the order stored in the @xas.
1001 : *
1002 : * Context: May sleep if @gfp flags permit.
1003 : */
1004 : void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
1005 : gfp_t gfp)
1006 : {
1007 : unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
1008 : unsigned int mask = xas->xa_sibs;
1009 :
1010 : /* XXX: no support for splitting really large entries yet */
1011 : if (WARN_ON(xas->xa_shift + 2 * XA_CHUNK_SHIFT < order))
1012 : goto nomem;
1013 : if (xas->xa_shift + XA_CHUNK_SHIFT > order)
1014 : return;
1015 :
1016 : do {
1017 : unsigned int i;
1018 : void *sibling = NULL;
1019 : struct xa_node *node;
1020 :
1021 : node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
1022 : if (!node)
1023 : goto nomem;
1024 : node->array = xas->xa;
1025 : for (i = 0; i < XA_CHUNK_SIZE; i++) {
1026 : if ((i & mask) == 0) {
1027 : RCU_INIT_POINTER(node->slots[i], entry);
1028 : sibling = xa_mk_sibling(i);
1029 : } else {
1030 : RCU_INIT_POINTER(node->slots[i], sibling);
1031 : }
1032 : }
1033 : RCU_INIT_POINTER(node->parent, xas->xa_alloc);
1034 : xas->xa_alloc = node;
1035 : } while (sibs-- > 0);
1036 :
1037 : return;
1038 : nomem:
1039 : xas_destroy(xas);
1040 : xas_set_err(xas, -ENOMEM);
1041 : }
1042 : EXPORT_SYMBOL_GPL(xas_split_alloc);
1043 :
1044 : /**
1045 : * xas_split() - Split a multi-index entry into smaller entries.
1046 : * @xas: XArray operation state.
1047 : * @entry: New entry to store in the array.
1048 : * @order: Current entry order.
1049 : *
1050 : * The size of the new entries is set in @xas. The value in @entry is
1051 : * copied to all the replacement entries.
1052 : *
1053 : * Context: Any context. The caller should hold the xa_lock.
1054 : */
1055 : void xas_split(struct xa_state *xas, void *entry, unsigned int order)
1056 : {
1057 : unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
1058 : unsigned int offset, marks;
1059 : struct xa_node *node;
1060 : void *curr = xas_load(xas);
1061 : int values = 0;
1062 :
1063 : node = xas->xa_node;
1064 : if (xas_top(node))
1065 : return;
1066 :
1067 : marks = node_get_marks(node, xas->xa_offset);
1068 :
1069 : offset = xas->xa_offset + sibs;
1070 : do {
1071 : if (xas->xa_shift < node->shift) {
1072 : struct xa_node *child = xas->xa_alloc;
1073 :
1074 : xas->xa_alloc = rcu_dereference_raw(child->parent);
1075 : child->shift = node->shift - XA_CHUNK_SHIFT;
1076 : child->offset = offset;
1077 : child->count = XA_CHUNK_SIZE;
1078 : child->nr_values = xa_is_value(entry) ?
1079 : XA_CHUNK_SIZE : 0;
1080 : RCU_INIT_POINTER(child->parent, node);
1081 : node_set_marks(node, offset, child, marks);
1082 : rcu_assign_pointer(node->slots[offset],
1083 : xa_mk_node(child));
1084 : if (xa_is_value(curr))
1085 : values--;
1086 : xas_update(xas, child);
1087 : } else {
1088 : unsigned int canon = offset - xas->xa_sibs;
1089 :
1090 : node_set_marks(node, canon, NULL, marks);
1091 : rcu_assign_pointer(node->slots[canon], entry);
1092 : while (offset > canon)
1093 : rcu_assign_pointer(node->slots[offset--],
1094 : xa_mk_sibling(canon));
1095 : values += (xa_is_value(entry) - xa_is_value(curr)) *
1096 : (xas->xa_sibs + 1);
1097 : }
1098 : } while (offset-- > xas->xa_offset);
1099 :
1100 : node->nr_values += values;
1101 : xas_update(xas, node);
1102 : }
1103 : EXPORT_SYMBOL_GPL(xas_split);
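/*
 * Example (sketch of the two-phase split API, CONFIG_XARRAY_MULTI):
 * replace one order-9 entry with order-0 entries. Nodes are allocated
 * before taking the lock; the split itself runs locked. "my_xa",
 * "index" and "item" are hypothetical.
 *
 *	XA_STATE_ORDER(xas, &my_xa, index, 0);
 *
 *	xas_split_alloc(&xas, item, 9, GFP_KERNEL);
 *	if (!xas_error(&xas)) {
 *		xas_lock(&xas);
 *		xas_split(&xas, item, 9);
 *		xas_unlock(&xas);
 *	}
 */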
1104 : #endif
1105 :
1106 : /**
1107 : * xas_pause() - Pause a walk to drop a lock.
1108 : * @xas: XArray operation state.
1109 : *
1110 : * Some users need to pause a walk and drop the lock they're holding in
1111 : * order to yield to a higher priority thread or carry out an operation
1112 : * on an entry. Those users should call this function before they drop
1113 : * the lock. It resets the @xas to be suitable for the next iteration
1114 : * of the loop after the user has reacquired the lock. If most entries
1115 : * found during a walk require you to call xas_pause(), the xa_for_each()
1116 : * iterator may be more appropriate.
1117 : *
1118 : * Note that xas_pause() only works for forward iteration. If a user needs
1119 : * to pause a reverse iteration, we will need an xas_pause_rev().
1120 : */
1121 0 : void xas_pause(struct xa_state *xas)
1122 : {
1123 0 : struct xa_node *node = xas->xa_node;
1124 :
1125 0 : if (xas_invalid(xas))
1126 : return;
1127 :
1128 0 : xas->xa_node = XAS_RESTART;
1129 0 : if (node) {
1130 0 : unsigned long offset = xas->xa_offset;
1131 0 : while (++offset < XA_CHUNK_SIZE) {
1132 0 : if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
1133 : break;
1134 : }
1135 0 : xas->xa_index += (offset - xas->xa_offset) << node->shift;
1136 0 : if (xas->xa_index == 0)
1137 0 : xas->xa_node = XAS_BOUNDS;
1138 : } else {
1139 0 : xas->xa_index++;
1140 : }
1141 : }
1142 : EXPORT_SYMBOL_GPL(xas_pause);
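/*
 * Example (sketch): using xas_pause() to drop the lock periodically so
 * a long walk does not hold it for too long. process_entry() is a
 * hypothetical helper.
 *
 *	XA_STATE(xas, &my_xa, 0);
 *	void *entry;
 *
 *	xas_lock(&xas);
 *	xas_for_each(&xas, entry, ULONG_MAX) {
 *		process_entry(entry);
 *		if (need_resched()) {
 *			xas_pause(&xas);
 *			xas_unlock(&xas);
 *			cond_resched();
 *			xas_lock(&xas);
 *		}
 *	}
 *	xas_unlock(&xas);
 */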
1143 :
1144 : /*
1145 : * __xas_prev() - Find the previous entry in the XArray.
1146 : * @xas: XArray operation state.
1147 : *
1148 : * Helper function for xas_prev() which handles all the complex cases
1149 : * out of line.
1150 : */
1151 0 : void *__xas_prev(struct xa_state *xas)
1152 : {
1153 : void *entry;
1154 :
1155 0 : if (!xas_frozen(xas->xa_node))
1156 0 : xas->xa_index--;
1157 0 : if (!xas->xa_node)
1158 0 : return set_bounds(xas);
1159 0 : if (xas_not_node(xas->xa_node))
1160 0 : return xas_load(xas);
1161 :
1162 0 : if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
1163 0 : xas->xa_offset--;
1164 :
1165 0 : while (xas->xa_offset == 255) {
1166 0 : xas->xa_offset = xas->xa_node->offset - 1;
1167 0 : xas->xa_node = xa_parent(xas->xa, xas->xa_node);
1168 0 : if (!xas->xa_node)
1169 0 : return set_bounds(xas);
1170 : }
1171 :
1172 : for (;;) {
1173 0 : entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
1174 0 : if (!xa_is_node(entry))
1175 : return entry;
1176 :
1177 0 : xas->xa_node = xa_to_node(entry);
1178 : xas_set_offset(xas);
1179 : }
1180 : }
1181 : EXPORT_SYMBOL_GPL(__xas_prev);
1182 :
1183 : /*
1184 : * __xas_next() - Find the next entry in the XArray.
1185 : * @xas: XArray operation state.
1186 : *
1187 : * Helper function for xas_next() which handles all the complex cases
1188 : * out of line.
1189 : */
1190 0 : void *__xas_next(struct xa_state *xas)
1191 : {
1192 : void *entry;
1193 :
1194 0 : if (!xas_frozen(xas->xa_node))
1195 0 : xas->xa_index++;
1196 0 : if (!xas->xa_node)
1197 0 : return set_bounds(xas);
1198 0 : if (xas_not_node(xas->xa_node))
1199 0 : return xas_load(xas);
1200 :
1201 0 : if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
1202 0 : xas->xa_offset++;
1203 :
1204 0 : while (xas->xa_offset == XA_CHUNK_SIZE) {
1205 0 : xas->xa_offset = xas->xa_node->offset + 1;
1206 0 : xas->xa_node = xa_parent(xas->xa, xas->xa_node);
1207 0 : if (!xas->xa_node)
1208 0 : return set_bounds(xas);
1209 : }
1210 :
1211 : for (;;) {
1212 0 : entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
1213 0 : if (!xa_is_node(entry))
1214 : return entry;
1215 :
1216 0 : xas->xa_node = xa_to_node(entry);
1217 : xas_set_offset(xas);
1218 : }
1219 : }
1220 : EXPORT_SYMBOL_GPL(__xas_next);
1221 :
1222 : /**
1223 : * xas_find() - Find the next present entry in the XArray.
1224 : * @xas: XArray operation state.
1225 : * @max: Highest index to return.
1226 : *
1227 : * If the @xas has not yet been walked to an entry, return the entry
1228 : * which has an index >= xas.xa_index. If it has been walked, the entry
1229 : * currently being pointed at has been processed, and so we move to the
1230 : * next entry.
1231 : *
1232 : * If no entry is found and the array is smaller than @max, the iterator
1233 : * is set to the smallest index not yet in the array. This allows @xas
1234 : * to be immediately passed to xas_store().
1235 : *
1236 : * Return: The entry, if found, otherwise %NULL.
1237 : */
1238 0 : void *xas_find(struct xa_state *xas, unsigned long max)
1239 : {
1240 : void *entry;
1241 :
1242 0 : if (xas_error(xas) || xas->xa_node == XAS_BOUNDS)
1243 : return NULL;
1244 0 : if (xas->xa_index > max)
1245 0 : return set_bounds(xas);
1246 :
1247 0 : if (!xas->xa_node) {
1248 0 : xas->xa_index = 1;
1249 0 : return set_bounds(xas);
1250 0 : } else if (xas->xa_node == XAS_RESTART) {
1251 0 : entry = xas_load(xas);
1252 0 : if (entry || xas_not_node(xas->xa_node))
1253 : return entry;
1254 0 : } else if (!xas->xa_node->shift &&
1255 0 : xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)) {
1256 0 : xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1;
1257 : }
1258 :
1259 : xas_next_offset(xas);
1260 :
1261 0 : while (xas->xa_node && (xas->xa_index <= max)) {
1262 0 : if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
1263 0 : xas->xa_offset = xas->xa_node->offset + 1;
1264 0 : xas->xa_node = xa_parent(xas->xa, xas->xa_node);
1265 0 : continue;
1266 : }
1267 :
1268 0 : entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
1269 0 : if (xa_is_node(entry)) {
1270 0 : xas->xa_node = xa_to_node(entry);
1271 0 : xas->xa_offset = 0;
1272 0 : continue;
1273 : }
1274 0 : if (entry && !xa_is_sibling(entry))
1275 : return entry;
1276 :
1277 : xas_next_offset(xas);
1278 : }
1279 :
1280 0 : if (!xas->xa_node)
1281 0 : xas->xa_node = XAS_BOUNDS;
1282 : return NULL;
1283 : }
1284 : EXPORT_SYMBOL_GPL(xas_find);
1285 :
1286 : /**
1287 : * xas_find_marked() - Find the next marked entry in the XArray.
1288 : * @xas: XArray operation state.
1289 : * @max: Highest index to return.
1290 : * @mark: Mark number to search for.
1291 : *
1292 : * If the @xas has not yet been walked to an entry, return the marked entry
1293 : * which has an index >= xas.xa_index. If it has been walked, the entry
1294 : * currently being pointed at has been processed, and so we return the
1295 : * first marked entry with an index > xas.xa_index.
1296 : *
1297 : * If no marked entry is found and the array is smaller than @max, @xas is
1298 : * set to the bounds state and xas->xa_index is set to the smallest index
1299 : * not yet in the array. This allows @xas to be immediately passed to
1300 : * xas_store().
1301 : *
1302 : * If no entry is found before @max is reached, @xas is set to the restart
1303 : * state.
1304 : *
1305 : * Return: The entry, if found, otherwise %NULL.
1306 : */
1307 207 : void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
1308 : {
1309 207 : bool advance = true;
1310 : unsigned int offset;
1311 : void *entry;
1312 :
1313 414 : if (xas_error(xas))
1314 : return NULL;
1315 207 : if (xas->xa_index > max)
1316 : goto max;
1317 :
1318 207 : if (!xas->xa_node) {
1319 0 : xas->xa_index = 1;
1320 0 : goto out;
1321 207 : } else if (xas_top(xas->xa_node)) {
1322 207 : advance = false;
1323 414 : entry = xa_head(xas->xa);
1324 207 : xas->xa_node = NULL;
1325 414 : if (xas->xa_index > max_index(entry))
1326 : goto out;
1327 207 : if (!xa_is_node(entry)) {
1328 414 : if (xa_marked(xas->xa, mark))
1329 : return entry;
1330 0 : xas->xa_index = 1;
1331 0 : goto out;
1332 : }
1333 0 : xas->xa_node = xa_to_node(entry);
1334 0 : xas->xa_offset = xas->xa_index >> xas->xa_node->shift;
1335 : }
1336 :
1337 0 : while (xas->xa_index <= max) {
1338 0 : if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
1339 0 : xas->xa_offset = xas->xa_node->offset + 1;
1340 0 : xas->xa_node = xa_parent(xas->xa, xas->xa_node);
1341 0 : if (!xas->xa_node)
1342 : break;
1343 0 : advance = false;
1344 0 : continue;
1345 : }
1346 :
1347 0 : if (!advance) {
1348 0 : entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
1349 : if (xa_is_sibling(entry)) {
1350 : xas->xa_offset = xa_to_sibling(entry);
1351 : xas_move_index(xas, xas->xa_offset);
1352 : }
1353 : }
1354 :
1355 0 : offset = xas_find_chunk(xas, advance, mark);
1356 0 : if (offset > xas->xa_offset) {
1357 0 : advance = false;
1358 0 : xas_move_index(xas, offset);
1359 : /* Mind the wrap */
1360 0 : if ((xas->xa_index - 1) >= max)
1361 : goto max;
1362 0 : xas->xa_offset = offset;
1363 0 : if (offset == XA_CHUNK_SIZE)
1364 0 : continue;
1365 : }
1366 :
1367 0 : entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
1368 0 : if (!entry && !(xa_track_free(xas->xa) && mark == XA_FREE_MARK))
1369 0 : continue;
1370 0 : if (!xa_is_node(entry))
1371 : return entry;
1372 0 : xas->xa_node = xa_to_node(entry);
1373 : xas_set_offset(xas);
1374 : }
1375 :
1376 : out:
1377 0 : if (xas->xa_index > max)
1378 : goto max;
1379 0 : return set_bounds(xas);
1380 : max:
1381 0 : xas->xa_node = XAS_RESTART;
1382 0 : return NULL;
1383 : }
1384 : EXPORT_SYMBOL_GPL(xas_find_marked);
1385 :
1386 : /**
1387 : * xas_find_conflict() - Find the next present entry in a range.
1388 : * @xas: XArray operation state.
1389 : *
1390 : * The @xas describes both a range and a position within that range.
1391 : *
1392 : * Context: Any context. Expects xa_lock to be held.
1393 : * Return: The next entry in the range covered by @xas or %NULL.
1394 : */
1395 0 : void *xas_find_conflict(struct xa_state *xas)
1396 : {
1397 : void *curr;
1398 :
1399 0 : if (xas_error(xas))
1400 : return NULL;
1401 :
1402 0 : if (!xas->xa_node)
1403 : return NULL;
1404 :
1405 0 : if (xas_top(xas->xa_node)) {
1406 0 : curr = xas_start(xas);
1407 0 : if (!curr)
1408 : return NULL;
1409 0 : while (xa_is_node(curr)) {
1410 0 : struct xa_node *node = xa_to_node(curr);
1411 0 : curr = xas_descend(xas, node);
1412 : }
1413 0 : if (curr)
1414 : return curr;
1415 : }
1416 :
1417 0 : if (xas->xa_node->shift > xas->xa_shift)
1418 : return NULL;
1419 :
1420 : for (;;) {
1421 0 : if (xas->xa_node->shift == xas->xa_shift) {
1422 0 : if ((xas->xa_offset & xas->xa_sibs) == xas->xa_sibs)
1423 : break;
1424 0 : } else if (xas->xa_offset == XA_CHUNK_MASK) {
1425 0 : xas->xa_offset = xas->xa_node->offset;
1426 0 : xas->xa_node = xa_parent_locked(xas->xa, xas->xa_node);
1427 0 : if (!xas->xa_node)
1428 : break;
1429 0 : continue;
1430 : }
1431 0 : curr = xa_entry_locked(xas->xa, xas->xa_node, ++xas->xa_offset);
1432 : if (xa_is_sibling(curr))
1433 : continue;
1434 0 : while (xa_is_node(curr)) {
1435 0 : xas->xa_node = xa_to_node(curr);
1436 0 : xas->xa_offset = 0;
1437 0 : curr = xa_entry_locked(xas->xa, xas->xa_node, 0);
1438 : }
1439 0 : if (curr)
1440 : return curr;
1441 : }
1442 0 : xas->xa_offset -= xas->xa_sibs;
1443 0 : return NULL;
1444 : }
1445 : EXPORT_SYMBOL_GPL(xas_find_conflict);
1446 :
1447 : /**
1448 : * xa_load() - Load an entry from an XArray.
1449 : * @xa: XArray.
1450 : * @index: index into array.
1451 : *
1452 : * Context: Any context. Takes and releases the RCU lock.
1453 : * Return: The entry at @index in @xa.
1454 : */
1455 2 : void *xa_load(struct xarray *xa, unsigned long index)
1456 : {
1457 2 : XA_STATE(xas, xa, index);
1458 : void *entry;
1459 :
1460 : rcu_read_lock();
1461 : do {
1462 2 : entry = xas_load(&xas);
1463 2 : if (xa_is_zero(entry))
1464 0 : entry = NULL;
1465 2 : } while (xas_retry(&xas, entry));
1466 : rcu_read_unlock();
1467 :
1468 2 : return entry;
1469 : }
1470 : EXPORT_SYMBOL(xa_load);
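/*
 * Example (sketch): the normal API needs no locking by the caller;
 * xa_load() takes the RCU read lock itself. "my_xa" is assumed to be
 * a live XArray.
 *
 *	void *item = xa_load(&my_xa, 42);
 *
 *	if (item)
 *		pr_debug("index 42 holds %p\n", item);
 */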
1471 :
1472 : static void *xas_result(struct xa_state *xas, void *curr)
1473 : {
1474 2 : if (xa_is_zero(curr))
1475 : return NULL;
1476 4 : if (xas_error(xas))
1477 0 : curr = xas->xa_node;
1478 : return curr;
1479 : }
1480 :
1481 : /**
1482 : * __xa_erase() - Erase this entry from the XArray while locked.
1483 : * @xa: XArray.
1484 : * @index: Index into array.
1485 : *
1486 : * After this function returns, loading from @index will return %NULL.
1487 : * If the index is part of a multi-index entry, all indices will be erased
1488 : * and none of the entries will be part of a multi-index entry.
1489 : *
1490 : * Context: Any context. Expects xa_lock to be held on entry.
1491 : * Return: The entry which used to be at this index.
1492 : */
1493 0 : void *__xa_erase(struct xarray *xa, unsigned long index)
1494 : {
1495 0 : XA_STATE(xas, xa, index);
1496 0 : return xas_result(&xas, xas_store(&xas, NULL));
1497 : }
1498 : EXPORT_SYMBOL(__xa_erase);
1499 :
1500 : /**
1501 : * xa_erase() - Erase this entry from the XArray.
1502 : * @xa: XArray.
1503 : * @index: Index of entry.
1504 : *
1505 : * After this function returns, loading from @index will return %NULL.
1506 : * If the index is part of a multi-index entry, all indices will be erased
1507 : * and none of the entries will be part of a multi-index entry.
1508 : *
1509 : * Context: Any context. Takes and releases the xa_lock.
1510 : * Return: The entry which used to be at this index.
1511 : */
1512 0 : void *xa_erase(struct xarray *xa, unsigned long index)
1513 : {
1514 : void *entry;
1515 :
1516 0 : xa_lock(xa);
1517 0 : entry = __xa_erase(xa, index);
1518 0 : xa_unlock(xa);
1519 :
1520 0 : return entry;
1521 : }
1522 : EXPORT_SYMBOL(xa_erase);
1523 :
1524 : /**
1525 : * __xa_store() - Store this entry in the XArray.
1526 : * @xa: XArray.
1527 : * @index: Index into array.
1528 : * @entry: New entry.
1529 : * @gfp: Memory allocation flags.
1530 : *
1531 : * You must already be holding the xa_lock when calling this function.
1532 : * It will drop the lock if needed to allocate memory, and then reacquire
1533 : * it afterwards.
1534 : *
1535 : * Context: Any context. Expects xa_lock to be held on entry. May
1536 : * release and reacquire xa_lock if @gfp flags permit.
1537 : * Return: The old entry at this index or xa_err() if an error happened.
1538 : */
1539 2 : void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
1540 : {
1541 2 : XA_STATE(xas, xa, index);
1542 : void *curr;
1543 :
1544 2 : if (WARN_ON_ONCE(xa_is_advanced(entry)))
1545 : return XA_ERROR(-EINVAL);
1546 4 : if (xa_track_free(xa) && !entry)
1547 0 : entry = XA_ZERO_ENTRY;
1548 :
1549 : do {
1550 2 : curr = xas_store(&xas, entry);
1551 4 : if (xa_track_free(xa))
1552 0 : xas_clear_mark(&xas, XA_FREE_MARK);
1553 2 : } while (__xas_nomem(&xas, gfp));
1554 :
1555 : return xas_result(&xas, curr);
1556 : }
1557 : EXPORT_SYMBOL(__xa_store);
1558 :
1559 : /**
1560 : * xa_store() - Store this entry in the XArray.
1561 : * @xa: XArray.
1562 : * @index: Index into array.
1563 : * @entry: New entry.
1564 : * @gfp: Memory allocation flags.
1565 : *
1566 : * After this function returns, loads from this index will return @entry.
1567 : * Storing into an existing multi-index entry updates the entry of every index.
1568 : * The marks associated with @index are unaffected unless @entry is %NULL.
1569 : *
1570 : * Context: Any context. Takes and releases the xa_lock.
1571 : * May sleep if the @gfp flags permit.
1572 : * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry
1573 : * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation
1574 : * failed.
1575 : */
1576 2 : void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
1577 : {
1578 : void *curr;
1579 :
1580 4 : xa_lock(xa);
1581 2 : curr = __xa_store(xa, index, entry, gfp);
1582 4 : xa_unlock(xa);
1583 :
1584 2 : return curr;
1585 : }
1586 : EXPORT_SYMBOL(xa_store);
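/*
 * Example (sketch): storing with error checking. xa_store() returns
 * either the old entry or an xa_err()-encoded error, so test the
 * result with xa_is_err() before using it.
 *
 *	void *old = xa_store(&my_xa, index, item, GFP_KERNEL);
 *
 *	if (xa_is_err(old))
 *		return xa_err(old);
 */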
1587 :
1588 : /**
1589 : * __xa_cmpxchg() - Store this entry in the XArray.
1590 : * @xa: XArray.
1591 : * @index: Index into array.
1592 : * @old: Old value to test against.
1593 : * @entry: New entry.
1594 : * @gfp: Memory allocation flags.
1595 : *
1596 : * You must already be holding the xa_lock when calling this function.
1597 : * It will drop the lock if needed to allocate memory, and then reacquire
1598 : * it afterwards.
1599 : *
1600 : * Context: Any context. Expects xa_lock to be held on entry. May
1601 : * release and reacquire xa_lock if @gfp flags permit.
1602 : * Return: The old entry at this index or xa_err() if an error happened.
1603 : */
1604 0 : void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
1605 : void *old, void *entry, gfp_t gfp)
1606 : {
1607 0 : XA_STATE(xas, xa, index);
1608 : void *curr;
1609 :
1610 0 : if (WARN_ON_ONCE(xa_is_advanced(entry)))
1611 : return XA_ERROR(-EINVAL);
1612 :
1613 : do {
1614 0 : curr = xas_load(&xas);
1615 0 : if (curr == old) {
1616 0 : xas_store(&xas, entry);
1617 0 : if (xa_track_free(xa) && entry && !curr)
1618 0 : xas_clear_mark(&xas, XA_FREE_MARK);
1619 : }
1620 0 : } while (__xas_nomem(&xas, gfp));
1621 :
1622 : return xas_result(&xas, curr);
1623 : }
1624 : EXPORT_SYMBOL(__xa_cmpxchg);
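/*
 * Example (sketch): compare-and-exchange with the lock already held,
 * e.g. to replace an entry only if nobody changed it meanwhile.
 * "old_item" and "new_item" are hypothetical pointers.
 *
 *	void *curr;
 *
 *	xa_lock(&my_xa);
 *	curr = __xa_cmpxchg(&my_xa, index, old_item, new_item, GFP_KERNEL);
 *	xa_unlock(&my_xa);
 *	if (curr != old_item)
 *		return -EAGAIN;	(another thread got there first)
 */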
1625 :
1626 : /**
1627 : * __xa_insert() - Store this entry in the XArray if no entry is present.
1628 : * @xa: XArray.
1629 : * @index: Index into array.
1630 : * @entry: New entry.
1631 : * @gfp: Memory allocation flags.
1632 : *
1633 : * Inserting a NULL entry will store a reserved entry (like xa_reserve())
1634 : * if no entry is present. Inserting will fail if a reserved entry is
1635 : * present, even though loading from this index will return NULL.
1636 : *
1637 : * Context: Any context. Expects xa_lock to be held on entry. May
1638 : * release and reacquire xa_lock if @gfp flags permit.
1639 : * Return: 0 if the store succeeded. -EBUSY if another entry was present.
1640 : * -ENOMEM if memory could not be allocated.
1641 : */
1642 0 : int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
1643 : {
1644 0 : XA_STATE(xas, xa, index);
1645 : void *curr;
1646 :
1647 0 : if (WARN_ON_ONCE(xa_is_advanced(entry)))
1648 : return -EINVAL;
1649 0 : if (!entry)
1650 0 : entry = XA_ZERO_ENTRY;
1651 :
1652 : do {
1653 0 : curr = xas_load(&xas);
1654 0 : if (!curr) {
1655 0 : xas_store(&xas, entry);
1656 0 : if (xa_track_free(xa))
1657 0 : xas_clear_mark(&xas, XA_FREE_MARK);
1658 : } else {
1659 0 : xas_set_err(&xas, -EBUSY);
1660 : }
1661 0 : } while (__xas_nomem(&xas, gfp));
1662 :
1663 0 : return xas_error(&xas);
1664 : }
1665 : EXPORT_SYMBOL(__xa_insert);
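/*
 * Example (sketch): the unlocked wrapper xa_insert() fails with -EBUSY
 * rather than overwriting, which makes it useful for claiming an index.
 *
 *	int err = xa_insert(&my_xa, index, item, GFP_KERNEL);
 *
 *	if (err == -EBUSY)
 *		return err;	(the index was already claimed)
 */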
1666 :
1667 : #ifdef CONFIG_XARRAY_MULTI
1668 : static void xas_set_range(struct xa_state *xas, unsigned long first,
1669 : unsigned long last)
1670 : {
1671 : unsigned int shift = 0;
1672 : unsigned long sibs = last - first;
1673 : unsigned int offset = XA_CHUNK_MASK;
1674 :
1675 : xas_set(xas, first);
1676 :
1677 : while ((first & XA_CHUNK_MASK) == 0) {
1678 : if (sibs < XA_CHUNK_MASK)
1679 : break;
1680 : if ((sibs == XA_CHUNK_MASK) && (offset < XA_CHUNK_MASK))
1681 : break;
1682 : shift += XA_CHUNK_SHIFT;
1683 : if (offset == XA_CHUNK_MASK)
1684 : offset = sibs & XA_CHUNK_MASK;
1685 : sibs >>= XA_CHUNK_SHIFT;
1686 : first >>= XA_CHUNK_SHIFT;
1687 : }
1688 :
1689 : offset = first & XA_CHUNK_MASK;
1690 : if (offset + sibs > XA_CHUNK_MASK)
1691 : sibs = XA_CHUNK_MASK - offset;
1692 : if ((((first + sibs + 1) << shift) - 1) > last)
1693 : sibs -= 1;
1694 :
1695 : xas->xa_shift = shift;
1696 : xas->xa_sibs = sibs;
1697 : }
1698 :
1699 : /**
1700 : * xa_store_range() - Store this entry at a range of indices in the XArray.
1701 : * @xa: XArray.
1702 : * @first: First index to affect.
1703 : * @last: Last index to affect.
1704 : * @entry: New entry.
1705 : * @gfp: Memory allocation flags.
1706 : *
1707 : * After this function returns, loads from any index between @first and @last,
1708 : * inclusive, will return @entry.
1709 : * Storing into an existing multi-index entry updates the entry of every index.
1710 : * The marks associated with the affected indices are unaffected unless @entry is %NULL.
1711 : *
1712 : * Context: Process context. Takes and releases the xa_lock. May sleep
1713 : * if the @gfp flags permit.
1714 : * Return: %NULL on success, xa_err(-EINVAL) if @entry cannot be stored in
1715 : * an XArray, or xa_err(-ENOMEM) if memory allocation failed.
1716 : */
1717 : void *xa_store_range(struct xarray *xa, unsigned long first,
1718 : unsigned long last, void *entry, gfp_t gfp)
1719 : {
1720 : XA_STATE(xas, xa, 0);
1721 :
1722 : if (WARN_ON_ONCE(xa_is_internal(entry)))
1723 : return XA_ERROR(-EINVAL);
1724 : if (last < first)
1725 : return XA_ERROR(-EINVAL);
1726 :
1727 : do {
1728 : xas_lock(&xas);
1729 : if (entry) {
1730 : unsigned int order = BITS_PER_LONG;
1731 : if (last + 1)
1732 : order = __ffs(last + 1);
1733 : xas_set_order(&xas, last, order);
1734 : xas_create(&xas, true);
1735 : if (xas_error(&xas))
1736 : goto unlock;
1737 : }
1738 : do {
1739 : xas_set_range(&xas, first, last);
1740 : xas_store(&xas, entry);
1741 : if (xas_error(&xas))
1742 : goto unlock;
1743 : first += xas_size(&xas);
1744 : } while (first <= last);
1745 : unlock:
1746 : xas_unlock(&xas);
1747 : } while (xas_nomem(&xas, gfp));
1748 :
1749 : return xas_result(&xas, NULL);
1750 : }
1751 : EXPORT_SYMBOL(xa_store_range);
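/*
 * Example (sketch, CONFIG_XARRAY_MULTI): tie indices 64-127 to a single
 * entry; afterwards xa_load() on any index in that range returns "item".
 *
 *	void *ret = xa_store_range(&my_xa, 64, 127, item, GFP_KERNEL);
 *
 *	if (xa_is_err(ret))
 *		return xa_err(ret);
 */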
1752 :
1753 : /**
1754 : * xa_get_order() - Get the order of an entry.
1755 : * @xa: XArray.
1756 : * @index: Index of the entry.
1757 : *
1758 : * Return: A number between 0 and 63 indicating the order of the entry.
1759 : */
1760 : int xa_get_order(struct xarray *xa, unsigned long index)
1761 : {
1762 : XA_STATE(xas, xa, index);
1763 : void *entry;
1764 : int order = 0;
1765 :
1766 : rcu_read_lock();
1767 : entry = xas_load(&xas);
1768 :
1769 : if (!entry)
1770 : goto unlock;
1771 :
1772 : if (!xas.xa_node)
1773 : goto unlock;
1774 :
1775 : for (;;) {
1776 : unsigned int slot = xas.xa_offset + (1 << order);
1777 :
1778 : if (slot >= XA_CHUNK_SIZE)
1779 : break;
1780 : if (!xa_is_sibling(xas.xa_node->slots[slot]))
1781 : break;
1782 : order++;
1783 : }
1784 :
1785 : order += xas.xa_node->shift;
1786 : unlock:
1787 : rcu_read_unlock();
1788 :
1789 : return order;
1790 : }
1791 : EXPORT_SYMBOL(xa_get_order);
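/*
 * Example (sketch): querying the order of the multi-index entry stored
 * by the xa_store_range() sketch above. With XA_CHUNK_SHIFT == 6, a
 * 64-slot entry at a 64-aligned index reports order 6.
 *
 *	int order = xa_get_order(&my_xa, 64);
 */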
1792 : #endif /* CONFIG_XARRAY_MULTI */
1793 :
1794 : /**
1795 : * __xa_alloc() - Find somewhere to store this entry in the XArray.
1796 : * @xa: XArray.
1797 : * @id: Pointer to ID.
1798 : * @limit: Range for allocated ID.
1799 : * @entry: New entry.
1800 : * @gfp: Memory allocation flags.
1801 : *
1802 : * Finds an empty entry in @xa between @limit.min and @limit.max,
1803 : * stores the index into the @id pointer, then stores the entry at
1804 : * that index. A concurrent lookup will not see an uninitialised @id.
1805 : *
1806 : * Context: Any context. Expects xa_lock to be held on entry. May
1807 : * release and reacquire xa_lock if @gfp flags permit.
1808 : * Return: 0 on success, -ENOMEM if memory could not be allocated or
1809 : * -EBUSY if there are no free entries in @limit.
1810 : */
1811 0 : int __xa_alloc(struct xarray *xa, u32 *id, void *entry,
1812 : struct xa_limit limit, gfp_t gfp)
1813 : {
1814 0 : XA_STATE(xas, xa, 0);
1815 :
1816 0 : if (WARN_ON_ONCE(xa_is_advanced(entry)))
1817 : return -EINVAL;
1818 0 : if (WARN_ON_ONCE(!xa_track_free(xa)))
1819 : return -EINVAL;
1820 :
1821 0 : if (!entry)
1822 0 : entry = XA_ZERO_ENTRY;
1823 :
1824 : do {
1825 0 : xas.xa_index = limit.min;
1826 0 : xas_find_marked(&xas, limit.max, XA_FREE_MARK);
1827 0 : if (xas.xa_node == XAS_RESTART)
1828 0 : xas_set_err(&xas, -EBUSY);
1829 : else
1830 0 : *id = xas.xa_index;
1831 0 : xas_store(&xas, entry);
1832 0 : xas_clear_mark(&xas, XA_FREE_MARK);
1833 0 : } while (__xas_nomem(&xas, gfp));
1834 :
1835 0 : return xas_error(&xas);
1836 : }
1837 : EXPORT_SYMBOL(__xa_alloc);
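
/*
 * Usage sketch (editor's illustration): allocate an ID while already
 * holding the xa_lock.  The array must be defined with
 * DEFINE_XARRAY_ALLOC() (or XA_FLAGS_ALLOC) so that free tracking is
 * enabled; with a sleeping @gfp the lock may be dropped and retaken.
 */
static DEFINE_XARRAY_ALLOC(alloc_example_xa);

static int __xa_alloc_example(void *item, u32 *id)
{
	int err;

	xa_lock(&alloc_example_xa);
	err = __xa_alloc(&alloc_example_xa, id, item, xa_limit_32b,
			 GFP_KERNEL);
	xa_unlock(&alloc_example_xa);
	return err;
}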
1838 :
1839 : /**
1840 : * __xa_alloc_cyclic() - Find somewhere to store this entry in the XArray.
1841 : * @xa: XArray.
1842 : * @id: Pointer to ID.
1843 : * @entry: New entry.
1844 : * @limit: Range of allocated ID.
1845 : * @next: Pointer to next ID to allocate.
1846 : * @gfp: Memory allocation flags.
1847 : *
1848 : * Finds an empty entry in @xa between @limit.min and @limit.max,
1849 : * stores the index into the @id pointer, then stores the entry at
1850 : * that index. A concurrent lookup will not see an uninitialised @id.
1851 : * The search for an empty entry will start at @next and will wrap
1852 : * around if necessary.
1853 : *
1854 : * Context: Any context. Expects xa_lock to be held on entry. May
1855 : * release and reacquire xa_lock if @gfp flags permit.
1856 : * Return: 0 if the allocation succeeded without wrapping. 1 if the
1857 : * allocation succeeded after wrapping, -ENOMEM if memory could not be
1858 : * allocated or -EBUSY if there are no free entries in @limit.
1859 : */
1860 0 : int __xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
1861 : struct xa_limit limit, u32 *next, gfp_t gfp)
1862 : {
1863 0 : u32 min = limit.min;
1864 : int ret;
1865 :
1866 0 : limit.min = max(min, *next);
1867 0 : ret = __xa_alloc(xa, id, entry, limit, gfp);
1868 0 : if ((xa->xa_flags & XA_FLAGS_ALLOC_WRAPPED) && ret == 0) {
1869 0 : xa->xa_flags &= ~XA_FLAGS_ALLOC_WRAPPED;
1870 0 : ret = 1;
1871 : }
1872 :
1873 0 : if (ret < 0 && limit.min > min) {
1874 0 : limit.min = min;
1875 0 : ret = __xa_alloc(xa, id, entry, limit, gfp);
1876 0 : if (ret == 0)
1877 0 : ret = 1;
1878 : }
1879 :
1880 0 : if (ret >= 0) {
1881 0 : *next = *id + 1;
1882 0 : if (*next == 0)
1883 0 : xa->xa_flags |= XA_FLAGS_ALLOC_WRAPPED;
1884 : }
1885 0 : return ret;
1886 : }
1887 : EXPORT_SYMBOL(__xa_alloc_cyclic);
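
/*
 * Usage sketch (editor's illustration): cyclic allocation resumes the
 * search at *@next, wrapping back to limit.min when needed.  A return
 * value of 1 means "allocated, but wrapped" and is still a success.
 */
static DEFINE_XARRAY_ALLOC(cyclic_example_xa);
static u32 cyclic_example_next;

static int __xa_alloc_cyclic_example(void *item, u32 *id)
{
	int ret;

	xa_lock(&cyclic_example_xa);
	ret = __xa_alloc_cyclic(&cyclic_example_xa, id, item, xa_limit_31b,
				&cyclic_example_next, GFP_KERNEL);
	xa_unlock(&cyclic_example_xa);
	return ret < 0 ? ret : 0;	/* treat a wrapped allocation as success */
}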
1888 :
1889 : /**
1890 : * __xa_set_mark() - Set this mark on this entry while locked.
1891 : * @xa: XArray.
1892 : * @index: Index of entry.
1893 : * @mark: Mark number.
1894 : *
1895 : * Attempting to set a mark on a %NULL entry does not succeed.
1896 : *
1897 : * Context: Any context. Expects xa_lock to be held on entry.
1898 : */
1899 0 : void __xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
1900 : {
1901 0 : XA_STATE(xas, xa, index);
1902 0 : void *entry = xas_load(&xas);
1903 :
1904 0 : if (entry)
1905 0 : xas_set_mark(&xas, mark);
1906 0 : }
1907 : EXPORT_SYMBOL(__xa_set_mark);
1908 :
1909 : /**
1910 : * __xa_clear_mark() - Clear this mark on this entry while locked.
1911 : * @xa: XArray.
1912 : * @index: Index of entry.
1913 : * @mark: Mark number.
1914 : *
1915 : * Context: Any context. Expects xa_lock to be held on entry.
1916 : */
1917 0 : void __xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
1918 : {
1919 0 : XA_STATE(xas, xa, index);
1920 0 : void *entry = xas_load(&xas);
1921 :
1922 0 : if (entry)
1923 0 : xas_clear_mark(&xas, mark);
1924 0 : }
1925 : EXPORT_SYMBOL(__xa_clear_mark);
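
/*
 * Usage sketch (editor's illustration): the locked variants let two mark
 * updates happen under a single hold of the xa_lock, so no other thread
 * can observe the intermediate state.  Reading XA_MARK_0/XA_MARK_1 as
 * "dirty"/"clean" is only an example interpretation.
 */
static void mark_transition_example(struct xarray *xa, unsigned long index)
{
	xa_lock(xa);
	__xa_set_mark(xa, index, XA_MARK_0);	/* e.g. dirty */
	__xa_clear_mark(xa, index, XA_MARK_1);	/* e.g. clean */
	xa_unlock(xa);
}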
1926 :
1927 : /**
1928 : * xa_get_mark() - Inquire whether this mark is set on this entry.
1929 : * @xa: XArray.
1930 : * @index: Index of entry.
1931 : * @mark: Mark number.
1932 : *
1933 : * This function uses the RCU read lock, so the result may be out of date
1934 : * by the time it returns. If you need the result to be stable, use a lock.
1935 : *
1936 : * Context: Any context. Takes and releases the RCU lock.
1937 : * Return: True if the entry at @index has this mark set, false if it doesn't.
1938 : */
1939 0 : bool xa_get_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
1940 : {
1941 0 : XA_STATE(xas, xa, index);
1942 : void *entry;
1943 :
1944 : rcu_read_lock();
1945 0 : entry = xas_start(&xas);
1946 0 : while (xas_get_mark(&xas, mark)) {
1947 0 : if (!xa_is_node(entry))
1948 : goto found;
1949 0 : entry = xas_descend(&xas, xa_to_node(entry));
1950 : }
1951 : rcu_read_unlock();
1952 0 : return false;
1953 : found:
1954 : rcu_read_unlock();
1955 0 : return true;
1956 : }
1957 : EXPORT_SYMBOL(xa_get_mark);
1958 :
1959 : /**
1960 : * xa_set_mark() - Set this mark on this entry.
1961 : * @xa: XArray.
1962 : * @index: Index of entry.
1963 : * @mark: Mark number.
1964 : *
1965 : * Attempting to set a mark on a %NULL entry does not succeed.
1966 : *
1967 : * Context: Process context. Takes and releases the xa_lock.
1968 : */
1969 0 : void xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
1970 : {
1971 0 : xa_lock(xa);
1972 0 : __xa_set_mark(xa, index, mark);
1973 0 : xa_unlock(xa);
1974 0 : }
1975 : EXPORT_SYMBOL(xa_set_mark);
1976 :
1977 : /**
1978 : * xa_clear_mark() - Clear this mark on this entry.
1979 : * @xa: XArray.
1980 : * @index: Index of entry.
1981 : * @mark: Mark number.
1982 : *
1983 : * Clearing a mark always succeeds.
1984 : *
1985 : * Context: Process context. Takes and releases the xa_lock.
1986 : */
1987 0 : void xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
1988 : {
1989 0 : xa_lock(xa);
1990 0 : __xa_clear_mark(xa, index, mark);
1991 0 : xa_unlock(xa);
1992 0 : }
1993 : EXPORT_SYMBOL(xa_clear_mark);
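
/*
 * Usage sketch (editor's illustration): marks only stick to present
 * entries, so a set/test/clear round trip works once something is
 * stored at the index.
 */
static void mark_roundtrip_example(struct xarray *xa, unsigned long index)
{
	if (!xa_load(xa, index))
		return;		/* setting a mark on NULL would be a no-op */
	xa_set_mark(xa, index, XA_MARK_0);
	WARN_ON(!xa_get_mark(xa, index, XA_MARK_0));
	xa_clear_mark(xa, index, XA_MARK_0);
}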
1994 :
1995 : /**
1996 : * xa_find() - Search the XArray for an entry.
1997 : * @xa: XArray.
1998 : * @indexp: Pointer to an index.
1999 : * @max: Maximum index to search to.
2000 : * @filter: Selection criterion.
2001 : *
2002 : * Finds the entry in @xa which matches the @filter and has the lowest
2003 : * index that is at least the index pointed to by @indexp and no more than @max.
2004 : * If an entry is found, @indexp is updated to be the index of the entry.
2005 : * This function is protected by the RCU read lock, so it may miss
2006 : * entries which are being simultaneously added. It will not return an
2007 : * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
2008 : *
2009 : * Context: Any context. Takes and releases the RCU lock.
2010 : * Return: The entry, if found, otherwise %NULL.
2011 : */
2012 0 : void *xa_find(struct xarray *xa, unsigned long *indexp,
2013 : unsigned long max, xa_mark_t filter)
2014 : {
2015 0 : XA_STATE(xas, xa, *indexp);
2016 : void *entry;
2017 :
2018 : rcu_read_lock();
2019 : do {
2020 0 : if ((__force unsigned int)filter < XA_MAX_MARKS)
2021 0 : entry = xas_find_marked(&xas, max, filter);
2022 : else
2023 0 : entry = xas_find(&xas, max);
2024 0 : } while (xas_retry(&xas, entry));
2025 : rcu_read_unlock();
2026 :
2027 0 : if (entry)
2028 0 : *indexp = xas.xa_index;
2029 0 : return entry;
2030 : }
2031 : EXPORT_SYMBOL(xa_find);
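
/*
 * Usage sketch (editor's illustration): find the lowest marked entry at
 * or above index 0.  Passing XA_PRESENT instead of a mark would match
 * any non-NULL entry.
 */
static void *find_first_marked_example(struct xarray *xa, unsigned long *index)
{
	*index = 0;
	return xa_find(xa, index, ULONG_MAX, XA_MARK_0);
}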
2032 :
2033 : static bool xas_sibling(struct xa_state *xas)
2034 : {
2035 0 : struct xa_node *node = xas->xa_node;
2036 : unsigned long mask;
2037 :
2038 : if (!IS_ENABLED(CONFIG_XARRAY_MULTI) || !node)
2039 : return false;
2040 : mask = (XA_CHUNK_SIZE << node->shift) - 1;
2041 : return (xas->xa_index & mask) >
2042 : ((unsigned long)xas->xa_offset << node->shift);
2043 : }
2044 :
2045 : /**
2046 : * xa_find_after() - Search the XArray for a present entry.
2047 : * @xa: XArray.
2048 : * @indexp: Pointer to an index.
2049 : * @max: Maximum index to search to.
2050 : * @filter: Selection criterion.
2051 : *
2052 : * Finds the entry in @xa which matches the @filter and has the lowest
2053 : * index that is above the index pointed to by @indexp and no more than @max.
2054 : * If an entry is found, @indexp is updated to be the index of the entry.
2055 : * This function is protected by the RCU read lock, so it may miss entries
2056 : * which are being simultaneously added. It will not return an
2057 : * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
2058 : *
2059 : * Context: Any context. Takes and releases the RCU lock.
2060 : * Return: The entry, if found, otherwise %NULL.
2061 : */
2062 0 : void *xa_find_after(struct xarray *xa, unsigned long *indexp,
2063 : unsigned long max, xa_mark_t filter)
2064 : {
2065 0 : XA_STATE(xas, xa, *indexp + 1);
2066 : void *entry;
2067 :
2068 0 : if (xas.xa_index == 0)
2069 : return NULL;
2070 :
2071 : rcu_read_lock();
2072 : for (;;) {
2073 0 : if ((__force unsigned int)filter < XA_MAX_MARKS)
2074 0 : entry = xas_find_marked(&xas, max, filter);
2075 : else
2076 0 : entry = xas_find(&xas, max);
2077 :
2078 0 : if (xas_invalid(&xas))
2079 : break;
2080 0 : if (xas_sibling(&xas))
2081 : continue;
2082 0 : if (!xas_retry(&xas, entry))
2083 : break;
2084 : }
2085 : rcu_read_unlock();
2086 :
2087 0 : if (entry)
2088 0 : *indexp = xas.xa_index;
2089 : return entry;
2090 : }
2091 : EXPORT_SYMBOL(xa_find_after);
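
/*
 * Usage sketch (editor's illustration): the open-coded iteration that
 * the xa_for_each() macro expands to - xa_find() locates the first
 * entry, then xa_find_after() advances strictly past the previous one.
 */
static void iterate_example(struct xarray *xa)
{
	unsigned long index = 0;
	void *entry;

	for (entry = xa_find(xa, &index, ULONG_MAX, XA_PRESENT); entry;
	     entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT))
		pr_info("index %lu: %p\n", index, entry);
}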
2092 :
2093 0 : static unsigned int xas_extract_present(struct xa_state *xas, void **dst,
2094 : unsigned long max, unsigned int n)
2095 : {
2096 : void *entry;
2097 0 : unsigned int i = 0;
2098 :
2099 : rcu_read_lock();
2100 0 : xas_for_each(xas, entry, max) {
2101 0 : if (xas_retry(xas, entry))
2102 0 : continue;
2103 0 : dst[i++] = entry;
2104 0 : if (i == n)
2105 : break;
2106 : }
2107 : rcu_read_unlock();
2108 :
2109 0 : return i;
2110 : }
2111 :
2112 0 : static unsigned int xas_extract_marked(struct xa_state *xas, void **dst,
2113 : unsigned long max, unsigned int n, xa_mark_t mark)
2114 : {
2115 : void *entry;
2116 0 : unsigned int i = 0;
2117 :
2118 : rcu_read_lock();
2119 0 : xas_for_each_marked(xas, entry, max, mark) {
2120 0 : if (xas_retry(xas, entry))
2121 0 : continue;
2122 0 : dst[i++] = entry;
2123 0 : if (i == n)
2124 : break;
2125 : }
2126 : rcu_read_unlock();
2127 :
2128 0 : return i;
2129 : }
2130 :
2131 : /**
2132 : * xa_extract() - Copy selected entries from the XArray into a normal array.
2133 : * @xa: The source XArray to copy from.
2134 : * @dst: The buffer to copy entries into.
2135 : * @start: The first index in the XArray eligible to be selected.
2136 : * @max: The last index in the XArray eligible to be selected.
2137 : * @n: The maximum number of entries to copy.
2138 : * @filter: Selection criterion.
2139 : *
2140 : * Copies up to @n entries that match @filter from the XArray. The
2141 : * copied entries will have indices between @start and @max, inclusive.
2142 : *
2143 : * The @filter may be an XArray mark value, in which case entries which are
2144 : * marked with that mark will be copied. It may also be %XA_PRESENT, in
2145 : * which case all entries which are not %NULL will be copied.
2146 : *
2147 : * The entries returned may not represent a snapshot of the XArray at a
2148 : * moment in time. For example, if another thread stores to index 5, then
2149 : * index 10, calling xa_extract() may return the old contents of index 5
2150 : * and the new contents of index 10. Indices not modified while this
2151 : * function is running will not be skipped.
2152 : *
2153 : * If you need stronger guarantees, holding the xa_lock across calls to this
2154 : * function will prevent concurrent modification.
2155 : *
2156 : * Context: Any context. Takes and releases the RCU lock.
2157 : * Return: The number of entries copied.
2158 : */
2159 0 : unsigned int xa_extract(struct xarray *xa, void **dst, unsigned long start,
2160 : unsigned long max, unsigned int n, xa_mark_t filter)
2161 : {
2162 0 : XA_STATE(xas, xa, start);
2163 :
2164 0 : if (!n)
2165 : return 0;
2166 :
2167 0 : if ((__force unsigned int)filter < XA_MAX_MARKS)
2168 0 : return xas_extract_marked(&xas, dst, max, n, filter);
2169 0 : return xas_extract_present(&xas, dst, max, n);
2170 : }
2171 : EXPORT_SYMBOL(xa_extract);
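
/*
 * Usage sketch (editor's illustration): copy up to 16 present entries
 * from the first 1024 indices into an ordinary array.  The buffer size
 * and index range are arbitrary.
 */
static void extract_example(struct xarray *xa)
{
	void *buf[16];
	unsigned int i, n;

	n = xa_extract(xa, buf, 0, 1023, ARRAY_SIZE(buf), XA_PRESENT);
	for (i = 0; i < n; i++)
		pr_info("entry %u: %p\n", i, buf[i]);
}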
2172 :
2173 : /**
2174 : * xa_delete_node() - Private interface for workingset code.
2175 : * @node: Node to be removed from the tree.
2176 : * @update: Function to call to update ancestor nodes.
2177 : *
2178 : * Context: xa_lock must be held on entry and will not be released.
2179 : */
2180 0 : void xa_delete_node(struct xa_node *node, xa_update_node_t update)
2181 : {
2182 0 : struct xa_state xas = {
2183 0 : .xa = node->array,
2184 0 : .xa_index = (unsigned long)node->offset <<
2185 0 : (node->shift + XA_CHUNK_SHIFT),
2186 0 : .xa_shift = node->shift + XA_CHUNK_SHIFT,
2187 : .xa_offset = node->offset,
2188 0 : .xa_node = xa_parent_locked(node->array, node),
2189 : .xa_update = update,
2190 : };
2191 :
2192 0 : xas_store(&xas, NULL);
2193 0 : }
2194 : EXPORT_SYMBOL_GPL(xa_delete_node); /* For the benefit of the test suite */
2195 :
2196 : /**
2197 : * xa_destroy() - Free all internal data structures.
2198 : * @xa: XArray.
2199 : *
2200 : * After calling this function, the XArray is empty and has freed all memory
2201 : * allocated for its internal data structures. You are responsible for
2202 : * freeing the objects referenced by the XArray.
2203 : *
2204 : * Context: Any context. Takes and releases the xa_lock, interrupt-safe.
2205 : */
2206 0 : void xa_destroy(struct xarray *xa)
2207 : {
2208 0 : XA_STATE(xas, xa, 0);
2209 : unsigned long flags;
2210 : void *entry;
2211 :
2212 0 : xas.xa_node = NULL;
2213 0 : xas_lock_irqsave(&xas, flags);
2214 0 : entry = xa_head_locked(xa);
2215 0 : RCU_INIT_POINTER(xa->xa_head, NULL);
2216 0 : xas_init_marks(&xas);
2217 0 : if (xa_zero_busy(xa))
2218 0 : xa_mark_clear(xa, XA_FREE_MARK);
2219 : /* lockdep checks we're still holding the lock in xas_free_nodes() */
2220 0 : if (xa_is_node(entry))
2221 0 : xas_free_nodes(&xas, xa_to_node(entry));
2222 0 : xas_unlock_irqrestore(&xas, flags);
2223 0 : }
2224 : EXPORT_SYMBOL(xa_destroy);
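
/*
 * Usage sketch (editor's illustration): the XArray does not own the
 * objects it points to, so free them first, then release the tree
 * itself.  Assumes every entry was allocated with kmalloc().
 */
static void teardown_example(struct xarray *xa)
{
	unsigned long index;
	void *entry;

	xa_for_each(xa, index, entry)
		kfree(entry);
	xa_destroy(xa);
	WARN_ON(!xa_empty(xa));
}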
2225 :
2226 : #ifdef XA_DEBUG
2227 : void xa_dump_node(const struct xa_node *node)
2228 : {
2229 : unsigned i, j;
2230 :
2231 : if (!node)
2232 : return;
2233 : if ((unsigned long)node & 3) {
2234 : pr_cont("node %px\n", node);
2235 : return;
2236 : }
2237 :
2238 : pr_cont("node %px %s %d parent %px shift %d count %d values %d "
2239 : "array %px list %px %px marks",
2240 : node, node->parent ? "offset" : "max", node->offset,
2241 : node->parent, node->shift, node->count, node->nr_values,
2242 : node->array, node->private_list.prev, node->private_list.next);
2243 : for (i = 0; i < XA_MAX_MARKS; i++)
2244 : for (j = 0; j < XA_MARK_LONGS; j++)
2245 : pr_cont(" %lx", node->marks[i][j]);
2246 : pr_cont("\n");
2247 : }
2248 :
2249 : void xa_dump_index(unsigned long index, unsigned int shift)
2250 : {
2251 : if (!shift)
2252 : pr_info("%lu: ", index);
2253 : else if (shift >= BITS_PER_LONG)
2254 : pr_info("0-%lu: ", ~0UL);
2255 : else
2256 : pr_info("%lu-%lu: ", index, index | ((1UL << shift) - 1));
2257 : }
2258 :
2259 : void xa_dump_entry(const void *entry, unsigned long index, unsigned long shift)
2260 : {
2261 : if (!entry)
2262 : return;
2263 :
2264 : xa_dump_index(index, shift);
2265 :
2266 : if (xa_is_node(entry)) {
2267 : if (shift == 0) {
2268 : pr_cont("%px\n", entry);
2269 : } else {
2270 : unsigned long i;
2271 : struct xa_node *node = xa_to_node(entry);
2272 : xa_dump_node(node);
2273 : for (i = 0; i < XA_CHUNK_SIZE; i++)
2274 : xa_dump_entry(node->slots[i],
2275 : index + (i << node->shift), node->shift);
2276 : }
2277 : } else if (xa_is_value(entry))
2278 : pr_cont("value %ld (0x%lx) [%px]\n", xa_to_value(entry),
2279 : xa_to_value(entry), entry);
2280 : else if (!xa_is_internal(entry))
2281 : pr_cont("%px\n", entry);
2282 : else if (xa_is_retry(entry))
2283 : pr_cont("retry (%ld)\n", xa_to_internal(entry));
2284 : else if (xa_is_sibling(entry))
2285 : pr_cont("sibling (slot %ld)\n", xa_to_sibling(entry));
2286 : else if (xa_is_zero(entry))
2287 : pr_cont("zero (%ld)\n", xa_to_internal(entry));
2288 : else
2289 : pr_cont("UNKNOWN ENTRY (%px)\n", entry);
2290 : }
2291 :
2292 : void xa_dump(const struct xarray *xa)
2293 : {
2294 : void *entry = xa->xa_head;
2295 : unsigned int shift = 0;
2296 :
2297 : pr_info("xarray: %px head %px flags %x marks %d %d %d\n", xa, entry,
2298 : xa->xa_flags, xa_marked(xa, XA_MARK_0),
2299 : xa_marked(xa, XA_MARK_1), xa_marked(xa, XA_MARK_2));
2300 : if (xa_is_node(entry))
2301 : shift = xa_to_node(entry)->shift + XA_CHUNK_SHIFT;
2302 : xa_dump_entry(entry, 0, shift);
2303 : }
2304 : #endif
|