// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/dma-resv.h>
#include <linux/dma-fence-array.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/seq_file.h>

/**
 * DOC: Reservation Object Overview
 *
 * The reservation object provides a mechanism to manage a container of
 * dma_fence objects associated with a resource. A reservation object
 * can have any number of fences attached to it. Each fence carries a usage
 * parameter determining how the operation represented by the fence uses the
 * resource. The RCU mechanism is used to protect read access to fences from
 * locked write-side updates.
 *
 * See struct dma_resv for more details.
 */

DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);
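
/*
 * A minimal locking sketch (illustrative only, not part of the original
 * file; "obj_a" and "obj_b" are hypothetical dma_resv objects): multiple
 * reservation objects are locked through a shared ww_acquire_ctx so that
 * ABBA deadlocks are detected as -EDEADLK and resolved by backing off and
 * reacquiring in the slow path (error handling elided):
 *
 *	struct ww_acquire_ctx ctx;
 *
 *	ww_acquire_init(&ctx, &reservation_ww_class);
 *	dma_resv_lock(&obj_a, &ctx);
 *	if (dma_resv_lock(&obj_b, &ctx) == -EDEADLK) {
 *		dma_resv_unlock(&obj_a);
 *		dma_resv_lock_slow(&obj_b, &ctx);
 *		dma_resv_lock(&obj_a, &ctx);
 *	}
 *	ww_acquire_done(&ctx);
 *	...
 *	dma_resv_unlock(&obj_b);
 *	dma_resv_unlock(&obj_a);
 *	ww_acquire_fini(&ctx);
 */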

/* Mask for the lower fence pointer bits */
#define DMA_RESV_LIST_MASK 0x3

struct dma_resv_list {
	struct rcu_head rcu;
	u32 num_fences, max_fences;
	struct dma_fence __rcu *table[];
};

/* Extract the fence and usage flags from an RCU protected entry in the list. */
static void dma_resv_list_entry(struct dma_resv_list *list, unsigned int index,
				struct dma_resv *resv, struct dma_fence **fence,
				enum dma_resv_usage *usage)
{
	long tmp;

	tmp = (long)rcu_dereference_check(list->table[index],
					  resv ? dma_resv_held(resv) : true);
	*fence = (struct dma_fence *)(tmp & ~DMA_RESV_LIST_MASK);
	if (usage)
		*usage = tmp & DMA_RESV_LIST_MASK;
}

/* Set the fence and usage flags at the specific index in the list. */
static void dma_resv_list_set(struct dma_resv_list *list,
			      unsigned int index,
			      struct dma_fence *fence,
			      enum dma_resv_usage usage)
{
	long tmp = ((long)fence) | usage;

	RCU_INIT_POINTER(list->table[index], (struct dma_fence *)tmp);
}

/*
 * Allocate a new dma_resv_list and make sure to correctly initialize
 * max_fences.
 */
static struct dma_resv_list *dma_resv_list_alloc(unsigned int max_fences)
{
	struct dma_resv_list *list;

	list = kmalloc(struct_size(list, table, max_fences), GFP_KERNEL);
	if (!list)
		return NULL;

	list->max_fences = (ksize(list) - offsetof(typeof(*list), table)) /
		sizeof(*list->table);

	return list;
}

/* Free a dma_resv_list and make sure to drop all references. */
static void dma_resv_list_free(struct dma_resv_list *list)
{
	unsigned int i;

	if (!list)
		return;

	for (i = 0; i < list->num_fences; ++i) {
		struct dma_fence *fence;

		dma_resv_list_entry(list, i, NULL, &fence, NULL);
		dma_fence_put(fence);
	}
	kfree_rcu(list, rcu);
}

/**
 * dma_resv_init - initialize a reservation object
 * @obj: the reservation object
 */
void dma_resv_init(struct dma_resv *obj)
{
	ww_mutex_init(&obj->lock, &reservation_ww_class);

	RCU_INIT_POINTER(obj->fences, NULL);
}
EXPORT_SYMBOL(dma_resv_init);

/**
 * dma_resv_fini - destroys a reservation object
 * @obj: the reservation object
 */
void dma_resv_fini(struct dma_resv *obj)
{
	/*
	 * This object should be dead and all references to it must have
	 * been released, so there is no need for RCU protection.
	 */
	dma_resv_list_free(rcu_dereference_protected(obj->fences, true));
	ww_mutex_destroy(&obj->lock);
}
EXPORT_SYMBOL(dma_resv_fini);

/* Dereference the fences while ensuring RCU rules */
static inline struct dma_resv_list *dma_resv_fences_list(struct dma_resv *obj)
{
	return rcu_dereference_check(obj->fences, dma_resv_held(obj));
}

/**
 * dma_resv_reserve_fences - Reserve space to add fences to a dma_resv object.
 * @obj: reservation object
 * @num_fences: number of fences we want to add
 *
 * Should be called before dma_resv_add_fence(). Must be called with @obj
 * locked through dma_resv_lock().
 *
 * Note that the preallocated slots need to be re-reserved if @obj is unlocked
 * at any time before calling dma_resv_add_fence(). This is validated when
 * CONFIG_DEBUG_MUTEXES is enabled.
 *
 * RETURNS
 * Zero for success, or -errno
 */
int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences)
{
	struct dma_resv_list *old, *new;
	unsigned int i, j, k, max;

	dma_resv_assert_held(obj);

	old = dma_resv_fences_list(obj);
	if (old && old->max_fences) {
		if ((old->num_fences + num_fences) <= old->max_fences)
			return 0;
		max = max(old->num_fences + num_fences, old->max_fences * 2);
	} else {
		max = max(4ul, roundup_pow_of_two(num_fences));
	}

	new = dma_resv_list_alloc(max);
	if (!new)
		return -ENOMEM;

	/*
	 * no need to bump fence refcounts, rcu_read access
	 * requires the use of kref_get_unless_zero, and the
	 * references from the old struct are carried over to
	 * the new.
	 */
	for (i = 0, j = 0, k = max; i < (old ? old->num_fences : 0); ++i) {
		enum dma_resv_usage usage;
		struct dma_fence *fence;

		dma_resv_list_entry(old, i, obj, &fence, &usage);
		if (dma_fence_is_signaled(fence))
			RCU_INIT_POINTER(new->table[--k], fence);
		else
			dma_resv_list_set(new, j++, fence, usage);
	}
	new->num_fences = j;

	/*
	 * We are not changing the effective set of fences here so can
	 * merely update the pointer to the new array; both existing
	 * readers and new readers will see exactly the same set of
	 * active (unsignaled) fences. Individual fences and the
	 * old array are protected by RCU and so will not vanish under
	 * the gaze of the rcu_read_lock() readers.
	 */
	rcu_assign_pointer(obj->fences, new);

	if (!old)
		return 0;

	/* Drop the references to the signaled fences */
	for (i = k; i < max; ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(new->table[i],
						  dma_resv_held(obj));
		dma_fence_put(fence);
	}
	kfree_rcu(old, rcu);

	return 0;
}
EXPORT_SYMBOL(dma_resv_reserve_fences);

#ifdef CONFIG_DEBUG_MUTEXES
/**
 * dma_resv_reset_max_fences - reset fences for debugging
 * @obj: the dma_resv object to reset
 *
 * Reset the number of pre-reserved fence slots to test that drivers do
 * correct slot allocation using dma_resv_reserve_fences(). See also
 * &dma_resv_list.max_fences.
 */
void dma_resv_reset_max_fences(struct dma_resv *obj)
{
	struct dma_resv_list *fences = dma_resv_fences_list(obj);

	dma_resv_assert_held(obj);

	/* Test fence slot reservation */
	if (fences)
		fences->max_fences = fences->num_fences;
}
EXPORT_SYMBOL(dma_resv_reset_max_fences);
#endif

/**
 * dma_resv_add_fence - Add a fence to the dma_resv obj
 * @obj: the reservation object
 * @fence: the fence to add
 * @usage: how the fence is used, see enum dma_resv_usage
 *
 * Add a fence to a slot. @obj must be locked with dma_resv_lock(), and
 * dma_resv_reserve_fences() must have been called.
 *
 * See also &dma_resv.fences for a discussion of the semantics.
 */
void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
			enum dma_resv_usage usage)
{
	struct dma_resv_list *fobj;
	struct dma_fence *old;
	unsigned int i, count;

	dma_fence_get(fence);

	dma_resv_assert_held(obj);

	/* Drivers should not add containers here, instead add each fence
	 * individually.
	 */
	WARN_ON(dma_fence_is_container(fence));

	fobj = dma_resv_fences_list(obj);
	count = fobj->num_fences;

	for (i = 0; i < count; ++i) {
		enum dma_resv_usage old_usage;

		dma_resv_list_entry(fobj, i, obj, &old, &old_usage);
		if ((old->context == fence->context && old_usage >= usage) ||
		    dma_fence_is_signaled(old)) {
			dma_resv_list_set(fobj, i, fence, usage);
			dma_fence_put(old);
			return;
		}
	}

	BUG_ON(fobj->num_fences >= fobj->max_fences);
	count++;

	dma_resv_list_set(fobj, i, fence, usage);
	/* pointer update must be visible before we extend the num_fences */
	smp_store_mb(fobj->num_fences, count);
}
EXPORT_SYMBOL(dma_resv_add_fence);
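
/*
 * A minimal usage sketch (illustrative only; "resv" and "fence" are
 * hypothetical variables): a slot must be reserved under the reservation
 * lock before the fence can be added.
 *
 *	int ret;
 *
 *	dma_resv_lock(resv, NULL);
 *	ret = dma_resv_reserve_fences(resv, 1);
 *	if (!ret)
 *		dma_resv_add_fence(resv, fence, DMA_RESV_USAGE_WRITE);
 *	dma_resv_unlock(resv);
 */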

/**
 * dma_resv_replace_fences - replace fences in the dma_resv obj
 * @obj: the reservation object
 * @context: the context of the fences to replace
 * @replacement: the new fence to use instead
 * @usage: how the new fence is used, see enum dma_resv_usage
 *
 * Replace fences with a specified context with a new fence. Only valid if the
 * operation represented by the original fence no longer has access to the
 * resources represented by the dma_resv object when the new fence completes.
 *
 * An example of using this is replacing a preemption fence with a page table
 * update fence which makes the resource inaccessible.
 */
void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
			     struct dma_fence *replacement,
			     enum dma_resv_usage usage)
{
	struct dma_resv_list *list;
	unsigned int i;

	dma_resv_assert_held(obj);

	list = dma_resv_fences_list(obj);
	for (i = 0; list && i < list->num_fences; ++i) {
		struct dma_fence *old;

		dma_resv_list_entry(list, i, obj, &old, NULL);
		if (old->context != context)
			continue;

		dma_resv_list_set(list, i, dma_fence_get(replacement), usage);
		dma_fence_put(old);
	}
}
EXPORT_SYMBOL(dma_resv_replace_fences);
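
/*
 * Illustrative sketch (hypothetical "resv", "preempt_ctx" and
 * "pt_update_fence" names): replace the fences of a preempted context
 * with the page table update fence that actually makes the resource
 * inaccessible, e.g. tracked for bookkeeping purposes:
 *
 *	dma_resv_lock(resv, NULL);
 *	dma_resv_replace_fences(resv, preempt_ctx, pt_update_fence,
 *				DMA_RESV_USAGE_BOOKKEEP);
 *	dma_resv_unlock(resv);
 */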

/* Restart the unlocked iteration by initializing the cursor object. */
static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor)
{
	cursor->index = 0;
	cursor->num_fences = 0;
	cursor->fences = dma_resv_fences_list(cursor->obj);
	if (cursor->fences)
		cursor->num_fences = cursor->fences->num_fences;
	cursor->is_restarted = true;
}

/* Walk to the next unsignaled fence and grab a reference to it */
static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor)
{
	if (!cursor->fences)
		return;

	do {
		/* Drop the reference from the previous round */
		dma_fence_put(cursor->fence);

		if (cursor->index >= cursor->num_fences) {
			cursor->fence = NULL;
			break;
		}

		dma_resv_list_entry(cursor->fences, cursor->index++,
				    cursor->obj, &cursor->fence,
				    &cursor->fence_usage);
		cursor->fence = dma_fence_get_rcu(cursor->fence);
		if (!cursor->fence) {
			dma_resv_iter_restart_unlocked(cursor);
			continue;
		}

		if (!dma_fence_is_signaled(cursor->fence) &&
		    cursor->usage >= cursor->fence_usage)
			break;
	} while (true);
}

/**
 * dma_resv_iter_first_unlocked - first fence in an unlocked dma_resv obj.
 * @cursor: the cursor with the current position
 *
 * Subsequent fences are iterated with dma_resv_iter_next_unlocked().
 *
 * Beware that the iterator can be restarted. Code which accumulates statistics
 * or similar needs to check for this with dma_resv_iter_is_restarted(). For
 * this reason prefer the locked dma_resv_iter_first() whenever possible.
 *
 * Returns the first fence from an unlocked dma_resv obj.
 */
struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor)
{
	rcu_read_lock();
	do {
		dma_resv_iter_restart_unlocked(cursor);
		dma_resv_iter_walk_unlocked(cursor);
	} while (dma_resv_fences_list(cursor->obj) != cursor->fences);
	rcu_read_unlock();

	return cursor->fence;
}
EXPORT_SYMBOL(dma_resv_iter_first_unlocked);

/**
 * dma_resv_iter_next_unlocked - next fence in an unlocked dma_resv obj.
 * @cursor: the cursor with the current position
 *
 * Beware that the iterator can be restarted. Code which accumulates statistics
 * or similar needs to check for this with dma_resv_iter_is_restarted(). For
 * this reason prefer the locked dma_resv_iter_next() whenever possible.
 *
 * Returns the next fence from an unlocked dma_resv obj.
 */
struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor)
{
	bool restart;

	rcu_read_lock();
	cursor->is_restarted = false;
	restart = dma_resv_fences_list(cursor->obj) != cursor->fences;
	do {
		if (restart)
			dma_resv_iter_restart_unlocked(cursor);
		dma_resv_iter_walk_unlocked(cursor);
		restart = true;
	} while (dma_resv_fences_list(cursor->obj) != cursor->fences);
	rcu_read_unlock();

	return cursor->fence;
}
EXPORT_SYMBOL(dma_resv_iter_next_unlocked);
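
/*
 * Illustrative sketch of unlocked iteration (hypothetical "resv" object):
 * dma_resv_for_each_fence_unlocked() is built on the two functions above;
 * any state accumulated so far must be thrown away whenever the iterator
 * signals a restart.
 *
 *	struct dma_resv_iter cursor;
 *	struct dma_fence *fence;
 *	unsigned int count = 0;
 *
 *	dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_READ);
 *	dma_resv_for_each_fence_unlocked(&cursor, fence) {
 *		if (dma_resv_iter_is_restarted(&cursor))
 *			count = 0;
 *		++count;
 *	}
 *	dma_resv_iter_end(&cursor);
 */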

/**
 * dma_resv_iter_first - first fence from a locked dma_resv object
 * @cursor: cursor to record the current position
 *
 * Subsequent fences are iterated with dma_resv_iter_next().
 *
 * Return the first fence in the dma_resv object while holding the
 * &dma_resv.lock.
 */
struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor)
{
	struct dma_fence *fence;

	dma_resv_assert_held(cursor->obj);

	cursor->index = 0;
	cursor->fences = dma_resv_fences_list(cursor->obj);

	fence = dma_resv_iter_next(cursor);
	cursor->is_restarted = true;
	return fence;
}
EXPORT_SYMBOL_GPL(dma_resv_iter_first);

/**
 * dma_resv_iter_next - next fence from a locked dma_resv object
 * @cursor: cursor to record the current position
 *
 * Return the next fence from the dma_resv object while holding the
 * &dma_resv.lock.
 */
struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor)
{
	struct dma_fence *fence;

	dma_resv_assert_held(cursor->obj);

	cursor->is_restarted = false;

	do {
		if (!cursor->fences ||
		    cursor->index >= cursor->fences->num_fences)
			return NULL;

		dma_resv_list_entry(cursor->fences, cursor->index++,
				    cursor->obj, &fence, &cursor->fence_usage);
	} while (cursor->fence_usage > cursor->usage);

	return fence;
}
EXPORT_SYMBOL_GPL(dma_resv_iter_next);
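
/*
 * Illustrative sketch of locked iteration (hypothetical "resv" object):
 * with &dma_resv.lock held the fence list cannot change, so no restart
 * handling is needed and no extra fence references are taken.
 *
 *	struct dma_resv_iter cursor;
 *	struct dma_fence *fence;
 *
 *	dma_resv_lock(resv, NULL);
 *	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_WRITE, fence)
 *		pr_info("fence context %llu\n", fence->context);
 *	dma_resv_unlock(resv);
 */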

/**
 * dma_resv_copy_fences - Copy all fences from src to dst.
 * @dst: the destination reservation object
 * @src: the source reservation object
 *
 * Copy all fences from src to dst. dst-lock must be held.
 */
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
	struct dma_resv_iter cursor;
	struct dma_resv_list *list;
	struct dma_fence *f;

	dma_resv_assert_held(dst);

	list = NULL;

	dma_resv_iter_begin(&cursor, src, DMA_RESV_USAGE_BOOKKEEP);
	dma_resv_for_each_fence_unlocked(&cursor, f) {

		if (dma_resv_iter_is_restarted(&cursor)) {
			dma_resv_list_free(list);

			list = dma_resv_list_alloc(cursor.num_fences);
			if (!list) {
				dma_resv_iter_end(&cursor);
				return -ENOMEM;
			}
			list->num_fences = 0;
		}

		dma_fence_get(f);
		dma_resv_list_set(list, list->num_fences++, f,
				  dma_resv_iter_usage(&cursor));
	}
	dma_resv_iter_end(&cursor);

	list = rcu_replace_pointer(dst->fences, list, dma_resv_held(dst));
	dma_resv_list_free(list);
	return 0;
}
EXPORT_SYMBOL(dma_resv_copy_fences);

/**
 * dma_resv_get_fences - Get an object's fences without the update side
 * lock held
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 * @num_fences: the number of fences returned
 * @fences: the array of fence ptrs returned (array is krealloc'd to the
 * required size, and must be freed by caller)
 *
 * Retrieve all fences from the reservation object.
 * Returns either zero or -ENOMEM.
 */
int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
			unsigned int *num_fences, struct dma_fence ***fences)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	*num_fences = 0;
	*fences = NULL;

	dma_resv_iter_begin(&cursor, obj, usage);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {

		if (dma_resv_iter_is_restarted(&cursor)) {
			unsigned int count;

			while (*num_fences)
				dma_fence_put((*fences)[--(*num_fences)]);

			count = cursor.num_fences + 1;

			/* Eventually re-allocate the array */
			*fences = krealloc_array(*fences, count,
						 sizeof(void *),
						 GFP_KERNEL);
			if (count && !*fences) {
				dma_resv_iter_end(&cursor);
				return -ENOMEM;
			}
		}

		(*fences)[(*num_fences)++] = dma_fence_get(fence);
	}
	dma_resv_iter_end(&cursor);

	return 0;
}
EXPORT_SYMBOL_GPL(dma_resv_get_fences);
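
/*
 * Illustrative sketch (hypothetical "resv" object): the returned array
 * holds a reference on every fence, so the caller must drop the
 * references and free the array when done.
 *
 *	struct dma_fence **fences;
 *	unsigned int i, count;
 *	int ret;
 *
 *	ret = dma_resv_get_fences(resv, DMA_RESV_USAGE_READ, &count, &fences);
 *	if (!ret) {
 *		for (i = 0; i < count; ++i)
 *			dma_fence_put(fences[i]);
 *		kfree(fences);
 *	}
 */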

/**
 * dma_resv_get_singleton - Get a single fence for all the fences
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 * @fence: the resulting fence
 *
 * Get a single fence representing all the fences inside the resv object.
 *
 * Warning: This can't be used like this when adding the fence back to the resv
 * object since that can lead to stack corruption when finalizing the
 * dma_fence_array.
 *
 * Returns 0 on success and negative error values on failure.
 */
int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage,
			   struct dma_fence **fence)
{
	struct dma_fence_array *array;
	struct dma_fence **fences;
	unsigned count;
	int r;

	r = dma_resv_get_fences(obj, usage, &count, &fences);
	if (r)
		return r;

	if (count == 0) {
		*fence = NULL;
		return 0;
	}

	if (count == 1) {
		*fence = fences[0];
		kfree(fences);
		return 0;
	}

	array = dma_fence_array_create(count, fences,
				       dma_fence_context_alloc(1),
				       1, false);
	if (!array) {
		while (count--)
			dma_fence_put(fences[count]);
		kfree(fences);
		return -ENOMEM;
	}

	*fence = &array->base;
	return 0;
}
EXPORT_SYMBOL_GPL(dma_resv_get_singleton);
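
/*
 * Illustrative sketch (hypothetical "resv" object): collapse everything
 * currently attached to the object into a single fence and wait on it.
 *
 *	struct dma_fence *fence;
 *	int ret;
 *
 *	ret = dma_resv_get_singleton(resv, DMA_RESV_USAGE_WRITE, &fence);
 *	if (!ret && fence) {
 *		dma_fence_wait(fence, false);
 *		dma_fence_put(fence);
 *	}
 */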

/**
 * dma_resv_wait_timeout - Wait on a reservation object's fences
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 * @intr: if true, do interruptible wait
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Callers are not required to hold specific locks, but may hold
 * dma_resv_lock() already.
 *
 * RETURNS
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
 */
long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
			   bool intr, unsigned long timeout)
{
	long ret = timeout ? timeout : 1;
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, obj, usage);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {

		ret = dma_fence_wait_timeout(fence, intr, ret);
		if (ret <= 0) {
			dma_resv_iter_end(&cursor);
			return ret;
		}
	}
	dma_resv_iter_end(&cursor);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
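
/*
 * Illustrative sketch (hypothetical "resv" object): interruptible wait
 * for all writers to finish, giving up after one second:
 *
 *	long ret;
 *
 *	ret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_WRITE, true,
 *				    msecs_to_jiffies(1000));
 *	if (ret == 0)
 *		return -ETIMEDOUT;
 *	if (ret < 0)
 *		return ret;
 */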

/**
 * dma_resv_test_signaled - Test if a reservation object's fences have been
 * signaled.
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 *
 * Callers are not required to hold specific locks, but may hold
 * dma_resv_lock() already.
 *
 * RETURNS
 *
 * True if all fences signaled, else false.
 */
bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, obj, usage);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		dma_resv_iter_end(&cursor);
		return false;
	}
	dma_resv_iter_end(&cursor);
	return true;
}
EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
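
/*
 * Illustrative sketch (hypothetical "resv" object): non-blocking check
 * whether all writers have finished:
 *
 *	bool idle;
 *
 *	idle = dma_resv_test_signaled(resv, DMA_RESV_USAGE_WRITE);
 */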

/**
 * dma_resv_describe - Dump description of the resv object into seq_file
 * @obj: the reservation object
 * @seq: the seq_file to dump the description into
 *
 * Dump a textual description of the fences inside a dma_resv object into the
 * seq_file.
 */
void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq)
{
	static const char *usage[] = { "kernel", "write", "read", "bookkeep" };
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_for_each_fence(&cursor, obj, DMA_RESV_USAGE_READ, fence) {
		seq_printf(seq, "\t%s fence:",
			   usage[dma_resv_iter_usage(&cursor)]);
		dma_fence_describe(fence, seq);
	}
}
EXPORT_SYMBOL_GPL(dma_resv_describe);

#if IS_ENABLED(CONFIG_LOCKDEP)
static int __init dma_resv_lockdep(void)
{
	struct mm_struct *mm = mm_alloc();
	struct ww_acquire_ctx ctx;
	struct dma_resv obj;
	struct address_space mapping;
	int ret;

	if (!mm)
		return -ENOMEM;

	dma_resv_init(&obj);
	address_space_init_once(&mapping);

	mmap_read_lock(mm);
	ww_acquire_init(&ctx, &reservation_ww_class);
	ret = dma_resv_lock(&obj, &ctx);
	if (ret == -EDEADLK)
		dma_resv_lock_slow(&obj, &ctx);
	fs_reclaim_acquire(GFP_KERNEL);
	/* for unmap_mapping_range on trylocked buffer objects in shrinkers */
	i_mmap_lock_write(&mapping);
	i_mmap_unlock_write(&mapping);
#ifdef CONFIG_MMU_NOTIFIER
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	__dma_fence_might_wait();
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
#else
	__dma_fence_might_wait();
#endif
	fs_reclaim_release(GFP_KERNEL);
	ww_mutex_unlock(&obj.lock);
	ww_acquire_fini(&ctx);
	mmap_read_unlock(mm);

	mmput(mm);

	return 0;
}
subsys_initcall(dma_resv_lockdep);
#endif