// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

/* Pooling of allocated pages is necessary because changing the caching
 * attributes of the linear mapping on x86 requires a costly cross-CPU TLB
 * invalidate for those addresses.
 *
 * In addition, allocations from the DMA coherent API are pooled as well
 * because they are rather slow compared to alloc_pages+map.
 */

#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/sched/mm.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/ttm/ttm_pool.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_tt.h>

#include "ttm_module.h"

/**
 * struct ttm_pool_dma - Helper object for coherent DMA mappings
 *
 * @addr: original DMA address returned for the mapping
 * @vaddr: original vaddr returned for the mapping, with the allocation order
 *	stored in the lower bits
 */
struct ttm_pool_dma {
	dma_addr_t addr;
	unsigned long vaddr;
};

static unsigned long page_pool_size;

MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
module_param(page_pool_size, ulong, 0644);
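/* Note: a page_pool_size of zero is replaced with the default handed to
 * ttm_pool_mgr_init() when the module is initialized, see below.
 */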

static atomic_long_t allocated_pages;

static struct ttm_pool_type global_write_combined[MAX_ORDER];
static struct ttm_pool_type global_uncached[MAX_ORDER];

static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];

static spinlock_t shrinker_lock;
static struct list_head shrinker_list;
static struct shrinker mm_shrinker;

/* Allocate pages of size 1 << order with the given gfp_flags */
static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
					unsigned int order)
{
	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
	struct ttm_pool_dma *dma;
	struct page *p;
	void *vaddr;

	/* Don't set the __GFP_COMP flag for higher order allocations.
	 * Mapping pages directly into a userspace process and calling
	 * put_page() on a TTM allocated page is illegal.
	 */
	if (order)
		gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
			__GFP_KSWAPD_RECLAIM;

	if (!pool->use_dma_alloc) {
		p = alloc_pages(gfp_flags, order);
		if (p)
			p->private = order;
		return p;
	}

	dma = kmalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return NULL;

	if (order)
		attr |= DMA_ATTR_NO_WARN;

	vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
				&dma->addr, gfp_flags, attr);
	if (!vaddr)
		goto error_free;

	/* TODO: This is an illegal abuse of the DMA API, but we need to rework
	 * TTM page fault handling and extend the DMA API to clean this up.
	 */
	if (is_vmalloc_addr(vaddr))
		p = vmalloc_to_page(vaddr);
	else
		p = virt_to_page(vaddr);

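	/* dma_alloc_attrs() returns at least PAGE_SIZE-aligned memory, so the
	 * low PAGE_SHIFT bits of vaddr are free to stash the allocation
	 * order. ttm_pool_page_order() below recovers it again with
	 * "vaddr & ~PAGE_MASK".
	 */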
	dma->vaddr = (unsigned long)vaddr | order;
	p->private = (unsigned long)dma;
	return p;

error_free:
	kfree(dma);
	return NULL;
}

/* Reset the caching of pages of size 1 << order and free them */
static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
			       unsigned int order, struct page *p)
{
	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
	struct ttm_pool_dma *dma;
	void *vaddr;

#ifdef CONFIG_X86
	/* We don't care that set_pages_wb is inefficient here. This is only
	 * used when we have to shrink and CPU overhead is irrelevant then.
	 */
	if (caching != ttm_cached && !PageHighMem(p))
		set_pages_wb(p, 1 << order);
#endif

	if (!pool || !pool->use_dma_alloc) {
		__free_pages(p, order);
		return;
	}

	if (order)
		attr |= DMA_ATTR_NO_WARN;

	dma = (void *)p->private;
	vaddr = (void *)(dma->vaddr & PAGE_MASK);
	dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr,
		       attr);
	kfree(dma);
}

/* Apply a new caching to an array of pages */
static int ttm_pool_apply_caching(struct page **first, struct page **last,
				  enum ttm_caching caching)
{
#ifdef CONFIG_X86
	unsigned int num_pages = last - first;

	if (!num_pages)
		return 0;

	switch (caching) {
	case ttm_cached:
		break;
	case ttm_write_combined:
		return set_pages_array_wc(first, num_pages);
	case ttm_uncached:
		return set_pages_array_uc(first, num_pages);
	}
#endif
	return 0;
}

/* Map pages of 1 << order size and fill the DMA address array */
static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
			struct page *p, dma_addr_t **dma_addr)
{
	dma_addr_t addr;
	unsigned int i;

	if (pool->use_dma_alloc) {
		struct ttm_pool_dma *dma = (void *)p->private;

		addr = dma->addr;
	} else {
		size_t size = (1ULL << order) * PAGE_SIZE;

		addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(pool->dev, addr))
			return -EFAULT;
	}

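	/* Fan the single contiguous mapping out into one dma_addr_t entry
	 * per PAGE_SIZE chunk, advancing the caller's cursor as we go.
	 */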
	for (i = 1 << order; i ; --i) {
		*(*dma_addr)++ = addr;
		addr += PAGE_SIZE;
	}

	return 0;
}

/* Unmap pages of 1 << order size */
static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
			   unsigned int num_pages)
{
	/* Unmapped while freeing the page */
	if (pool->use_dma_alloc)
		return;

	dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT,
		       DMA_BIDIRECTIONAL);
}

/* Give pages into a specific pool_type */
static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
{
	unsigned int i, num_pages = 1 << pt->order;

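	/* Clear the pages before adding them to the pool, so that anything
	 * later taken from the pool again is already zeroed.
	 */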
	for (i = 0; i < num_pages; ++i) {
		if (PageHighMem(p))
			clear_highpage(p + i);
		else
			clear_page(page_address(p + i));
	}

	spin_lock(&pt->lock);
	list_add(&p->lru, &pt->pages);
	spin_unlock(&pt->lock);
	atomic_long_add(1 << pt->order, &allocated_pages);
}

/* Take pages from a specific pool_type, return NULL when nothing available */
static struct page *ttm_pool_type_take(struct ttm_pool_type *pt)
{
	struct page *p;

	spin_lock(&pt->lock);
	p = list_first_entry_or_null(&pt->pages, typeof(*p), lru);
	if (p) {
		atomic_long_sub(1 << pt->order, &allocated_pages);
		list_del(&p->lru);
	}
	spin_unlock(&pt->lock);

	return p;
}

/* Initialize and add a pool type to the global shrinker list */
static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
			       enum ttm_caching caching, unsigned int order)
{
	pt->pool = pool;
	pt->caching = caching;
	pt->order = order;
	spin_lock_init(&pt->lock);
	INIT_LIST_HEAD(&pt->pages);

	spin_lock(&shrinker_lock);
	list_add_tail(&pt->shrinker_list, &shrinker_list);
	spin_unlock(&shrinker_lock);
}

/* Remove a pool_type from the global shrinker list and free all pages */
static void ttm_pool_type_fini(struct ttm_pool_type *pt)
{
	struct page *p;

	spin_lock(&shrinker_lock);
	list_del(&pt->shrinker_list);
	spin_unlock(&shrinker_lock);

	while ((p = ttm_pool_type_take(pt)))
		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
}

/* Return the pool_type to use for the given caching and order */
static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
						  enum ttm_caching caching,
						  unsigned int order)
{
	if (pool->use_dma_alloc)
		return &pool->caching[caching].orders[order];

#ifdef CONFIG_X86
	switch (caching) {
	case ttm_write_combined:
		if (pool->use_dma32)
			return &global_dma32_write_combined[order];

		return &global_write_combined[order];
	case ttm_uncached:
		if (pool->use_dma32)
			return &global_dma32_uncached[order];

		return &global_uncached[order];
	default:
		break;
	}
#endif

	return NULL;
}

/* Free pages using the global shrinker list */
static unsigned int ttm_pool_shrink(void)
{
	struct ttm_pool_type *pt;
	unsigned int num_pages;
	struct page *p;

	spin_lock(&shrinker_lock);
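	/* Rotate the first pool type to the tail of the list, so that
	 * repeated shrink calls cycle round-robin through all pool types.
	 */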
	pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
	list_move_tail(&pt->shrinker_list, &shrinker_list);
	spin_unlock(&shrinker_lock);

	p = ttm_pool_type_take(pt);
	if (p) {
		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
		num_pages = 1 << pt->order;
	} else {
		num_pages = 0;
	}

	return num_pages;
}

/* Return the allocation order of a page */
static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
{
	if (pool->use_dma_alloc) {
		struct ttm_pool_dma *dma = (void *)p->private;

		return dma->vaddr & ~PAGE_MASK;
	}

	return p->private;
}

/**
 * ttm_pool_alloc - Fill a ttm_tt object
 *
 * @pool: ttm_pool to use
 * @tt: ttm_tt object to fill
 * @ctx: operation context
 *
 * Fill the ttm_tt object with pages and also make sure to DMA map them when
 * necessary.
 *
 * Returns: 0 on success, negative error code otherwise.
 */
int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
		   struct ttm_operation_ctx *ctx)
{
	unsigned long num_pages = tt->num_pages;
	dma_addr_t *dma_addr = tt->dma_address;
	struct page **caching = tt->pages;
	struct page **pages = tt->pages;
	gfp_t gfp_flags = GFP_USER;
	unsigned int i, order;
	struct page *p;
	int r;

	WARN_ON(!num_pages || ttm_tt_is_populated(tt));
	WARN_ON(dma_addr && !pool->dev);

	if (tt->page_flags & TTM_TT_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	if (ctx->gfp_retry_mayfail)
		gfp_flags |= __GFP_RETRY_MAYFAIL;

	if (pool->use_dma32)
		gfp_flags |= GFP_DMA32;
	else
		gfp_flags |= GFP_HIGHUSER;

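	/* Start with the highest order that still fits num_pages and fall
	 * back to smaller orders when an allocation fails. The window
	 * [caching, pages) tracks freshly allocated pages whose caching
	 * attributes still need changing; whenever a page arrives that needs
	 * no change (taken from a pool, or a highmem page with no linear
	 * mapping to modify), the pending window is converted in one batch
	 * and restarted behind it.
	 */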
	for (order = min_t(unsigned int, MAX_ORDER - 1, __fls(num_pages));
	     num_pages;
	     order = min_t(unsigned int, order, __fls(num_pages))) {
		bool apply_caching = false;
		struct ttm_pool_type *pt;

		pt = ttm_pool_select_type(pool, tt->caching, order);
		p = pt ? ttm_pool_type_take(pt) : NULL;
		if (p) {
			apply_caching = true;
		} else {
			p = ttm_pool_alloc_page(pool, gfp_flags, order);
			if (p && PageHighMem(p))
				apply_caching = true;
		}

		if (!p) {
			if (order) {
				--order;
				continue;
			}
			r = -ENOMEM;
			goto error_free_all;
		}

		if (apply_caching) {
			r = ttm_pool_apply_caching(caching, pages,
						   tt->caching);
			if (r)
				goto error_free_page;
			caching = pages + (1 << order);
		}

		if (dma_addr) {
			r = ttm_pool_map(pool, order, p, &dma_addr);
			if (r)
				goto error_free_page;
		}

		num_pages -= 1 << order;
		for (i = 1 << order; i; --i)
			*(pages++) = p++;
	}

	r = ttm_pool_apply_caching(caching, pages, tt->caching);
	if (r)
		goto error_free_all;

	return 0;

error_free_page:
	ttm_pool_free_page(pool, tt->caching, order, p);

error_free_all:
	num_pages = tt->num_pages - num_pages;
	for (i = 0; i < num_pages; ) {
		order = ttm_pool_page_order(pool, tt->pages[i]);
		ttm_pool_free_page(pool, tt->caching, order, tt->pages[i]);
		i += 1 << order;
	}

	return r;
}
EXPORT_SYMBOL(ttm_pool_alloc);

/**
 * ttm_pool_free - Free the backing pages from a ttm_tt object
 *
 * @pool: Pool to give pages back to.
 * @tt: ttm_tt object to unpopulate
 *
 * Give the backing pages back to a pool or free them.
 */
void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
{
	unsigned int i;

	for (i = 0; i < tt->num_pages; ) {
		struct page *p = tt->pages[i];
		unsigned int order, num_pages;
		struct ttm_pool_type *pt;

		order = ttm_pool_page_order(pool, p);
		num_pages = 1ULL << order;
		if (tt->dma_address)
			ttm_pool_unmap(pool, tt->dma_address[i], num_pages);

		pt = ttm_pool_select_type(pool, tt->caching, order);
		if (pt)
			ttm_pool_type_give(pt, tt->pages[i]);
		else
			ttm_pool_free_page(pool, tt->caching, order,
					   tt->pages[i]);

		i += num_pages;
	}

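	/* Enforce the global pool size limit: drop pages from the pools
	 * until the total is back under the page_pool_size parameter.
	 */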
	while (atomic_long_read(&allocated_pages) > page_pool_size)
		ttm_pool_shrink();
}
EXPORT_SYMBOL(ttm_pool_free);

/**
 * ttm_pool_init - Initialize a pool
 *
 * @pool: the pool to initialize
 * @dev: device for DMA allocations and mappings
 * @use_dma_alloc: true if coherent DMA alloc should be used
 * @use_dma32: true if GFP_DMA32 should be used
 *
 * Initialize the pool and its pool types.
 */
void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
		   bool use_dma_alloc, bool use_dma32)
{
	unsigned int i, j;

	WARN_ON(!dev && use_dma_alloc);

	pool->dev = dev;
	pool->use_dma_alloc = use_dma_alloc;
	pool->use_dma32 = use_dma32;

	if (use_dma_alloc) {
		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
			for (j = 0; j < MAX_ORDER; ++j)
				ttm_pool_type_init(&pool->caching[i].orders[j],
						   pool, i, j);
	}
}

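/* Typical life cycle of a pool, as an illustrative sketch (the surrounding
 * driver object, the ttm_tt and the ttm_operation_ctx setup are hypothetical
 * and not part of this file):
 *
 *	struct ttm_pool pool;
 *
 *	ttm_pool_init(&pool, dev, true, false);	// coherent DMA, no DMA32
 *	...
 *	ttm_pool_alloc(&pool, tt, &ctx);	// populate a ttm_tt
 *	...
 *	ttm_pool_free(&pool, tt);		// give pages back to the pool
 *	...
 *	ttm_pool_fini(&pool);			// drop all pooled pages
 */
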
/**
 * ttm_pool_fini - Cleanup a pool
 *
 * @pool: the pool to clean up
 *
 * Free all pages in the pool and unregister the types from the global
 * shrinker.
 */
void ttm_pool_fini(struct ttm_pool *pool)
{
	unsigned int i, j;

	if (pool->use_dma_alloc) {
		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
			for (j = 0; j < MAX_ORDER; ++j)
				ttm_pool_type_fini(&pool->caching[i].orders[j]);
	}

	/* We removed the pool types from the LRU, but we need to also make sure
	 * that no shrinker is concurrently freeing pages from the pool.
	 */
	synchronize_shrinkers();
}

/* As long as pages are available make sure to release at least one */
static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
					    struct shrink_control *sc)
{
	unsigned long num_freed = 0;

	do
		num_freed += ttm_pool_shrink();
	while (!num_freed && atomic_long_read(&allocated_pages));

	return num_freed;
}

/* Return the number of pages available or SHRINK_EMPTY if we have none */
static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink,
					     struct shrink_control *sc)
{
	unsigned long num_pages = atomic_long_read(&allocated_pages);

	return num_pages ? num_pages : SHRINK_EMPTY;
}

#ifdef CONFIG_DEBUG_FS
/* Count the number of pages available in a pool_type */
static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
{
	unsigned int count = 0;
	struct page *p;

	spin_lock(&pt->lock);
	/* Only used for debugfs, the overhead doesn't matter */
	list_for_each_entry(p, &pt->pages, lru)
		++count;
	spin_unlock(&pt->lock);

	return count;
}

/* Print a nice header for the order */
static void ttm_pool_debugfs_header(struct seq_file *m)
{
	unsigned int i;

	seq_puts(m, "\t ");
	for (i = 0; i < MAX_ORDER; ++i)
		seq_printf(m, " ---%2u---", i);
	seq_puts(m, "\n");
}

/* Dump information about the different pool types */
static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
				    struct seq_file *m)
{
	unsigned int i;

	for (i = 0; i < MAX_ORDER; ++i)
		seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
	seq_puts(m, "\n");
}

/* Dump the total amount of allocated pages */
static void ttm_pool_debugfs_footer(struct seq_file *m)
{
	seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
		   atomic_long_read(&allocated_pages), page_pool_size);
}
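
/* Together these helpers produce output shaped roughly like this
 * (illustrative values, column widths per the format strings above):
 *
 *	           ---0---  ---1---  ---2--- ...
 *	wc	:        3        0        1 ...
 *	uc	:        0        2        0 ...
 *
 *	total	:       42 of     1024
 */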

/* Dump the information for the global pools */
static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
{
	ttm_pool_debugfs_header(m);

	spin_lock(&shrinker_lock);
	seq_puts(m, "wc\t:");
	ttm_pool_debugfs_orders(global_write_combined, m);
	seq_puts(m, "uc\t:");
	ttm_pool_debugfs_orders(global_uncached, m);
	seq_puts(m, "wc 32\t:");
	ttm_pool_debugfs_orders(global_dma32_write_combined, m);
	seq_puts(m, "uc 32\t:");
	ttm_pool_debugfs_orders(global_dma32_uncached, m);
	spin_unlock(&shrinker_lock);

	ttm_pool_debugfs_footer(m);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_globals);

/**
 * ttm_pool_debugfs - Debugfs dump function for a pool
 *
 * @pool: the pool to dump the information for
 * @m: seq_file to dump to
 *
 * Make a debugfs dump with the per pool and global information.
 */
int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
{
	unsigned int i;

	if (!pool->use_dma_alloc) {
		seq_puts(m, "unused\n");
		return 0;
	}

	ttm_pool_debugfs_header(m);

	spin_lock(&shrinker_lock);
	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
		seq_puts(m, "DMA ");
		switch (i) {
		case ttm_cached:
			seq_puts(m, "\t:");
			break;
		case ttm_write_combined:
			seq_puts(m, "wc\t:");
			break;
		case ttm_uncached:
			seq_puts(m, "uc\t:");
			break;
		}
		ttm_pool_debugfs_orders(pool->caching[i].orders, m);
	}
	spin_unlock(&shrinker_lock);

	ttm_pool_debugfs_footer(m);
	return 0;
}
EXPORT_SYMBOL(ttm_pool_debugfs);

/* Test the shrinker functions and dump the result */
static int ttm_pool_debugfs_shrink_show(struct seq_file *m, void *data)
{
	struct shrink_control sc = { .gfp_mask = GFP_NOFS };

	fs_reclaim_acquire(GFP_KERNEL);
	seq_printf(m, "%lu/%lu\n", ttm_pool_shrinker_count(&mm_shrinker, &sc),
		   ttm_pool_shrinker_scan(&mm_shrinker, &sc));
	fs_reclaim_release(GFP_KERNEL);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_shrink);

#endif

/**
 * ttm_pool_mgr_init - Initialize globals
 *
 * @num_pages: default number of pages
 *
 * Initialize the global locks and lists for the MM shrinker.
 */
int ttm_pool_mgr_init(unsigned long num_pages)
{
	unsigned int i;

	if (!page_pool_size)
		page_pool_size = num_pages;

	spin_lock_init(&shrinker_lock);
	INIT_LIST_HEAD(&shrinker_list);

	for (i = 0; i < MAX_ORDER; ++i) {
		ttm_pool_type_init(&global_write_combined[i], NULL,
				   ttm_write_combined, i);
		ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);

		ttm_pool_type_init(&global_dma32_write_combined[i], NULL,
				   ttm_write_combined, i);
		ttm_pool_type_init(&global_dma32_uncached[i], NULL,
				   ttm_uncached, i);
	}

#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("page_pool", 0444, ttm_debugfs_root, NULL,
			    &ttm_pool_debugfs_globals_fops);
	debugfs_create_file("page_pool_shrink", 0400, ttm_debugfs_root, NULL,
			    &ttm_pool_debugfs_shrink_fops);
#endif

	mm_shrinker.count_objects = ttm_pool_shrinker_count;
	mm_shrinker.scan_objects = ttm_pool_shrinker_scan;
	mm_shrinker.seeks = 1;
	return register_shrinker(&mm_shrinker);
}

/**
 * ttm_pool_mgr_fini - Finalize globals
 *
 * Cleanup the global pools and unregister the MM shrinker.
 */
void ttm_pool_mgr_fini(void)
{
	unsigned int i;

	for (i = 0; i < MAX_ORDER; ++i) {
		ttm_pool_type_fini(&global_write_combined[i]);
		ttm_pool_type_fini(&global_uncached[i]);

		ttm_pool_type_fini(&global_dma32_write_combined[i]);
		ttm_pool_type_fini(&global_dma32_uncached[i]);
	}

	unregister_shrinker(&mm_shrinker);
	WARN_ON(!list_empty(&shrinker_list));
}