/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/sched.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <drm/drm_cache.h>
#include <drm/ttm/ttm_bo_driver.h>

#include "ttm_module.h"

static unsigned long ttm_pages_limit;

MODULE_PARM_DESC(pages_limit, "Limit for the allocated pages");
module_param_named(pages_limit, ttm_pages_limit, ulong, 0644);

static unsigned long ttm_dma32_pages_limit;

MODULE_PARM_DESC(dma32_pages_limit, "Limit for the allocated DMA32 pages");
module_param_named(dma32_pages_limit, ttm_dma32_pages_limit, ulong, 0644);
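
/*
 * Usage sketch (not code in this file): both limits belong to the "ttm"
 * module, so they can be set on the kernel command line as
 * ttm.pages_limit=N and ttm.dma32_pages_limit=N, or changed at runtime
 * (mode 0644) through /sys/module/ttm/parameters/pages_limit and
 * /sys/module/ttm/parameters/dma32_pages_limit. A value of 0 means the
 * defaults handed to ttm_tt_mgr_init() below are used instead.
 */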

static atomic_long_t ttm_pages_allocated;
static atomic_long_t ttm_dma32_pages_allocated;

/*
 * Allocates a ttm structure for the given BO.
 */
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_device *bdev = bo->bdev;
	uint32_t page_flags = 0;

	dma_resv_assert_held(bo->base.resv);

	if (bo->ttm)
		return 0;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
		break;
	case ttm_bo_type_kernel:
		break;
	case ttm_bo_type_sg:
		page_flags |= TTM_TT_FLAG_EXTERNAL;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		return -EINVAL;
	}

	bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags);
	if (unlikely(bo->ttm == NULL))
		return -ENOMEM;

	WARN_ON(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE &&
		!(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL));

	return 0;
}
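
/*
 * Example (a minimal sketch, not part of this file): ttm_tt_create()
 * above relies on the driver's bdev->funcs->ttm_tt_create() hook to
 * allocate the struct ttm_tt. A typical implementation just wraps
 * ttm_tt_init(); the foo_ prefix is hypothetical:
 *
 *	static struct ttm_tt *foo_ttm_tt_create(struct ttm_buffer_object *bo,
 *						uint32_t page_flags)
 *	{
 *		struct ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);
 *
 *		if (!tt)
 *			return NULL;
 *		if (ttm_tt_init(tt, bo, page_flags, ttm_cached, 0)) {
 *			kfree(tt);
 *			return NULL;
 *		}
 *		return tt;
 *	}
 */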

/*
 * Allocates storage for pointers to the pages that back the ttm.
 */
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvcalloc(ttm->num_pages, sizeof(void*), GFP_KERNEL);
	if (!ttm->pages)
		return -ENOMEM;

	return 0;
}

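/*
 * One combined allocation for DMA-capable TTMs: the dma_addr_t array is
 * carved out of the same buffer, directly behind the page pointer array,
 * which is why ttm_tt_fini() only needs to free ttm->pages.
 */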
static int ttm_dma_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvcalloc(ttm->num_pages, sizeof(*ttm->pages) +
			      sizeof(*ttm->dma_address), GFP_KERNEL);
	if (!ttm->pages)
		return -ENOMEM;

	ttm->dma_address = (void *)(ttm->pages + ttm->num_pages);
	return 0;
}

static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->dma_address = kvcalloc(ttm->num_pages, sizeof(*ttm->dma_address),
				    GFP_KERNEL);
	if (!ttm->dma_address)
		return -ENOMEM;

	return 0;
}

void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	bdev->funcs->ttm_tt_destroy(bdev, ttm);
}

static void ttm_tt_init_fields(struct ttm_tt *ttm,
			       struct ttm_buffer_object *bo,
			       uint32_t page_flags,
			       enum ttm_caching caching,
			       unsigned long extra_pages)
{
	ttm->num_pages = (PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT) + extra_pages;
	ttm->page_flags = page_flags;
	ttm->dma_address = NULL;
	ttm->swap_storage = NULL;
	ttm->sg = bo->sg;
	ttm->caching = caching;
}

int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		uint32_t page_flags, enum ttm_caching caching,
		unsigned long extra_pages)
{
	ttm_tt_init_fields(ttm, bo, page_flags, caching, extra_pages);

	if (ttm_tt_alloc_page_directory(ttm)) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

void ttm_tt_fini(struct ttm_tt *ttm)
{
	WARN_ON(ttm->page_flags & TTM_TT_FLAG_PRIV_POPULATED);

	if (ttm->swap_storage)
		fput(ttm->swap_storage);
	ttm->swap_storage = NULL;

	if (ttm->pages)
		kvfree(ttm->pages);
	else
		kvfree(ttm->dma_address);
	ttm->pages = NULL;
	ttm->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		   uint32_t page_flags, enum ttm_caching caching)
{
	int ret;

	ttm_tt_init_fields(ttm, bo, page_flags, caching, 0);

	if (page_flags & TTM_TT_FLAG_EXTERNAL)
		ret = ttm_sg_tt_alloc_page_directory(ttm);
	else
		ret = ttm_dma_tt_alloc_page_directory(ttm);
	if (ret) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_sg_tt_init);

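/*
 * ttm_tt_swapin - copy a swapped-out TT object back in
 *
 * @ttm: The struct ttm_tt.
 *
 * Copy the contents of a previously swapped-out TT object from its shmem
 * swap storage into the already populated pages, drop the swap file and
 * clear TTM_TT_FLAG_SWAPPED. Returns 0 on success or a negative error code.
 */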
int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	gfp_t gfp_mask;
	int i, ret;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_mapping;
	gfp_mask = mapping_gfp_mask(swap_space);

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = shmem_read_mapping_page_gfp(swap_space, i,
							gfp_mask);
		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL)) {
			ret = -ENOMEM;
			goto out_err;
		}

		copy_highpage(to_page, from_page);
		put_page(from_page);
	}

	fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED;

	return 0;

out_err:
	return ret;
}

/**
 * ttm_tt_swapout - swap out tt object
 *
 * @bdev: TTM device structure.
 * @ttm: The struct ttm_tt.
 * @gfp_flags: Flags to use for memory allocation.
 *
 * Swap out a TT object to a shmem file; return the number of pages swapped
 * out or a negative error code.
 */
int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm,
		   gfp_t gfp_flags)
{
	loff_t size = (loff_t)ttm->num_pages << PAGE_SHIFT;
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i, ret;

	swap_storage = shmem_file_setup("ttm swap", size, 0);
	if (IS_ERR(swap_storage)) {
		pr_err("Failed allocating swap storage\n");
		return PTR_ERR(swap_storage);
	}

	swap_space = swap_storage->f_mapping;
	gfp_flags &= mapping_gfp_mask(swap_space);

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;

		to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_flags);
		if (IS_ERR(to_page)) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		copy_highpage(to_page, from_page);
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		put_page(to_page);
	}

	ttm_tt_unpopulate(bdev, ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_TT_FLAG_SWAPPED;

	return ttm->num_pages;

out_err:
	fput(swap_storage);

	return ret;
}

int ttm_tt_populate(struct ttm_device *bdev,
		    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	int ret;

	if (!ttm)
		return -EINVAL;

	if (ttm_tt_is_populated(ttm))
		return 0;

	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		atomic_long_add(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_add(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}

	while (atomic_long_read(&ttm_pages_allocated) > ttm_pages_limit ||
	       atomic_long_read(&ttm_dma32_pages_allocated) >
	       ttm_dma32_pages_limit) {

		ret = ttm_global_swapout(ctx, GFP_KERNEL);
		if (ret == 0)
			break;
		if (ret < 0)
			goto error;
	}

	if (bdev->funcs->ttm_tt_populate)
		ret = bdev->funcs->ttm_tt_populate(bdev, ttm, ctx);
	else
		ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
	if (ret)
		goto error;

	ttm->page_flags |= TTM_TT_FLAG_PRIV_POPULATED;
	if (unlikely(ttm->page_flags & TTM_TT_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_tt_unpopulate(bdev, ttm);
			return ret;
		}
	}

	return 0;

error:
	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_sub(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}
	return ret;
}
EXPORT_SYMBOL(ttm_tt_populate);
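
/*
 * Usage sketch (hypothetical driver code, not part of this file): a
 * caller that can sleep, and may therefore trigger global swapout,
 * would populate a tt with an operation context like this:
 *
 *	struct ttm_operation_ctx ctx = {
 *		.interruptible = true,
 *		.no_wait_gpu = false,
 *	};
 *	int err = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
 *
 *	if (err)
 *		return err;
 */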

void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	if (!ttm_tt_is_populated(ttm))
		return;

	if (bdev->funcs->ttm_tt_unpopulate)
		bdev->funcs->ttm_tt_unpopulate(bdev, ttm);
	else
		ttm_pool_free(&bdev->pool, ttm);

	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_sub(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}

	ttm->page_flags &= ~TTM_TT_FLAG_PRIV_POPULATED;
}

#ifdef CONFIG_DEBUG_FS

/* Test the shrinker functions and dump the result */
static int ttm_tt_debugfs_shrink_show(struct seq_file *m, void *data)
{
	struct ttm_operation_ctx ctx = { false, false };

	seq_printf(m, "%d\n", ttm_global_swapout(&ctx, GFP_KERNEL));
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_tt_debugfs_shrink);

#endif


/*
 * ttm_tt_mgr_init - initialize the global TT page limits
 *
 * Set up the debugfs shrink-test file and initialize the page allocation
 * limits used when swapping out BOs, unless they were already overridden
 * via the module parameters above.
 */
void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages)
{
#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("tt_shrink", 0400, ttm_debugfs_root, NULL,
			    &ttm_tt_debugfs_shrink_fops);
#endif

	if (!ttm_pages_limit)
		ttm_pages_limit = num_pages;

	if (!ttm_dma32_pages_limit)
		ttm_dma32_pages_limit = num_dma32_pages;
}

static void ttm_kmap_iter_tt_map_local(struct ttm_kmap_iter *iter,
				       struct iosys_map *dmap,
				       pgoff_t i)
{
	struct ttm_kmap_iter_tt *iter_tt =
		container_of(iter, typeof(*iter_tt), base);

	iosys_map_set_vaddr(dmap, kmap_local_page_prot(iter_tt->tt->pages[i],
						       iter_tt->prot));
}

static void ttm_kmap_iter_tt_unmap_local(struct ttm_kmap_iter *iter,
					 struct iosys_map *map)
{
	kunmap_local(map->vaddr);
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_tt_ops = {
	.map_local = ttm_kmap_iter_tt_map_local,
	.unmap_local = ttm_kmap_iter_tt_unmap_local,
	.maps_tt = true,
};

/**
 * ttm_kmap_iter_tt_init - Initialize a struct ttm_kmap_iter_tt
 * @iter_tt: The struct ttm_kmap_iter_tt to initialize.
 * @tt: Struct ttm_tt holding page pointers of the struct ttm_resource.
 *
 * Return: Pointer to the embedded struct ttm_kmap_iter.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
		      struct ttm_tt *tt)
{
	iter_tt->base.ops = &ttm_kmap_iter_tt_ops;
	iter_tt->tt = tt;
	if (tt)
		iter_tt->prot = ttm_prot_from_caching(tt->caching, PAGE_KERNEL);
	else
		iter_tt->prot = PAGE_KERNEL;

	return &iter_tt->base;
}
EXPORT_SYMBOL(ttm_kmap_iter_tt_init);
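
/*
 * Usage sketch (hypothetical caller, not part of this file): mapping and
 * accessing one page of a populated tt through the iterator interface:
 *
 *	struct ttm_kmap_iter_tt iter_tt;
 *	struct ttm_kmap_iter *iter = ttm_kmap_iter_tt_init(&iter_tt, tt);
 *	struct iosys_map map;
 *
 *	iter->ops->map_local(iter, &map, 0);
 *	memset(map.vaddr, 0, PAGE_SIZE);
 *	iter->ops->unmap_local(iter, &map);
 */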