/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_ttm_helper.h>

#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_xgmi.h"

static const struct drm_gem_object_funcs amdgpu_gem_object_funcs;

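/*
 * CPU page fault handler for mappings of amdgpu BOs: reserve the BO,
 * let the driver move it for CPU access if necessary and have TTM fill
 * in the PTEs; a dummy page is inserted instead when the device is gone.
 */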
static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
{
        struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
        struct drm_device *ddev = bo->base.dev;
        vm_fault_t ret;
        int idx;

        ret = ttm_bo_vm_reserve(bo, vmf);
        if (ret)
                return ret;

        if (drm_dev_enter(ddev, &idx)) {
                ret = amdgpu_bo_fault_reserve_notify(bo);
                if (ret) {
                        drm_dev_exit(idx);
                        goto unlock;
                }

                ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
                                               TTM_BO_VM_NUM_PREFAULT);

                drm_dev_exit(idx);
        } else {
                ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
        }
        if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
                return ret;

unlock:
        dma_resv_unlock(bo->base.resv);
        return ret;
}

static const struct vm_operations_struct amdgpu_gem_vm_ops = {
        .fault = amdgpu_gem_fault,
        .open = ttm_bo_vm_open,
        .close = ttm_bo_vm_close,
        .access = ttm_bo_vm_access
};

static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
        struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

        if (robj) {
                amdgpu_mn_unregister(robj);
                amdgpu_bo_unref(&robj);
        }
}

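/**
 * amdgpu_gem_object_create - allocate an amdgpu BO wrapped in a GEM object
 * @adev: amdgpu device object
 * @size: buffer size in bytes
 * @alignment: byte alignment of the buffer
 * @initial_domain: preferred initial placement domain(s)
 * @flags: AMDGPU_GEM_CREATE_* creation flags
 * @type: TTM buffer object type
 * @resv: reservation object to share, or NULL for a private one
 * @obj: resulting GEM object
 *
 * Returns 0 on success, a negative error code otherwise.
 */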
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
                             int alignment, u32 initial_domain,
                             u64 flags, enum ttm_bo_type type,
                             struct dma_resv *resv,
                             struct drm_gem_object **obj)
{
        struct amdgpu_bo *bo;
        struct amdgpu_bo_user *ubo;
        struct amdgpu_bo_param bp;
        int r;

        memset(&bp, 0, sizeof(bp));
        *obj = NULL;

        bp.size = size;
        bp.byte_align = alignment;
        bp.type = type;
        bp.resv = resv;
        bp.preferred_domain = initial_domain;
        bp.flags = flags;
        bp.domain = initial_domain;
        bp.bo_ptr_size = sizeof(struct amdgpu_bo);

        r = amdgpu_bo_create_user(adev, &bp, &ubo);
        if (r)
                return r;

        bo = &ubo->bo;
        *obj = &bo->tbo.base;
        (*obj)->funcs = &amdgpu_gem_object_funcs;

        return 0;
}

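/*
 * Forcibly drop all GEM handles still held by open DRM files; warns
 * because user space should have released them by this point.
 */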
void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
        struct drm_device *ddev = adev_to_drm(adev);
        struct drm_file *file;

        mutex_lock(&ddev->filelist_mutex);

        list_for_each_entry(file, &ddev->filelist, lhead) {
                struct drm_gem_object *gobj;
                int handle;

                WARN_ONCE(1, "Still active user space clients!\n");
                spin_lock(&file->table_lock);
                idr_for_each_entry(&file->object_idr, gobj, handle) {
                        WARN_ONCE(1, "And also active allocations!\n");
                        drm_gem_object_put(gobj);
                }
                idr_destroy(&file->object_idr);
                spin_unlock(&file->table_lock);
        }

        mutex_unlock(&ddev->filelist_mutex);
}

/*
 * Called from drm_gem_handle_create, which is used in both the create
 * and open ioctl paths.
 */
static int amdgpu_gem_object_open(struct drm_gem_object *obj,
                                  struct drm_file *file_priv)
{
        struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
        struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_va *bo_va;
        struct mm_struct *mm;
        int r;

        mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
        if (mm && mm != current->mm)
                return -EPERM;

        if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
            abo->tbo.base.resv != vm->root.bo->tbo.base.resv)
                return -EPERM;

        r = amdgpu_bo_reserve(abo, false);
        if (r)
                return r;

        bo_va = amdgpu_vm_bo_find(vm, abo);
        if (!bo_va)
                bo_va = amdgpu_vm_bo_add(adev, vm, abo);
        else
                ++bo_va->ref_count;
        amdgpu_bo_unreserve(abo);
        return 0;
}

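/*
 * Called when a GEM handle is closed: drop the bo_va reference and, on
 * the last put, remove the mapping and clear the freed page table
 * entries.
 */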
static void amdgpu_gem_object_close(struct drm_gem_object *obj,
                                    struct drm_file *file_priv)
{
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;

        struct amdgpu_bo_list_entry vm_pd;
        struct list_head list, duplicates;
        struct dma_fence *fence = NULL;
        struct ttm_validate_buffer tv;
        struct ww_acquire_ctx ticket;
        struct amdgpu_bo_va *bo_va;
        long r;

        INIT_LIST_HEAD(&list);
        INIT_LIST_HEAD(&duplicates);

        tv.bo = &bo->tbo;
        tv.num_shared = 2;
        list_add(&tv.head, &list);

        amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);

        r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
        if (r) {
                dev_err(adev->dev, "leaking bo va because we fail to reserve bo (%ld)\n", r);
                return;
        }
        bo_va = amdgpu_vm_bo_find(vm, bo);
        if (!bo_va || --bo_va->ref_count)
                goto out_unlock;

        amdgpu_vm_bo_del(adev, bo_va);
        if (!amdgpu_vm_ready(vm))
                goto out_unlock;

        r = amdgpu_vm_clear_freed(adev, vm, &fence);
        if (r || !fence)
                goto out_unlock;

        amdgpu_bo_fence(bo, fence, true);
        dma_fence_put(fence);

out_unlock:
        if (unlikely(r < 0))
                dev_err(adev->dev, "failed to clear page tables on GEM object close (%ld)\n", r);
        ttm_eu_backoff_reservation(&ticket, &list);
}

static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

        if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
                return -EPERM;
        if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
                return -EPERM;

        /* Workaround for Thunk bug creating PROT_NONE,MAP_PRIVATE mappings
         * for debugger access to invisible VRAM. Should have used MAP_SHARED
         * instead. Clearing VM_MAYWRITE prevents the mapping from ever
         * becoming writable and makes is_cow_mapping(vm_flags) false.
         */
        if (is_cow_mapping(vma->vm_flags) &&
            !(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
                vma->vm_flags &= ~VM_MAYWRITE;

        return drm_gem_ttm_mmap(obj, vma);
}

static const struct drm_gem_object_funcs amdgpu_gem_object_funcs = {
        .free = amdgpu_gem_object_free,
        .open = amdgpu_gem_object_open,
        .close = amdgpu_gem_object_close,
        .export = amdgpu_gem_prime_export,
        .vmap = drm_gem_ttm_vmap,
        .vunmap = drm_gem_ttm_vunmap,
        .mmap = amdgpu_gem_object_mmap,
        .vm_ops = &amdgpu_gem_vm_ops,
};

/*
 * GEM ioctls.
 */
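/**
 * amdgpu_gem_create_ioctl - create a GEM object
 * @dev: DRM device
 * @data: ioctl argument, union drm_amdgpu_gem_create
 * @filp: DRM file the new handle is installed in
 *
 * Validates the requested flags and domains, allocates the BO and
 * returns a handle to it. Failed VRAM allocations transparently retry
 * without the CPU-access requirement and then with GTT as a fallback
 * domain.
 */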
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
{
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct amdgpu_fpriv *fpriv = filp->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
        union drm_amdgpu_gem_create *args = data;
        uint64_t flags = args->in.domain_flags;
        uint64_t size = args->in.bo_size;
        struct dma_resv *resv = NULL;
        struct drm_gem_object *gobj;
        uint32_t handle, initial_domain;
        int r;

        /* reject invalid gem flags */
        if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
                      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
                      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
                      AMDGPU_GEM_CREATE_VRAM_CLEARED |
                      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
                      AMDGPU_GEM_CREATE_EXPLICIT_SYNC |
                      AMDGPU_GEM_CREATE_ENCRYPTED |
                      AMDGPU_GEM_CREATE_DISCARDABLE))
                return -EINVAL;

        /* reject invalid gem domains */
        if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
                return -EINVAL;

        if (!amdgpu_is_tmz(adev) && (flags & AMDGPU_GEM_CREATE_ENCRYPTED)) {
                DRM_NOTE_ONCE("Cannot allocate secure buffer since TMZ is disabled\n");
                return -EINVAL;
        }

        /* create a gem object to contain this object in */
        if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
            AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
                if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
                        /* if gds bo is created from user space, it must be
                         * passed to bo list
                         */
                        DRM_ERROR("GDS bo cannot be per-vm-bo\n");
                        return -EINVAL;
                }
                flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
        }

        if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
                r = amdgpu_bo_reserve(vm->root.bo, false);
                if (r)
                        return r;

                resv = vm->root.bo->tbo.base.resv;
        }

        initial_domain = (u32)(0xffffffff & args->in.domains);
retry:
        r = amdgpu_gem_object_create(adev, size, args->in.alignment,
                                     initial_domain,
                                     flags, ttm_bo_type_device, resv, &gobj);
        if (r && r != -ERESTARTSYS) {
                if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
                        flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
                        goto retry;
                }

                if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
                        initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
                        goto retry;
                }
                DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
                          size, initial_domain, args->in.alignment, r);
        }

        if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
                if (!r) {
                        struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);

                        abo->parent = amdgpu_bo_ref(vm->root.bo);
                }
                amdgpu_bo_unreserve(vm->root.bo);
        }
        if (r)
                return r;

        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_put(gobj);
        if (r)
                return r;

        memset(args, 0, sizeof(*args));
        args->out.handle = handle;
        return 0;
}

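/**
 * amdgpu_gem_userptr_ioctl - create a GEM object backed by user memory
 * @dev: DRM device
 * @data: ioctl argument, struct drm_amdgpu_gem_userptr
 * @filp: DRM file the new handle is installed in
 *
 * The address and size must be page aligned, and writable mappings
 * require AMDGPU_GEM_USERPTR_REGISTER so that invalidations of the
 * backing pages reach the driver through an MMU notifier.
 */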
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *filp)
{
        struct ttm_operation_ctx ctx = { true, false };
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_amdgpu_gem_userptr *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *bo;
        uint32_t handle;
        int r;

        args->addr = untagged_addr(args->addr);

        if (offset_in_page(args->addr | args->size))
                return -EINVAL;

        /* reject unknown flag values */
        if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
            AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
            AMDGPU_GEM_USERPTR_REGISTER))
                return -EINVAL;

        if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
            !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {
                /* if we want to write to it we must install an MMU notifier */
                return -EACCES;
        }

        /* create a gem object to contain this object in */
        r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
                                     0, ttm_bo_type_device, NULL, &gobj);
        if (r)
                return r;

        bo = gem_to_amdgpu_bo(gobj);
        bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
        bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
        r = amdgpu_ttm_tt_set_userptr(&bo->tbo, args->addr, args->flags);
        if (r)
                goto release_object;

        if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
                r = amdgpu_mn_register(bo, args->addr);
                if (r)
                        goto release_object;
        }

        if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
                r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
                if (r)
                        goto release_object;

                r = amdgpu_bo_reserve(bo, true);
                if (r)
                        goto user_pages_done;

                amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
                r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                amdgpu_bo_unreserve(bo);
                if (r)
                        goto user_pages_done;
        }

        r = drm_gem_handle_create(filp, gobj, &handle);
        if (r)
                goto user_pages_done;

        args->handle = handle;

user_pages_done:
        if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
                amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);

release_object:
        drm_gem_object_put(gobj);

        return r;
}

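/**
 * amdgpu_mode_dumb_mmap - return the fake mmap offset of a BO
 * @filp: DRM file the handle belongs to
 * @dev: DRM device
 * @handle: GEM handle
 * @offset_p: resulting mmap offset
 *
 * Userptr BOs and BOs created with AMDGPU_GEM_CREATE_NO_CPU_ACCESS
 * cannot be mapped and yield -EPERM.
 */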
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
                          struct drm_device *dev,
                          uint32_t handle, uint64_t *offset_p)
{
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;

        gobj = drm_gem_object_lookup(filp, handle);
        if (gobj == NULL)
                return -ENOENT;
        robj = gem_to_amdgpu_bo(gobj);
        if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
            (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
                drm_gem_object_put(gobj);
                return -EPERM;
        }
        *offset_p = amdgpu_bo_mmap_offset(robj);
        drm_gem_object_put(gobj);
        return 0;
}

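/* Thin ioctl wrapper that resolves the handle and returns its mmap offset. */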
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        union drm_amdgpu_gem_mmap *args = data;
        uint32_t handle = args->in.handle;

        memset(args, 0, sizeof(*args));
        return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
        unsigned long timeout_jiffies;
        ktime_t timeout;

        /* clamp timeout if it's too large */
        if (((int64_t)timeout_ns) < 0)
                return MAX_SCHEDULE_TIMEOUT;

        timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
        if (ktime_to_ns(timeout) < 0)
                return 0;

        timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
        /* clamp timeout to avoid unsigned -> signed overflow */
        if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
                return MAX_SCHEDULE_TIMEOUT - 1;

        return timeout_jiffies;
}

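/*
 * Wait for all fences on the BO's reservation object, up to the user
 * supplied absolute timeout; args->out.status is 0 when the BO went
 * idle and 1 when the wait timed out.
 */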
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *filp)
{
        union drm_amdgpu_gem_wait_idle *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;
        uint32_t handle = args->in.handle;
        unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
        int r = 0;
        long ret;

        gobj = drm_gem_object_lookup(filp, handle);
        if (gobj == NULL)
                return -ENOENT;
        robj = gem_to_amdgpu_bo(gobj);
        ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ,
                                    true, timeout);

        /* ret == 0 means not signaled,
         * ret > 0 means signaled
         * ret < 0 means interrupted before timeout
         */
        if (ret >= 0) {
                memset(args, 0, sizeof(*args));
                args->out.status = (ret == 0);
        } else
                r = ret;

        drm_gem_object_put(gobj);
        return r;
}

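/*
 * Get or set the tiling flags and the opaque metadata blob associated
 * with a buffer object.
 */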
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *filp)
{
        struct drm_amdgpu_gem_metadata *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;
        int r = -1;

        DRM_DEBUG("%d\n", args->handle);
        gobj = drm_gem_object_lookup(filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
        robj = gem_to_amdgpu_bo(gobj);

        r = amdgpu_bo_reserve(robj, false);
        if (unlikely(r != 0))
                goto out;

        if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
                amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
                r = amdgpu_bo_get_metadata(robj, args->data.data,
                                           sizeof(args->data.data),
                                           &args->data.data_size_bytes,
                                           &args->data.flags);
        } else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
                if (args->data.data_size_bytes > sizeof(args->data.data)) {
                        r = -EINVAL;
                        goto unreserve;
                }
                r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
                if (!r)
                        r = amdgpu_bo_set_metadata(robj, args->data.data,
                                                   args->data.data_size_bytes,
                                                   args->data.flags);
        }

unreserve:
        amdgpu_bo_unreserve(robj);
out:
        drm_gem_object_put(gobj);
        return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @vm: vm to update
 * @bo_va: bo_va to update
 * @operation: map, unmap or clear
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
                                    struct amdgpu_vm *vm,
                                    struct amdgpu_bo_va *bo_va,
                                    uint32_t operation)
{
        int r;

        if (!amdgpu_vm_ready(vm))
                return;

        r = amdgpu_vm_clear_freed(adev, vm, NULL);
        if (r)
                goto error;

        if (operation == AMDGPU_VA_OP_MAP ||
            operation == AMDGPU_VA_OP_REPLACE) {
                r = amdgpu_vm_bo_update(adev, bo_va, false);
                if (r)
                        goto error;
        }

        r = amdgpu_vm_update_pdes(adev, vm, false);

error:
        if (r && r != -ERESTARTSYS)
                DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

/**
 * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
 *
 * @adev: amdgpu_device pointer
 * @flags: GEM UAPI flags
 *
 * Returns the GEM UAPI flags mapped into hardware for the ASIC.
 */
uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
{
        uint64_t pte_flag = 0;

        if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
                pte_flag |= AMDGPU_PTE_EXECUTABLE;
        if (flags & AMDGPU_VM_PAGE_READABLE)
                pte_flag |= AMDGPU_PTE_READABLE;
        if (flags & AMDGPU_VM_PAGE_WRITEABLE)
                pte_flag |= AMDGPU_PTE_WRITEABLE;
        if (flags & AMDGPU_VM_PAGE_PRT)
                pte_flag |= AMDGPU_PTE_PRT;
        if (flags & AMDGPU_VM_PAGE_NOALLOC)
                pte_flag |= AMDGPU_PTE_NOALLOC;

        if (adev->gmc.gmc_funcs->map_mtype)
                pte_flag |= amdgpu_gmc_map_mtype(adev,
                                                 flags & AMDGPU_VM_MTYPE_MASK);

        return pte_flag;
}

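/**
 * amdgpu_gem_va_ioctl - map or unmap a GEM object in the process VM
 * @dev: DRM device
 * @data: ioctl argument, struct drm_amdgpu_gem_va
 * @filp: DRM file owning the VM
 *
 * Validates the requested virtual address range and flags, then performs
 * the map, unmap, replace or clear operation and, unless delayed updates
 * were requested, writes the resulting page table updates right away.
 */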
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *filp)
{
        const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
                AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
                AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK |
                AMDGPU_VM_PAGE_NOALLOC;
        const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
                AMDGPU_VM_PAGE_PRT;

        struct drm_amdgpu_gem_va *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct amdgpu_fpriv *fpriv = filp->driver_priv;
        struct amdgpu_bo *abo;
        struct amdgpu_bo_va *bo_va;
        struct amdgpu_bo_list_entry vm_pd;
        struct ttm_validate_buffer tv;
        struct ww_acquire_ctx ticket;
        struct list_head list, duplicates;
        uint64_t va_flags;
        uint64_t vm_size;
        int r = 0;

        if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
                dev_dbg(dev->dev,
                        "va_address 0x%LX is in reserved area 0x%LX\n",
                        args->va_address, AMDGPU_VA_RESERVED_SIZE);
                return -EINVAL;
        }

        if (args->va_address >= AMDGPU_GMC_HOLE_START &&
            args->va_address < AMDGPU_GMC_HOLE_END) {
                dev_dbg(dev->dev,
                        "va_address 0x%LX is in VA hole 0x%LX-0x%LX\n",
                        args->va_address, AMDGPU_GMC_HOLE_START,
                        AMDGPU_GMC_HOLE_END);
                return -EINVAL;
        }

        args->va_address &= AMDGPU_GMC_HOLE_MASK;

        vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
        vm_size -= AMDGPU_VA_RESERVED_SIZE;
        if (args->va_address + args->map_size > vm_size) {
                dev_dbg(dev->dev,
                        "va_address 0x%llx is in top reserved area 0x%llx\n",
                        args->va_address + args->map_size, vm_size);
                return -EINVAL;
        }

        if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
                dev_dbg(dev->dev, "invalid flags combination 0x%08X\n",
                        args->flags);
                return -EINVAL;
        }

        switch (args->operation) {
        case AMDGPU_VA_OP_MAP:
        case AMDGPU_VA_OP_UNMAP:
        case AMDGPU_VA_OP_CLEAR:
        case AMDGPU_VA_OP_REPLACE:
                break;
        default:
                dev_dbg(dev->dev, "unsupported operation %d\n",
                        args->operation);
                return -EINVAL;
        }

        INIT_LIST_HEAD(&list);
        INIT_LIST_HEAD(&duplicates);
        if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
            !(args->flags & AMDGPU_VM_PAGE_PRT)) {
                gobj = drm_gem_object_lookup(filp, args->handle);
                if (gobj == NULL)
                        return -ENOENT;
                abo = gem_to_amdgpu_bo(gobj);
                tv.bo = &abo->tbo;
                if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
                        tv.num_shared = 1;
                else
                        tv.num_shared = 0;
                list_add(&tv.head, &list);
        } else {
                gobj = NULL;
                abo = NULL;
        }

        amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);

        r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
        if (r)
                goto error_unref;

        if (abo) {
                bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
                if (!bo_va) {
                        r = -ENOENT;
                        goto error_backoff;
                }
        } else if (args->operation != AMDGPU_VA_OP_CLEAR) {
                bo_va = fpriv->prt_va;
        } else {
                bo_va = NULL;
        }

        switch (args->operation) {
        case AMDGPU_VA_OP_MAP:
                va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
                r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
                                     args->offset_in_bo, args->map_size,
                                     va_flags);
                break;
        case AMDGPU_VA_OP_UNMAP:
                r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
                break;

        case AMDGPU_VA_OP_CLEAR:
                r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
                                                args->va_address,
                                                args->map_size);
                break;
        case AMDGPU_VA_OP_REPLACE:
                va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
                r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
                                             args->offset_in_bo, args->map_size,
                                             va_flags);
                break;
        default:
                break;
        }
        if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
                amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
                                        args->operation);

error_backoff:
        ttm_eu_backoff_reservation(&ticket, &list);

error_unref:
        drm_gem_object_put(gobj);
        return r;
}

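/**
 * amdgpu_gem_op_ioctl - query or change buffer object placement
 * @dev: DRM device
 * @data: ioctl argument, struct drm_amdgpu_gem_op
 * @filp: DRM file the handle belongs to
 *
 * AMDGPU_GEM_OP_GET_GEM_CREATE_INFO returns the creation parameters of
 * a BO; AMDGPU_GEM_OP_SET_PLACEMENT changes its preferred domains.
 * Imported DMA-bufs must not be placed in VRAM, and BOs mapped into a
 * VM on another device in the same XGMI hive cannot change placement.
 */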
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *filp)
{
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_amdgpu_gem_op *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_vm_bo_base *base;
        struct amdgpu_bo *robj;
        int r;

        gobj = drm_gem_object_lookup(filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
        robj = gem_to_amdgpu_bo(gobj);

        r = amdgpu_bo_reserve(robj, false);
        if (unlikely(r))
                goto out;

        switch (args->op) {
        case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
                struct drm_amdgpu_gem_create_in info;
                void __user *out = u64_to_user_ptr(args->value);

                info.bo_size = robj->tbo.base.size;
                info.alignment = robj->tbo.page_alignment << PAGE_SHIFT;
                info.domains = robj->preferred_domains;
                info.domain_flags = robj->flags;
                amdgpu_bo_unreserve(robj);
                if (copy_to_user(out, &info, sizeof(info)))
                        r = -EFAULT;
                break;
        }
        case AMDGPU_GEM_OP_SET_PLACEMENT:
                if (robj->tbo.base.import_attach &&
                    args->value & AMDGPU_GEM_DOMAIN_VRAM) {
                        r = -EINVAL;
                        amdgpu_bo_unreserve(robj);
                        break;
                }
                if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
                        r = -EPERM;
                        amdgpu_bo_unreserve(robj);
                        break;
                }
                for (base = robj->vm_bo; base; base = base->next)
                        if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
                                amdgpu_ttm_adev(base->vm->root.bo->tbo.bdev))) {
                                r = -EINVAL;
                                amdgpu_bo_unreserve(robj);
                                goto out;
                        }

                robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
                                                        AMDGPU_GEM_DOMAIN_GTT |
                                                        AMDGPU_GEM_DOMAIN_CPU);
                robj->allowed_domains = robj->preferred_domains;
                if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
                        robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

                if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
                        amdgpu_vm_bo_invalidate(adev, robj, true);

                amdgpu_bo_unreserve(robj);
                break;
        default:
                amdgpu_bo_unreserve(robj);
                r = -EINVAL;
        }

out:
        drm_gem_object_put(gobj);
        return r;
}

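/* Align the scanout pitch for the given bytes-per-pixel; returns the pitch in bytes. */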
static int amdgpu_gem_align_pitch(struct amdgpu_device *adev,
                                  int width,
                                  int cpp,
                                  bool tiled)
{
        int aligned = width;
        int pitch_mask = 0;

        switch (cpp) {
        case 1:
                pitch_mask = 255;
                break;
        case 2:
                pitch_mask = 127;
                break;
        case 3:
        case 4:
                pitch_mask = 63;
                break;
        }

        aligned += pitch_mask;
        aligned &= ~pitch_mask;
        return aligned * cpp;
}

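/*
 * Create a "dumb" buffer suitable for scanout: CPU accessible, write
 * combined and physically contiguous in VRAM where possible.
 */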
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
                            struct drm_device *dev,
                            struct drm_mode_create_dumb *args)
{
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_gem_object *gobj;
        uint32_t handle;
        u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
                    AMDGPU_GEM_CREATE_CPU_GTT_USWC |
                    AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
        u32 domain;
        int r;

        /*
         * The buffer returned from this function should be cleared, but
         * it can only be done if the ring is enabled or we'll fail to
         * create the buffer.
         */
        if (adev->mman.buffer_funcs_enabled)
                flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;

        args->pitch = amdgpu_gem_align_pitch(adev, args->width,
                                             DIV_ROUND_UP(args->bpp, 8), 0);
        args->size = (u64)args->pitch * args->height;
        args->size = ALIGN(args->size, PAGE_SIZE);
        domain = amdgpu_bo_get_preferred_domain(adev,
                                amdgpu_display_supported_domains(adev, flags));
        r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
                                     ttm_bo_type_device, NULL, &gobj);
        if (r)
                return -ENOMEM;

        r = drm_gem_handle_create(file_priv, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_put(gobj);
        if (r)
                return r;

        args->handle = handle;
        return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_gem_info_show(struct seq_file *m, void *unused)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
        struct drm_device *dev = adev_to_drm(adev);
        struct drm_file *file;
        int r;

        r = mutex_lock_interruptible(&dev->filelist_mutex);
        if (r)
                return r;

        list_for_each_entry(file, &dev->filelist, lhead) {
                struct task_struct *task;
                struct drm_gem_object *gobj;
                int id;

                /*
                 * Although we have a valid reference on file->pid, that does
                 * not guarantee that the task_struct who called get_pid() is
                 * still alive (e.g. get_pid(current) => fork() => exit()).
                 * Therefore, we need to protect this ->comm access using RCU.
                 */
                rcu_read_lock();
                task = pid_task(file->pid, PIDTYPE_PID);
                seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
                           task ? task->comm : "<unknown>");
                rcu_read_unlock();

                spin_lock(&file->table_lock);
                idr_for_each_entry(&file->object_idr, gobj, id) {
                        struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);

                        amdgpu_bo_print_info(id, bo, m);
                }
                spin_unlock(&file->table_lock);
        }

        mutex_unlock(&dev->filelist_mutex);
        return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_gem_info);

#endif

void amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
        struct drm_minor *minor = adev_to_drm(adev)->primary;
        struct dentry *root = minor->debugfs_root;

        debugfs_create_file("amdgpu_gem_info", 0444, root, adev,
                            &amdgpu_debugfs_gem_info_fops);
#endif
}