/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#ifndef __AMDGPU_VM_H__
#define __AMDGPU_VM_H__

#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/rbtree.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_file.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <linux/sched/mm.h>

#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_ids.h"

struct amdgpu_bo_va;
struct amdgpu_job;
struct amdgpu_bo_list_entry;
struct amdgpu_bo_vm;

/*
 * GPUVM handling
 */

/* Maximum number of PTEs the hardware can write with one command */
#define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF

/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT(adev) (1 << (adev)->vm_manager.block_size)
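
/*
 * Illustrative example (not part of the driver): with a block size of
 * 9 bits, AMDGPU_VM_PTE_COUNT(adev) evaluates to 1 << 9 = 512 entries
 * per page table block.
 */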

#define AMDGPU_PTE_VALID (1ULL << 0)
#define AMDGPU_PTE_SYSTEM (1ULL << 1)
#define AMDGPU_PTE_SNOOPED (1ULL << 2)

/* RV+ */
#define AMDGPU_PTE_TMZ (1ULL << 3)

/* VI only */
#define AMDGPU_PTE_EXECUTABLE (1ULL << 4)

#define AMDGPU_PTE_READABLE (1ULL << 5)
#define AMDGPU_PTE_WRITEABLE (1ULL << 6)

#define AMDGPU_PTE_FRAG(x) (((x) & 0x1fULL) << 7)

/* TILED for VEGA10, reserved for older ASICs */
#define AMDGPU_PTE_PRT (1ULL << 51)

/* PDE is handled as PTE for VEGA10 */
#define AMDGPU_PDE_PTE (1ULL << 54)

#define AMDGPU_PTE_LOG (1ULL << 55)

/* PTE is handled as PDE for VEGA10 (Translate Further) */
#define AMDGPU_PTE_TF (1ULL << 56)

/* MALL noalloc for sienna_cichlid, reserved for older ASICs */
#define AMDGPU_PTE_NOALLOC (1ULL << 58)

/* PDE Block Fragment Size for VEGA10 */
#define AMDGPU_PDE_BFS(a) ((uint64_t)(a) << 59)


/* For GFX9 */
#define AMDGPU_PTE_MTYPE_VG10(a) ((uint64_t)(a) << 57)
#define AMDGPU_PTE_MTYPE_VG10_MASK AMDGPU_PTE_MTYPE_VG10(3ULL)

#define AMDGPU_MTYPE_NC 0
#define AMDGPU_MTYPE_CC 2

#define AMDGPU_PTE_DEFAULT_ATC (AMDGPU_PTE_SYSTEM \
				| AMDGPU_PTE_SNOOPED \
				| AMDGPU_PTE_EXECUTABLE \
				| AMDGPU_PTE_READABLE \
				| AMDGPU_PTE_WRITEABLE \
				| AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_CC))

/* gfx10 */
#define AMDGPU_PTE_MTYPE_NV10(a) ((uint64_t)(a) << 48)
#define AMDGPU_PTE_MTYPE_NV10_MASK AMDGPU_PTE_MTYPE_NV10(7ULL)
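
/*
 * Illustrative example only: the flag macros above are OR-ed together to
 * build the flags word of a PTE. A cached, CPU-accessible system page that
 * is readable and writable could, for instance, be described as
 *
 *	AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED |
 *	AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE
 *
 * with an ASIC specific memory type added via AMDGPU_PTE_MTYPE_VG10()
 * or AMDGPU_PTE_MTYPE_NV10().
 */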

/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER 0
#define AMDGPU_VM_FAULT_STOP_FIRST 1
#define AMDGPU_VM_FAULT_STOP_ALWAYS 2

/* Reserve 8MB VRAM for page tables */
#define AMDGPU_VM_RESERVED_VRAM (8ULL << 20)

/* max number of VMHUB */
#define AMDGPU_MAX_VMHUBS 3
#define AMDGPU_GFXHUB_0 0
#define AMDGPU_MMHUB_0 1
#define AMDGPU_MMHUB_1 2

/* Reserve 2MB at top/bottom of address space for kernel use */
#define AMDGPU_VA_RESERVED_SIZE (2ULL << 20)

/* max vmids dedicated for process */
#define AMDGPU_VM_MAX_RESERVED_VMID 1

/* See vm_update_mode */
#define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0)
#define AMDGPU_VM_USE_CPU_FOR_COMPUTE (1 << 1)
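
/*
 * Illustrative reading of the flags above (see also vm_update_mode in
 * struct amdgpu_vm_manager): a value of AMDGPU_VM_USE_CPU_FOR_COMPUTE
 * alone means compute VM page tables are updated by the CPU while
 * graphics VMs keep using SDMA; setting both bits moves all page table
 * updates to the CPU.
 */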

/* VMPT level enumeration; the hierarchy is:
 * PDB2->PDB1->PDB0->PTB
 */
enum amdgpu_vm_level {
	AMDGPU_VM_PDB2,
	AMDGPU_VM_PDB1,
	AMDGPU_VM_PDB0,
	AMDGPU_VM_PTB
};

/* base structure for tracking BO usage in a VM */
struct amdgpu_vm_bo_base {
	/* constant after initialization */
	struct amdgpu_vm *vm;
	struct amdgpu_bo *bo;

	/* protected by bo being reserved */
	struct amdgpu_vm_bo_base *next;

	/* protected by spinlock */
	struct list_head vm_status;

	/* protected by the BO being reserved */
	bool moved;
};

/* provided by hw blocks that can write ptes, e.g., sdma */
struct amdgpu_vm_pte_funcs {
	/* number of dw to reserve per operation */
	unsigned copy_pte_num_dw;

	/* copy pte entries from GART */
	void (*copy_pte)(struct amdgpu_ib *ib,
			 uint64_t pe, uint64_t src,
			 unsigned count);

	/* write pte one entry at a time with addr mapping */
	void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
			  uint64_t value, unsigned count,
			  uint32_t incr);
	/* for linear pte/pde updates without addr mapping */
	void (*set_pte_pde)(struct amdgpu_ib *ib,
			    uint64_t pe,
			    uint64_t addr, unsigned count,
			    uint32_t incr, uint64_t flags);
};
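
/*
 * Illustrative semantics (a sketch, not a quote from the hardware docs):
 * a call like set_pte_pde(ib, pe, addr, count, incr, flags) emits commands
 * that write 'count' consecutive entries starting at GPU address 'pe',
 * where entry i maps 'addr + i * incr' combined with 'flags'.
 */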

struct amdgpu_task_info {
	char process_name[TASK_COMM_LEN];
	char task_name[TASK_COMM_LEN];
	pid_t pid;
	pid_t tgid;
};

/**
 * struct amdgpu_vm_update_params
 *
 * Encapsulate some VM table update parameters to reduce
 * the number of function parameters
 *
 */
struct amdgpu_vm_update_params {

	/**
	 * @adev: amdgpu device we do this update for
	 */
	struct amdgpu_device *adev;

	/**
	 * @vm: optional amdgpu_vm we do this update for
	 */
	struct amdgpu_vm *vm;

	/**
	 * @immediate: if changes should be made immediately
	 */
	bool immediate;

	/**
	 * @unlocked: true if the root BO is not locked
	 */
	bool unlocked;

	/**
	 * @pages_addr:
	 *
	 * DMA addresses to use for mapping
	 */
	dma_addr_t *pages_addr;

	/**
	 * @job: job to use for the hw submission
	 */
	struct amdgpu_job *job;

	/**
	 * @num_dw_left: number of dw left for the IB
	 */
	unsigned int num_dw_left;

	/**
	 * @table_freed: set to true if a page table was freed during the update
	 */
	bool table_freed;
};

struct amdgpu_vm_update_funcs {
	int (*map_table)(struct amdgpu_bo_vm *bo);
	int (*prepare)(struct amdgpu_vm_update_params *p, struct dma_resv *resv,
		       enum amdgpu_sync_mode sync_mode);
	int (*update)(struct amdgpu_vm_update_params *p,
		      struct amdgpu_bo_vm *bo, uint64_t pe, uint64_t addr,
		      unsigned count, uint32_t incr, uint64_t flags);
	int (*commit)(struct amdgpu_vm_update_params *p,
		      struct dma_fence **fence);
};
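
/*
 * Sketch of the expected call sequence (based on how the CPU and SDMA
 * backends amdgpu_vm_cpu_funcs/amdgpu_vm_sdma_funcs are used; details may
 * differ per backend): map_table() once per page table BO, prepare() once
 * per update, update() for each run of entries, and finally commit() to
 * submit the work and return the fence.
 */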

struct amdgpu_vm {
	/* tree of virtual addresses mapped */
	struct rb_root_cached va;

	/* Lock to prevent eviction while we are updating page tables
	 * use vm_eviction_lock/unlock(vm)
	 */
	struct mutex eviction_lock;
	bool evicting;
	unsigned int saved_flags;

	/* BOs which need a validation */
	struct list_head evicted;

	/* PT BOs which have been relocated and whose parent needs an update */
	struct list_head relocated;

	/* per VM BOs moved, but not yet updated in the PT */
	struct list_head moved;

	/* All BOs of this VM not currently in the state machine */
	struct list_head idle;

	/* regular invalidated BOs, but not yet updated in the PT */
	struct list_head invalidated;
	spinlock_t invalidated_lock;

	/* BO mappings freed, but not yet updated in the PT */
	struct list_head freed;

	/* BOs which were invalidated and have already been updated in the PTs */
	struct list_head done;

	/* contains the page directory */
	struct amdgpu_vm_bo_base root;
	struct dma_fence *last_update;

	/* Scheduler entities for page table updates */
	struct drm_sched_entity immediate;
	struct drm_sched_entity delayed;

	/* Last finished delayed update */
	atomic64_t tlb_seq;
	struct dma_fence *last_tlb_flush;

	/* Last unlocked submission to the scheduler entities */
	struct dma_fence *last_unlocked;

	unsigned int pasid;
	/* dedicated to vm */
	struct amdgpu_vmid *reserved_vmid[AMDGPU_MAX_VMHUBS];

	/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
	bool use_cpu_for_update;

	/* Functions to use for VM table updates */
	const struct amdgpu_vm_update_funcs *update_funcs;

	/* Flag to indicate ATS support from PTE for GFX9 */
	bool pte_support_ats;

	/* Up to 128 pending retry page faults */
	DECLARE_KFIFO(faults, u64, 128);

	/* Points to the KFD process VM info */
	struct amdkfd_process_info *process_info;

	/* List node in amdkfd_process_info.vm_list_head */
	struct list_head vm_list_node;

	/* Valid while the PD is reserved or fenced */
	uint64_t pd_phys_addr;

	/* Some basic info about the task */
	struct amdgpu_task_info task_info;

	/* Store positions of group of BOs */
	struct ttm_lru_bulk_move lru_bulk_move;
	/* Flag to indicate if VM is used for compute */
	bool is_compute_context;
};

struct amdgpu_vm_manager {
	/* Handling of VMIDs */
	struct amdgpu_vmid_mgr id_mgr[AMDGPU_MAX_VMHUBS];
	unsigned int first_kfd_vmid;
	bool concurrent_flush;

	/* Handling of VM fences */
	u64 fence_context;
	unsigned seqno[AMDGPU_MAX_RINGS];

	uint64_t max_pfn;
	uint32_t num_level;
	uint32_t block_size;
	uint32_t fragment_size;
	enum amdgpu_vm_level root_level;
	/* vram base address for page table entry */
	u64 vram_base_offset;
	/* vm pte handling */
	const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
	struct drm_gpu_scheduler *vm_pte_scheds[AMDGPU_MAX_RINGS];
	unsigned vm_pte_num_scheds;
	struct amdgpu_ring *page_fault;

	/* partial resident texture handling */
	spinlock_t prt_lock;
	atomic_t num_prt_users;

	/* controls how VM page tables are updated for Graphics and Compute.
	 * BIT0[= 0] Graphics updated by SDMA [= 1] by CPU
	 * BIT1[= 0] Compute updated by SDMA [= 1] by CPU
	 */
	int vm_update_mode;

	/* PASID to VM mapping, will be used in interrupt context to
	 * look up VM of a page fault
	 */
	struct xarray pasids;
};

struct amdgpu_bo_va_mapping;

#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
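
/*
 * These wrappers simply dispatch into the per-ASIC vm_pte_funcs table, e.g.
 * (illustrative only) amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr,
 * flags) ends up in the ASIC specific set_pte_pde() implementation selected
 * at driver init.
 */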

extern const struct amdgpu_vm_update_funcs amdgpu_vm_cpu_funcs;
extern const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs;

void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);

int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			u32 pasid);

long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry);
bool amdgpu_vm_ready(struct amdgpu_vm *vm);
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			      int (*callback)(void *p, struct amdgpu_bo *bo),
			      void *param);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm, bool immediate);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm,
			  struct dma_fence **fence);
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
			   struct amdgpu_vm *vm);
void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
			    struct amdgpu_vm *vm, struct amdgpu_bo *bo);
int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			   bool immediate, bool unlocked, bool flush_tlb,
			   struct dma_resv *resv, uint64_t start, uint64_t last,
			   uint64_t flags, uint64_t offset, uint64_t vram_base,
			   struct ttm_resource *res, dma_addr_t *pages_addr,
			   struct dma_fence **fence);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			bool clear);
bool amdgpu_vm_evictable(struct amdgpu_bo *bo);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo, bool evicted);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo);
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t addr, uint64_t offset,
		     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
			     struct amdgpu_bo_va *bo_va,
			     uint64_t addr, uint64_t offset,
			     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t addr);
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				uint64_t saddr, uint64_t size);
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
							 uint64_t addr);
void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
void amdgpu_vm_bo_del(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va);
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
			   uint32_t fragment_size_default, unsigned max_level,
			   unsigned max_bits);
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
				  struct amdgpu_job *job);
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);

void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid,
			     struct amdgpu_task_info *task_info);
bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
			    uint64_t addr, bool write_fault);

void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);

void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
				struct amdgpu_vm *vm);
void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
			  uint64_t *gtt_mem, uint64_t *cpu_mem);

int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		       struct amdgpu_bo_vm *vmbo, bool immediate);
int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			int level, bool immediate, struct amdgpu_bo_vm **vmbo);
void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm);
bool amdgpu_vm_pt_is_root_clean(struct amdgpu_device *adev,
				struct amdgpu_vm *vm);

int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
			 struct amdgpu_vm_bo_base *entry);
int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
			  uint64_t start, uint64_t end,
			  uint64_t dst, uint64_t flags);

#if defined(CONFIG_DEBUG_FS)
void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m);
#endif

/**
 * amdgpu_vm_tlb_seq - return tlb flush sequence number
 * @vm: the amdgpu_vm structure to query
 *
 * Returns the tlb flush sequence number which indicates that the VM TLBs need
 * to be invalidated whenever the sequence number changes.
 */
static inline uint64_t amdgpu_vm_tlb_seq(struct amdgpu_vm *vm)
{
	return atomic64_read(&vm->tlb_seq);
}
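
/*
 * Illustrative usage sketch (the snapshot/compare pattern below is an
 * example, not a helper defined in this header): callers can cache the
 * sequence number and compare it later to decide whether a TLB flush is
 * still required:
 *
 *	uint64_t seq = amdgpu_vm_tlb_seq(vm);
 *	... update page tables ...
 *	if (amdgpu_vm_tlb_seq(vm) != seq)
 *		needs_flush = true;
 */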

#endif