Line data Source code
1 : /*
2 : * Copyright 2014 Advanced Micro Devices, Inc.
3 : *
4 : * Permission is hereby granted, free of charge, to any person obtaining a
5 : * copy of this software and associated documentation files (the "Software"),
6 : * to deal in the Software without restriction, including without limitation
7 : * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 : * and/or sell copies of the Software, and to permit persons to whom the
9 : * Software is furnished to do so, subject to the following conditions:
10 : *
11 : * The above copyright notice and this permission notice shall be included in
12 : * all copies or substantial portions of the Software.
13 : *
14 : * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 : * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 : * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 : * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 : * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 : * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 : * OTHER DEALINGS IN THE SOFTWARE.
21 : *
22 : * Authors: Christian König <christian.koenig@amd.com>
23 : */
24 :
25 : #include <linux/firmware.h>
26 :
27 : #include "amdgpu.h"
28 : #include "amdgpu_uvd.h"
29 : #include "vid.h"
30 : #include "uvd/uvd_6_0_d.h"
31 : #include "uvd/uvd_6_0_sh_mask.h"
32 : #include "oss/oss_2_0_d.h"
33 : #include "oss/oss_2_0_sh_mask.h"
34 : #include "smu/smu_7_1_3_d.h"
35 : #include "smu/smu_7_1_3_sh_mask.h"
36 : #include "bif/bif_5_1_d.h"
37 : #include "gmc/gmc_8_1_d.h"
38 : #include "vi.h"
39 : #include "ivsrcid/ivsrcid_vislands30.h"
40 :
41 : /* Polaris10/11/12 firmware version */
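       : /* encoded as (major << 24) | (minor << 16) | (rev << 8), i.e. 1.130.16;
       :  * uvd_v6_0_enc_support() requires at least this version (when known)
       :  * before exposing the ENC rings */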
42 : #define FW_1_130_16 ((1 << 24) | (130 << 16) | (16 << 8))
43 :
44 : static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
45 : static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev);
46 :
47 : static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
48 : static int uvd_v6_0_start(struct amdgpu_device *adev);
49 : static void uvd_v6_0_stop(struct amdgpu_device *adev);
50 : static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev);
51 : static int uvd_v6_0_set_clockgating_state(void *handle,
52 : enum amd_clockgating_state state);
53 : static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
54 : bool enable);
55 :
56 : /**
57 : * uvd_v6_0_enc_support - get encode support status
58 : *
59 : * @adev: amdgpu_device pointer
60 : *
61 : * Returns the current hardware encode support status
62 : */
63 : static inline bool uvd_v6_0_enc_support(struct amdgpu_device *adev)
64 : {
65 0 : return ((adev->asic_type >= CHIP_POLARIS10) &&
66 0 : (adev->asic_type <= CHIP_VEGAM) &&
67 0 : (!adev->uvd.fw_version || adev->uvd.fw_version >= FW_1_130_16));
68 : }
69 :
70 : /**
71 : * uvd_v6_0_ring_get_rptr - get read pointer
72 : *
73 : * @ring: amdgpu_ring pointer
74 : *
75 : * Returns the current hardware read pointer
76 : */
77 0 : static uint64_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
78 : {
79 0 : struct amdgpu_device *adev = ring->adev;
80 :
81 0 : return RREG32(mmUVD_RBC_RB_RPTR);
82 : }
83 :
84 : /**
85 : * uvd_v6_0_enc_ring_get_rptr - get enc read pointer
86 : *
87 : * @ring: amdgpu_ring pointer
88 : *
89 : * Returns the current hardware enc read pointer
90 : */
91 0 : static uint64_t uvd_v6_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
92 : {
93 0 : struct amdgpu_device *adev = ring->adev;
94 :
95 0 : if (ring == &adev->uvd.inst->ring_enc[0])
96 0 : return RREG32(mmUVD_RB_RPTR);
97 : else
98 0 : return RREG32(mmUVD_RB_RPTR2);
99 : }
100 : /**
101 : * uvd_v6_0_ring_get_wptr - get write pointer
102 : *
103 : * @ring: amdgpu_ring pointer
104 : *
105 : * Returns the current hardware write pointer
106 : */
107 0 : static uint64_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
108 : {
109 0 : struct amdgpu_device *adev = ring->adev;
110 :
111 0 : return RREG32(mmUVD_RBC_RB_WPTR);
112 : }
113 :
114 : /**
115 : * uvd_v6_0_enc_ring_get_wptr - get enc write pointer
116 : *
117 : * @ring: amdgpu_ring pointer
118 : *
119 : * Returns the current hardware enc write pointer
120 : */
121 0 : static uint64_t uvd_v6_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
122 : {
123 0 : struct amdgpu_device *adev = ring->adev;
124 :
125 0 : if (ring == &adev->uvd.inst->ring_enc[0])
126 0 : return RREG32(mmUVD_RB_WPTR);
127 : else
128 0 : return RREG32(mmUVD_RB_WPTR2);
129 : }
130 :
131 : /**
132 : * uvd_v6_0_ring_set_wptr - set write pointer
133 : *
134 : * @ring: amdgpu_ring pointer
135 : *
136 : * Commits the write pointer to the hardware
137 : */
138 0 : static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
139 : {
140 0 : struct amdgpu_device *adev = ring->adev;
141 :
142 0 : WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
143 0 : }
144 :
145 : /**
146 : * uvd_v6_0_enc_ring_set_wptr - set enc write pointer
147 : *
148 : * @ring: amdgpu_ring pointer
149 : *
150 : * Commits the enc write pointer to the hardware
151 : */
152 0 : static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
153 : {
154 0 : struct amdgpu_device *adev = ring->adev;
155 :
156 0 : if (ring == &adev->uvd.inst->ring_enc[0])
157 0 : WREG32(mmUVD_RB_WPTR,
158 : lower_32_bits(ring->wptr));
159 : else
160 0 : WREG32(mmUVD_RB_WPTR2,
161 : lower_32_bits(ring->wptr));
162 0 : }
163 :
164 : /**
165 : * uvd_v6_0_enc_ring_test_ring - test if UVD ENC ring is working
166 : *
167 : * @ring: the engine to test on
168 : *
169 : */
170 0 : static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
171 : {
172 0 : struct amdgpu_device *adev = ring->adev;
173 : uint32_t rptr;
174 : unsigned i;
175 : int r;
176 :
177 0 : r = amdgpu_ring_alloc(ring, 16);
178 0 : if (r)
179 : return r;
180 :
181 0 : rptr = amdgpu_ring_get_rptr(ring);
182 :
183 0 : amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
184 0 : amdgpu_ring_commit(ring);
185 :
186 0 : for (i = 0; i < adev->usec_timeout; i++) {
187 0 : if (amdgpu_ring_get_rptr(ring) != rptr)
188 : break;
189 0 : udelay(1);
190 : }
191 :
192 0 : if (i >= adev->usec_timeout)
193 0 : r = -ETIMEDOUT;
194 :
195 : return r;
196 : }
197 :
198 : /**
199 : * uvd_v6_0_enc_get_create_msg - generate a UVD ENC create msg
200 : *
201 : * @ring: ring we should submit the msg to
202 : * @handle: session handle to use
203 : * @bo: amdgpu object for which we query the offset
204 : * @fence: optional fence to return
205 : *
206 : * Open up a stream for HW test
207 : */
208 0 : static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
209 : struct amdgpu_bo *bo,
210 : struct dma_fence **fence)
211 : {
212 0 : const unsigned ib_size_dw = 16;
213 : struct amdgpu_job *job;
214 : struct amdgpu_ib *ib;
215 0 : struct dma_fence *f = NULL;
216 : uint64_t addr;
217 : int i, r;
218 :
219 0 : r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
220 : AMDGPU_IB_POOL_DIRECT, &job);
221 0 : if (r)
222 : return r;
223 :
224 0 : ib = &job->ibs[0];
225 0 : addr = amdgpu_bo_gpu_offset(bo);
226 :
227 : ib->length_dw = 0;
228 0 : ib->ptr[ib->length_dw++] = 0x00000018;
229 0 : ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
230 0 : ib->ptr[ib->length_dw++] = handle;
231 0 : ib->ptr[ib->length_dw++] = 0x00010000;
232 0 : ib->ptr[ib->length_dw++] = upper_32_bits(addr);
233 0 : ib->ptr[ib->length_dw++] = addr;
234 :
235 0 : ib->ptr[ib->length_dw++] = 0x00000014;
236 0 : ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
237 0 : ib->ptr[ib->length_dw++] = 0x0000001c;
238 0 : ib->ptr[ib->length_dw++] = 0x00000001;
239 0 : ib->ptr[ib->length_dw++] = 0x00000000;
240 :
241 0 : ib->ptr[ib->length_dw++] = 0x00000008;
242 0 : ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */
243 :
244 0 : for (i = ib->length_dw; i < ib_size_dw; ++i)
245 0 : ib->ptr[i] = 0x0;
246 :
247 0 : r = amdgpu_job_submit_direct(job, ring, &f);
248 0 : if (r)
249 : goto err;
250 :
251 0 : if (fence)
252 0 : *fence = dma_fence_get(f);
253 0 : dma_fence_put(f);
254 : return 0;
255 :
256 : err:
257 0 : amdgpu_job_free(job);
258 0 : return r;
259 : }
260 :
261 : /**
262 : * uvd_v6_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
263 : *
264 : * @ring: ring we should submit the msg to
265 : * @handle: session handle to use
266 : * @bo: amdgpu object for which we query the offset
267 : * @fence: optional fence to return
268 : *
269 : * Close up a stream for HW test or if userspace failed to do so
270 : */
271 0 : static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
272 : uint32_t handle,
273 : struct amdgpu_bo *bo,
274 : struct dma_fence **fence)
275 : {
276 0 : const unsigned ib_size_dw = 16;
277 : struct amdgpu_job *job;
278 : struct amdgpu_ib *ib;
279 0 : struct dma_fence *f = NULL;
280 : uint64_t addr;
281 : int i, r;
282 :
283 0 : r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
284 : AMDGPU_IB_POOL_DIRECT, &job);
285 0 : if (r)
286 : return r;
287 :
288 0 : ib = &job->ibs[0];
289 0 : addr = amdgpu_bo_gpu_offset(bo);
290 :
291 : ib->length_dw = 0;
292 0 : ib->ptr[ib->length_dw++] = 0x00000018;
293 0 : ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
294 0 : ib->ptr[ib->length_dw++] = handle;
295 0 : ib->ptr[ib->length_dw++] = 0x00010000;
296 0 : ib->ptr[ib->length_dw++] = upper_32_bits(addr);
297 0 : ib->ptr[ib->length_dw++] = addr;
298 :
299 0 : ib->ptr[ib->length_dw++] = 0x00000014;
300 0 : ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
301 0 : ib->ptr[ib->length_dw++] = 0x0000001c;
302 0 : ib->ptr[ib->length_dw++] = 0x00000001;
303 0 : ib->ptr[ib->length_dw++] = 0x00000000;
304 :
305 0 : ib->ptr[ib->length_dw++] = 0x00000008;
306 0 : ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */
307 :
308 0 : for (i = ib->length_dw; i < ib_size_dw; ++i)
309 0 : ib->ptr[i] = 0x0;
310 :
311 0 : r = amdgpu_job_submit_direct(job, ring, &f);
312 0 : if (r)
313 : goto err;
314 :
315 0 : if (fence)
316 0 : *fence = dma_fence_get(f);
317 0 : dma_fence_put(f);
318 : return 0;
319 :
320 : err:
321 0 : amdgpu_job_free(job);
322 0 : return r;
323 : }
324 :
325 : /**
326 : * uvd_v6_0_enc_ring_test_ib - test if UVD ENC IBs are working
327 : *
328 : * @ring: the engine to test on
329 : * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
330 : *
331 : */
332 0 : static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
333 : {
334 0 : struct dma_fence *fence = NULL;
335 0 : struct amdgpu_bo *bo = ring->adev->uvd.ib_bo;
336 : long r;
337 :
338 0 : r = uvd_v6_0_enc_get_create_msg(ring, 1, bo, NULL);
339 0 : if (r)
340 : goto error;
341 :
342 0 : r = uvd_v6_0_enc_get_destroy_msg(ring, 1, bo, &fence);
343 0 : if (r)
344 : goto error;
345 :
346 0 : r = dma_fence_wait_timeout(fence, false, timeout);
347 0 : if (r == 0)
348 : r = -ETIMEDOUT;
349 0 : else if (r > 0)
350 0 : r = 0;
351 :
352 : error:
353 0 : dma_fence_put(fence);
354 0 : return r;
355 : }
356 :
357 0 : static int uvd_v6_0_early_init(void *handle)
358 : {
359 0 : struct amdgpu_device *adev = (struct amdgpu_device *)handle;
360 0 : adev->uvd.num_uvd_inst = 1;
361 :
362 0 : if (!(adev->flags & AMD_IS_APU) &&
363 0 : (RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK))
364 : return -ENOENT;
365 :
366 0 : uvd_v6_0_set_ring_funcs(adev);
367 :
368 0 : if (uvd_v6_0_enc_support(adev)) {
369 0 : adev->uvd.num_enc_rings = 2;
370 : uvd_v6_0_set_enc_ring_funcs(adev);
371 : }
372 :
373 0 : uvd_v6_0_set_irq_funcs(adev);
374 :
375 0 : return 0;
376 : }
377 :
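       : /**
       :  * uvd_v6_0_sw_init - sw init for UVD block
       :  *
       :  * @handle: handle used to pass amdgpu_device pointer
       :  *
       :  * Register the UVD trap interrupts, load the firmware and initialize the
       :  * decode ring and, when encode is supported, the two encode rings.
       :  */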
378 0 : static int uvd_v6_0_sw_init(void *handle)
379 : {
380 : struct amdgpu_ring *ring;
381 : int i, r;
382 0 : struct amdgpu_device *adev = (struct amdgpu_device *)handle;
383 :
384 : /* UVD TRAP */
385 0 : r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
386 0 : if (r)
387 : return r;
388 :
389 : /* UVD ENC TRAP */
390 0 : if (uvd_v6_0_enc_support(adev)) {
391 0 : for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
392 0 : r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP, &adev->uvd.inst->irq);
393 0 : if (r)
394 : return r;
395 : }
396 : }
397 :
398 0 : r = amdgpu_uvd_sw_init(adev);
399 0 : if (r)
400 : return r;
401 :
402 0 : if (!uvd_v6_0_enc_support(adev)) {
403 0 : for (i = 0; i < adev->uvd.num_enc_rings; ++i)
404 0 : adev->uvd.inst->ring_enc[i].funcs = NULL;
405 :
406 0 : adev->uvd.inst->irq.num_types = 1;
407 0 : adev->uvd.num_enc_rings = 0;
408 :
409 0 : DRM_INFO("UVD ENC is disabled\n");
410 : }
411 :
412 0 : ring = &adev->uvd.inst->ring;
413 0 : sprintf(ring->name, "uvd");
414 0 : r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
415 : AMDGPU_RING_PRIO_DEFAULT, NULL);
416 0 : if (r)
417 : return r;
418 :
419 0 : r = amdgpu_uvd_resume(adev);
420 0 : if (r)
421 : return r;
422 :
423 0 : if (uvd_v6_0_enc_support(adev)) {
424 0 : for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
425 0 : ring = &adev->uvd.inst->ring_enc[i];
426 0 : sprintf(ring->name, "uvd_enc%d", i);
427 0 : r = amdgpu_ring_init(adev, ring, 512,
428 : &adev->uvd.inst->irq, 0,
429 : AMDGPU_RING_PRIO_DEFAULT, NULL);
430 0 : if (r)
431 : return r;
432 : }
433 : }
434 :
435 0 : r = amdgpu_uvd_entity_init(adev);
436 :
437 0 : return r;
438 : }
439 :
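       : /**
       :  * uvd_v6_0_sw_fini - sw fini for UVD block
       :  *
       :  * @handle: handle used to pass amdgpu_device pointer
       :  *
       :  * Suspend UVD, tear down the encode rings and free the remaining UVD
       :  * software state.
       :  */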
440 0 : static int uvd_v6_0_sw_fini(void *handle)
441 : {
442 : int i, r;
443 0 : struct amdgpu_device *adev = (struct amdgpu_device *)handle;
444 :
445 0 : r = amdgpu_uvd_suspend(adev);
446 0 : if (r)
447 : return r;
448 :
449 0 : if (uvd_v6_0_enc_support(adev)) {
450 0 : for (i = 0; i < adev->uvd.num_enc_rings; ++i)
451 0 : amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
452 : }
453 :
454 0 : return amdgpu_uvd_sw_fini(adev);
455 : }
456 :
457 : /**
458 : * uvd_v6_0_hw_init - start and test UVD block
459 : *
460 : * @handle: handle used to pass amdgpu_device pointer
461 : *
462 : * Initialize the hardware, boot up the VCPU and do some testing
463 : */
464 0 : static int uvd_v6_0_hw_init(void *handle)
465 : {
466 0 : struct amdgpu_device *adev = (struct amdgpu_device *)handle;
467 0 : struct amdgpu_ring *ring = &adev->uvd.inst->ring;
468 : uint32_t tmp;
469 : int i, r;
470 :
471 0 : amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
472 0 : uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
473 0 : uvd_v6_0_enable_mgcg(adev, true);
474 :
475 0 : r = amdgpu_ring_test_helper(ring);
476 0 : if (r)
477 : goto done;
478 :
479 0 : r = amdgpu_ring_alloc(ring, 10);
480 0 : if (r) {
481 0 : DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
482 0 : goto done;
483 : }
484 :
485 0 : tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
486 0 : amdgpu_ring_write(ring, tmp);
487 0 : amdgpu_ring_write(ring, 0xFFFFF);
488 :
489 0 : tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
490 0 : amdgpu_ring_write(ring, tmp);
491 0 : amdgpu_ring_write(ring, 0xFFFFF);
492 :
493 0 : tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
494 0 : amdgpu_ring_write(ring, tmp);
495 0 : amdgpu_ring_write(ring, 0xFFFFF);
496 :
497 : /* Clear timeout status bits */
498 0 : amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
499 0 : amdgpu_ring_write(ring, 0x8);
500 :
501 0 : amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
502 0 : amdgpu_ring_write(ring, 3);
503 :
504 0 : amdgpu_ring_commit(ring);
505 :
506 0 : if (uvd_v6_0_enc_support(adev)) {
507 0 : for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
508 0 : ring = &adev->uvd.inst->ring_enc[i];
509 0 : r = amdgpu_ring_test_helper(ring);
510 0 : if (r)
511 : goto done;
512 : }
513 : }
514 :
515 : done:
516 0 : if (!r) {
517 0 : if (uvd_v6_0_enc_support(adev))
518 0 : DRM_INFO("UVD and UVD ENC initialized successfully.\n");
519 : else
520 0 : DRM_INFO("UVD initialized successfully.\n");
521 : }
522 :
523 0 : return r;
524 : }
525 :
526 : /**
527 : * uvd_v6_0_hw_fini - stop the hardware block
528 : *
529 : * @handle: handle used to pass amdgpu_device pointer
530 : *
531 : * Stop the UVD block, mark ring as not ready any more
532 : */
533 0 : static int uvd_v6_0_hw_fini(void *handle)
534 : {
535 0 : struct amdgpu_device *adev = (struct amdgpu_device *)handle;
536 :
537 0 : cancel_delayed_work_sync(&adev->uvd.idle_work);
538 :
539 0 : if (RREG32(mmUVD_STATUS) != 0)
540 0 : uvd_v6_0_stop(adev);
541 :
542 0 : return 0;
543 : }
544 :
545 0 : static int uvd_v6_0_suspend(void *handle)
546 : {
547 : int r;
548 0 : struct amdgpu_device *adev = (struct amdgpu_device *)handle;
549 :
550 : /*
551 : * Proper cleanups before halting the HW engine:
552 : * - cancel the delayed idle work
553 : * - enable powergating
554 : * - enable clockgating
555 : * - disable dpm
556 : *
557 : * TODO: to align with the VCN implementation, move the
558 : * jobs for clockgating/powergating/dpm setting to
559 : * ->set_powergating_state().
560 : */
561 0 : cancel_delayed_work_sync(&adev->uvd.idle_work);
562 :
563 0 : if (adev->pm.dpm_enabled) {
564 0 : amdgpu_dpm_enable_uvd(adev, false);
565 : } else {
566 0 : amdgpu_asic_set_uvd_clocks(adev, 0, 0);
567 : /* shutdown the UVD block */
568 0 : amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
569 : AMD_PG_STATE_GATE);
570 0 : amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
571 : AMD_CG_STATE_GATE);
572 : }
573 :
574 0 : r = uvd_v6_0_hw_fini(adev);
575 0 : if (r)
576 : return r;
577 :
578 0 : return amdgpu_uvd_suspend(adev);
579 : }
580 :
581 0 : static int uvd_v6_0_resume(void *handle)
582 : {
583 : int r;
584 0 : struct amdgpu_device *adev = (struct amdgpu_device *)handle;
585 :
586 0 : r = amdgpu_uvd_resume(adev);
587 0 : if (r)
588 : return r;
589 :
590 0 : return uvd_v6_0_hw_init(adev);
591 : }
592 :
593 : /**
594 : * uvd_v6_0_mc_resume - memory controller programming
595 : *
596 : * @adev: amdgpu_device pointer
597 : *
 598 : * Let the UVD memory controller know its offsets
599 : */
600 0 : static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
601 : {
602 : uint64_t offset;
603 : uint32_t size;
604 :
605 : /* program memory controller bits 0-27 */
606 0 : WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
607 : lower_32_bits(adev->uvd.inst->gpu_addr));
608 0 : WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
609 : upper_32_bits(adev->uvd.inst->gpu_addr));
610 :
611 0 : offset = AMDGPU_UVD_FIRMWARE_OFFSET;
612 0 : size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
613 0 : WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
614 0 : WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
615 :
616 0 : offset += size;
617 0 : size = AMDGPU_UVD_HEAP_SIZE;
618 0 : WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
619 0 : WREG32(mmUVD_VCPU_CACHE_SIZE1, size);
620 :
621 0 : offset += size;
622 0 : size = AMDGPU_UVD_STACK_SIZE +
623 0 : (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
624 0 : WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
625 0 : WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
626 :
627 0 : WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
628 0 : WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
629 0 : WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
630 :
631 0 : WREG32(mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
632 0 : }
633 :
634 : #if 0
635 : static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
636 : bool enable)
637 : {
638 : u32 data, data1;
639 :
640 : data = RREG32(mmUVD_CGC_GATE);
641 : data1 = RREG32(mmUVD_SUVD_CGC_GATE);
642 : if (enable) {
643 : data |= UVD_CGC_GATE__SYS_MASK |
644 : UVD_CGC_GATE__UDEC_MASK |
645 : UVD_CGC_GATE__MPEG2_MASK |
646 : UVD_CGC_GATE__RBC_MASK |
647 : UVD_CGC_GATE__LMI_MC_MASK |
648 : UVD_CGC_GATE__IDCT_MASK |
649 : UVD_CGC_GATE__MPRD_MASK |
650 : UVD_CGC_GATE__MPC_MASK |
651 : UVD_CGC_GATE__LBSI_MASK |
652 : UVD_CGC_GATE__LRBBM_MASK |
653 : UVD_CGC_GATE__UDEC_RE_MASK |
654 : UVD_CGC_GATE__UDEC_CM_MASK |
655 : UVD_CGC_GATE__UDEC_IT_MASK |
656 : UVD_CGC_GATE__UDEC_DB_MASK |
657 : UVD_CGC_GATE__UDEC_MP_MASK |
658 : UVD_CGC_GATE__WCB_MASK |
659 : UVD_CGC_GATE__VCPU_MASK |
660 : UVD_CGC_GATE__SCPU_MASK;
661 : data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
662 : UVD_SUVD_CGC_GATE__SIT_MASK |
663 : UVD_SUVD_CGC_GATE__SMP_MASK |
664 : UVD_SUVD_CGC_GATE__SCM_MASK |
665 : UVD_SUVD_CGC_GATE__SDB_MASK |
666 : UVD_SUVD_CGC_GATE__SRE_H264_MASK |
667 : UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
668 : UVD_SUVD_CGC_GATE__SIT_H264_MASK |
669 : UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
670 : UVD_SUVD_CGC_GATE__SCM_H264_MASK |
671 : UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
672 : UVD_SUVD_CGC_GATE__SDB_H264_MASK |
673 : UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;
674 : } else {
675 : data &= ~(UVD_CGC_GATE__SYS_MASK |
676 : UVD_CGC_GATE__UDEC_MASK |
677 : UVD_CGC_GATE__MPEG2_MASK |
678 : UVD_CGC_GATE__RBC_MASK |
679 : UVD_CGC_GATE__LMI_MC_MASK |
680 : UVD_CGC_GATE__LMI_UMC_MASK |
681 : UVD_CGC_GATE__IDCT_MASK |
682 : UVD_CGC_GATE__MPRD_MASK |
683 : UVD_CGC_GATE__MPC_MASK |
684 : UVD_CGC_GATE__LBSI_MASK |
685 : UVD_CGC_GATE__LRBBM_MASK |
686 : UVD_CGC_GATE__UDEC_RE_MASK |
687 : UVD_CGC_GATE__UDEC_CM_MASK |
688 : UVD_CGC_GATE__UDEC_IT_MASK |
689 : UVD_CGC_GATE__UDEC_DB_MASK |
690 : UVD_CGC_GATE__UDEC_MP_MASK |
691 : UVD_CGC_GATE__WCB_MASK |
692 : UVD_CGC_GATE__VCPU_MASK |
693 : UVD_CGC_GATE__SCPU_MASK);
694 : data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK |
695 : UVD_SUVD_CGC_GATE__SIT_MASK |
696 : UVD_SUVD_CGC_GATE__SMP_MASK |
697 : UVD_SUVD_CGC_GATE__SCM_MASK |
698 : UVD_SUVD_CGC_GATE__SDB_MASK |
699 : UVD_SUVD_CGC_GATE__SRE_H264_MASK |
700 : UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
701 : UVD_SUVD_CGC_GATE__SIT_H264_MASK |
702 : UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
703 : UVD_SUVD_CGC_GATE__SCM_H264_MASK |
704 : UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
705 : UVD_SUVD_CGC_GATE__SDB_H264_MASK |
706 : UVD_SUVD_CGC_GATE__SDB_HEVC_MASK);
707 : }
708 : WREG32(mmUVD_CGC_GATE, data);
709 : WREG32(mmUVD_SUVD_CGC_GATE, data1);
710 : }
711 : #endif
712 :
713 : /**
714 : * uvd_v6_0_start - start UVD block
715 : *
716 : * @adev: amdgpu_device pointer
717 : *
718 : * Setup and start the UVD block
719 : */
720 0 : static int uvd_v6_0_start(struct amdgpu_device *adev)
721 : {
722 0 : struct amdgpu_ring *ring = &adev->uvd.inst->ring;
723 : uint32_t rb_bufsz, tmp;
724 : uint32_t lmi_swap_cntl;
725 : uint32_t mp_swap_cntl;
726 : int i, j, r;
727 :
728 : /* disable DPG */
729 0 : WREG32_P(mmUVD_POWER_STATUS, 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
730 :
731 : /* disable byte swapping */
732 0 : lmi_swap_cntl = 0;
733 0 : mp_swap_cntl = 0;
734 :
735 0 : uvd_v6_0_mc_resume(adev);
736 :
 737 : /* disable interrupt */
738 0 : WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0);
739 :
740 : /* stall UMC and register bus before resetting VCPU */
741 0 : WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1);
742 0 : mdelay(1);
743 :
 744 : /* put LMI, VCPU, RBC etc. into reset */
745 0 : WREG32(mmUVD_SOFT_RESET,
746 : UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
747 : UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
748 : UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
749 : UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
750 : UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
751 : UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
752 : UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
753 : UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
754 0 : mdelay(5);
755 :
756 : /* take UVD block out of reset */
757 0 : WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0);
758 0 : mdelay(5);
759 :
760 : /* initialize UVD memory controller */
761 0 : WREG32(mmUVD_LMI_CTRL,
762 : (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
763 : UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
764 : UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
765 : UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
766 : UVD_LMI_CTRL__REQ_MODE_MASK |
767 : UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK);
768 :
769 : #ifdef __BIG_ENDIAN
770 : /* swap (8 in 32) RB and IB */
771 : lmi_swap_cntl = 0xa;
772 : mp_swap_cntl = 0;
773 : #endif
774 0 : WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
775 0 : WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
776 :
777 0 : WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
778 0 : WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
779 0 : WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
780 0 : WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
781 0 : WREG32(mmUVD_MPC_SET_ALU, 0);
782 0 : WREG32(mmUVD_MPC_SET_MUX, 0x88);
783 :
784 : /* take all subblocks out of reset, except VCPU */
785 0 : WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
786 0 : mdelay(5);
787 :
788 : /* enable VCPU clock */
789 0 : WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);
790 :
791 : /* enable UMC */
792 0 : WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 0);
793 :
794 : /* boot up the VCPU */
795 0 : WREG32(mmUVD_SOFT_RESET, 0);
796 0 : mdelay(10);
797 :
798 0 : for (i = 0; i < 10; ++i) {
799 : uint32_t status;
800 :
801 0 : for (j = 0; j < 100; ++j) {
802 0 : status = RREG32(mmUVD_STATUS);
803 0 : if (status & 2)
804 : break;
805 0 : mdelay(10);
806 : }
807 0 : r = 0;
808 0 : if (status & 2)
809 : break;
810 :
811 0 : DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
812 0 : WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 1);
813 0 : mdelay(10);
814 0 : WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 0);
815 0 : mdelay(10);
816 0 : r = -1;
817 : }
818 :
819 0 : if (r) {
820 0 : DRM_ERROR("UVD not responding, giving up!!!\n");
821 0 : return r;
822 : }
823 : /* enable master interrupt */
824 0 : WREG32_P(mmUVD_MASTINT_EN,
825 : (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
826 : ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
827 :
828 : /* clear the bit 4 of UVD_STATUS */
829 0 : WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
830 :
831 : /* force RBC into idle state */
832 0 : rb_bufsz = order_base_2(ring->ring_size);
833 0 : tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
834 0 : tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
835 0 : tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
836 0 : tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
837 0 : tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
838 0 : tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
839 0 : WREG32(mmUVD_RBC_RB_CNTL, tmp);
840 :
841 : /* set the write pointer delay */
842 0 : WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);
843 :
844 : /* set the wb address */
845 0 : WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));
846 :
847 : /* program the RB_BASE for ring buffer */
848 0 : WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
849 : lower_32_bits(ring->gpu_addr));
850 0 : WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
851 : upper_32_bits(ring->gpu_addr));
852 :
853 : /* Initialize the ring buffer's read and write pointers */
854 0 : WREG32(mmUVD_RBC_RB_RPTR, 0);
855 :
856 0 : ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
857 0 : WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
858 :
859 0 : WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);
860 :
861 0 : if (uvd_v6_0_enc_support(adev)) {
862 0 : ring = &adev->uvd.inst->ring_enc[0];
863 0 : WREG32(mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
864 0 : WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
865 0 : WREG32(mmUVD_RB_BASE_LO, ring->gpu_addr);
866 0 : WREG32(mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
867 0 : WREG32(mmUVD_RB_SIZE, ring->ring_size / 4);
868 :
869 0 : ring = &adev->uvd.inst->ring_enc[1];
870 0 : WREG32(mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
871 0 : WREG32(mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
872 0 : WREG32(mmUVD_RB_BASE_LO2, ring->gpu_addr);
873 0 : WREG32(mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
874 0 : WREG32(mmUVD_RB_SIZE2, ring->ring_size / 4);
875 : }
876 :
877 : return 0;
878 : }
879 :
880 : /**
881 : * uvd_v6_0_stop - stop UVD block
882 : *
883 : * @adev: amdgpu_device pointer
884 : *
885 : * stop the UVD block
886 : */
887 0 : static void uvd_v6_0_stop(struct amdgpu_device *adev)
888 : {
889 : /* force RBC into idle state */
890 0 : WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);
891 :
892 : /* Stall UMC and register bus before resetting VCPU */
893 0 : WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
894 0 : mdelay(1);
895 :
896 : /* put VCPU into reset */
897 0 : WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
898 0 : mdelay(5);
899 :
900 : /* disable VCPU clock */
901 0 : WREG32(mmUVD_VCPU_CNTL, 0x0);
902 :
903 : /* Unstall UMC and register bus */
904 0 : WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
905 :
906 0 : WREG32(mmUVD_STATUS, 0);
907 0 : }
908 :
909 : /**
 910 : * uvd_v6_0_ring_emit_fence - emit a fence & trap command
911 : *
912 : * @ring: amdgpu_ring pointer
913 : * @addr: address
914 : * @seq: sequence number
915 : * @flags: fence related flags
916 : *
917 : * Write a fence and a trap command to the ring.
918 : */
919 0 : static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
920 : unsigned flags)
921 : {
922 0 : WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
923 :
924 0 : amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
925 0 : amdgpu_ring_write(ring, seq);
926 0 : amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
927 0 : amdgpu_ring_write(ring, addr & 0xffffffff);
928 0 : amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
929 0 : amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
930 0 : amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
931 0 : amdgpu_ring_write(ring, 0);
932 :
933 0 : amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
934 0 : amdgpu_ring_write(ring, 0);
935 0 : amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
936 0 : amdgpu_ring_write(ring, 0);
937 0 : amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
938 0 : amdgpu_ring_write(ring, 2);
939 0 : }
940 :
941 : /**
942 : * uvd_v6_0_enc_ring_emit_fence - emit an enc fence & trap command
943 : *
944 : * @ring: amdgpu_ring pointer
945 : * @addr: address
946 : * @seq: sequence number
947 : * @flags: fence related flags
948 : *
 949 : * Write an enc fence and a trap command to the ring.
950 : */
951 0 : static void uvd_v6_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
952 : u64 seq, unsigned flags)
953 : {
954 0 : WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
955 :
956 0 : amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
957 0 : amdgpu_ring_write(ring, addr);
958 0 : amdgpu_ring_write(ring, upper_32_bits(addr));
959 0 : amdgpu_ring_write(ring, seq);
960 0 : amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
961 0 : }
962 :
963 : /**
964 : * uvd_v6_0_ring_emit_hdp_flush - skip HDP flushing
965 : *
966 : * @ring: amdgpu_ring pointer
967 : */
968 0 : static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
969 : {
970 : /* The firmware doesn't seem to like touching registers at this point. */
971 0 : }
972 :
973 : /**
974 : * uvd_v6_0_ring_test_ring - register write test
975 : *
976 : * @ring: amdgpu_ring pointer
977 : *
978 : * Test if we can successfully write to the context register
979 : */
980 0 : static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
981 : {
982 0 : struct amdgpu_device *adev = ring->adev;
983 0 : uint32_t tmp = 0;
984 : unsigned i;
985 : int r;
986 :
987 0 : WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
988 0 : r = amdgpu_ring_alloc(ring, 3);
989 0 : if (r)
990 : return r;
991 :
992 0 : amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
993 0 : amdgpu_ring_write(ring, 0xDEADBEEF);
994 0 : amdgpu_ring_commit(ring);
995 0 : for (i = 0; i < adev->usec_timeout; i++) {
996 0 : tmp = RREG32(mmUVD_CONTEXT_ID);
997 0 : if (tmp == 0xDEADBEEF)
998 : break;
999 0 : udelay(1);
1000 : }
1001 :
1002 0 : if (i >= adev->usec_timeout)
1003 0 : r = -ETIMEDOUT;
1004 :
1005 : return r;
1006 : }
1007 :
1008 : /**
1009 : * uvd_v6_0_ring_emit_ib - execute indirect buffer
1010 : *
1011 : * @ring: amdgpu_ring pointer
1012 : * @job: job to retrieve vmid from
1013 : * @ib: indirect buffer to execute
1014 : * @flags: unused
1015 : *
1016 : * Write ring commands to execute the indirect buffer
1017 : */
1018 0 : static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
1019 : struct amdgpu_job *job,
1020 : struct amdgpu_ib *ib,
1021 : uint32_t flags)
1022 : {
1023 0 : unsigned vmid = AMDGPU_JOB_GET_VMID(job);
1024 :
1025 0 : amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
1026 0 : amdgpu_ring_write(ring, vmid);
1027 :
1028 0 : amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
1029 0 : amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1030 0 : amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
1031 0 : amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1032 0 : amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
1033 0 : amdgpu_ring_write(ring, ib->length_dw);
1034 0 : }
1035 :
1036 : /**
1037 : * uvd_v6_0_enc_ring_emit_ib - enc execute indirect buffer
1038 : *
1039 : * @ring: amdgpu_ring pointer
 1040 : * @job: job to retrieve vmid from
1041 : * @ib: indirect buffer to execute
1042 : * @flags: unused
1043 : *
1044 : * Write enc ring commands to execute the indirect buffer
1045 : */
1046 0 : static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
1047 : struct amdgpu_job *job,
1048 : struct amdgpu_ib *ib,
1049 : uint32_t flags)
1050 : {
1051 0 : unsigned vmid = AMDGPU_JOB_GET_VMID(job);
1052 :
1053 0 : amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
1054 0 : amdgpu_ring_write(ring, vmid);
1055 0 : amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1056 0 : amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1057 0 : amdgpu_ring_write(ring, ib->length_dw);
1058 0 : }
1059 :
1060 0 : static void uvd_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
1061 : uint32_t reg, uint32_t val)
1062 : {
1063 0 : amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
1064 0 : amdgpu_ring_write(ring, reg << 2);
1065 0 : amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
1066 0 : amdgpu_ring_write(ring, val);
1067 0 : amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
1068 0 : amdgpu_ring_write(ring, 0x8);
1069 0 : }
1070 :
1071 0 : static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1072 : unsigned vmid, uint64_t pd_addr)
1073 : {
1074 0 : amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1075 :
1076 0 : amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
1077 0 : amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
1078 0 : amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
1079 0 : amdgpu_ring_write(ring, 0);
1080 0 : amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
1081 0 : amdgpu_ring_write(ring, 1 << vmid); /* mask */
1082 0 : amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
1083 0 : amdgpu_ring_write(ring, 0xC);
1084 0 : }
1085 :
1086 0 : static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
1087 : {
1088 0 : uint32_t seq = ring->fence_drv.sync_seq;
1089 0 : uint64_t addr = ring->fence_drv.gpu_addr;
1090 :
1091 0 : amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
1092 0 : amdgpu_ring_write(ring, lower_32_bits(addr));
1093 0 : amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
1094 0 : amdgpu_ring_write(ring, upper_32_bits(addr));
1095 0 : amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
1096 0 : amdgpu_ring_write(ring, 0xffffffff); /* mask */
1097 0 : amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0));
1098 0 : amdgpu_ring_write(ring, seq);
1099 0 : amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
1100 0 : amdgpu_ring_write(ring, 0xE);
1101 0 : }
1102 :
1103 0 : static void uvd_v6_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
1104 : {
1105 : int i;
1106 :
1107 0 : WARN_ON(ring->wptr % 2 || count % 2);
1108 :
1109 0 : for (i = 0; i < count / 2; i++) {
1110 0 : amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
1111 0 : amdgpu_ring_write(ring, 0);
1112 : }
1113 0 : }
1114 :
1115 0 : static void uvd_v6_0_enc_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
1116 : {
1117 0 : uint32_t seq = ring->fence_drv.sync_seq;
1118 0 : uint64_t addr = ring->fence_drv.gpu_addr;
1119 :
1120 0 : amdgpu_ring_write(ring, HEVC_ENC_CMD_WAIT_GE);
1121 0 : amdgpu_ring_write(ring, lower_32_bits(addr));
1122 0 : amdgpu_ring_write(ring, upper_32_bits(addr));
1123 0 : amdgpu_ring_write(ring, seq);
1124 0 : }
1125 :
1126 0 : static void uvd_v6_0_enc_ring_insert_end(struct amdgpu_ring *ring)
1127 : {
1128 0 : amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
1129 0 : }
1130 :
1131 0 : static void uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
1132 : unsigned int vmid, uint64_t pd_addr)
1133 : {
1134 0 : amdgpu_ring_write(ring, HEVC_ENC_CMD_UPDATE_PTB);
1135 0 : amdgpu_ring_write(ring, vmid);
1136 0 : amdgpu_ring_write(ring, pd_addr >> 12);
1137 :
1138 0 : amdgpu_ring_write(ring, HEVC_ENC_CMD_FLUSH_TLB);
1139 0 : amdgpu_ring_write(ring, vmid);
1140 0 : }
1141 :
1142 0 : static bool uvd_v6_0_is_idle(void *handle)
1143 : {
1144 0 : struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1145 :
1146 0 : return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
1147 : }
1148 :
1149 0 : static int uvd_v6_0_wait_for_idle(void *handle)
1150 : {
1151 : unsigned i;
1152 0 : struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1153 :
1154 0 : for (i = 0; i < adev->usec_timeout; i++) {
1155 0 : if (uvd_v6_0_is_idle(handle))
1156 : return 0;
1157 : }
1158 : return -ETIMEDOUT;
1159 : }
1160 :
1161 : #define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd
1162 0 : static bool uvd_v6_0_check_soft_reset(void *handle)
1163 : {
1164 0 : struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1165 0 : u32 srbm_soft_reset = 0;
1166 0 : u32 tmp = RREG32(mmSRBM_STATUS);
1167 :
1168 0 : if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
1169 0 : REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
1170 0 : (RREG32(mmUVD_STATUS) & AMDGPU_UVD_STATUS_BUSY_MASK))
1171 : srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
1172 :
1173 0 : if (srbm_soft_reset) {
1174 0 : adev->uvd.inst->srbm_soft_reset = srbm_soft_reset;
1175 0 : return true;
1176 : } else {
1177 0 : adev->uvd.inst->srbm_soft_reset = 0;
1178 0 : return false;
1179 : }
1180 : }
1181 :
1182 0 : static int uvd_v6_0_pre_soft_reset(void *handle)
1183 : {
1184 0 : struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1185 :
1186 0 : if (!adev->uvd.inst->srbm_soft_reset)
1187 : return 0;
1188 :
1189 0 : uvd_v6_0_stop(adev);
1190 0 : return 0;
1191 : }
1192 :
1193 0 : static int uvd_v6_0_soft_reset(void *handle)
1194 : {
1195 0 : struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1196 : u32 srbm_soft_reset;
1197 :
1198 0 : if (!adev->uvd.inst->srbm_soft_reset)
1199 : return 0;
1200 0 : srbm_soft_reset = adev->uvd.inst->srbm_soft_reset;
1201 :
1202 : if (srbm_soft_reset) {
1203 : u32 tmp;
1204 :
1205 0 : tmp = RREG32(mmSRBM_SOFT_RESET);
1206 0 : tmp |= srbm_soft_reset;
1207 0 : dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1208 0 : WREG32(mmSRBM_SOFT_RESET, tmp);
1209 0 : tmp = RREG32(mmSRBM_SOFT_RESET);
1210 :
1211 0 : udelay(50);
1212 :
1213 0 : tmp &= ~srbm_soft_reset;
1214 0 : WREG32(mmSRBM_SOFT_RESET, tmp);
1215 0 : tmp = RREG32(mmSRBM_SOFT_RESET);
1216 :
1217 : /* Wait a little for things to settle down */
1218 : udelay(50);
1219 : }
1220 :
1221 0 : return 0;
1222 : }
1223 :
1224 0 : static int uvd_v6_0_post_soft_reset(void *handle)
1225 : {
1226 0 : struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1227 :
1228 0 : if (!adev->uvd.inst->srbm_soft_reset)
1229 : return 0;
1230 :
1231 0 : mdelay(5);
1232 :
1233 0 : return uvd_v6_0_start(adev);
1234 : }
1235 :
1236 0 : static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev,
1237 : struct amdgpu_irq_src *source,
1238 : unsigned type,
1239 : enum amdgpu_interrupt_state state)
1240 : {
1241 : // TODO
1242 0 : return 0;
1243 : }
1244 :
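       : /* src_id 124 is the decode ring trap; 119 and 120 are the two encode
       :  * ring traps, which are only valid when encode is supported */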
1245 0 : static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
1246 : struct amdgpu_irq_src *source,
1247 : struct amdgpu_iv_entry *entry)
1248 : {
1249 0 : bool int_handled = true;
1250 0 : DRM_DEBUG("IH: UVD TRAP\n");
1251 :
1252 0 : switch (entry->src_id) {
1253 : case 124:
1254 0 : amdgpu_fence_process(&adev->uvd.inst->ring);
1255 0 : break;
1256 : case 119:
1257 0 : if (likely(uvd_v6_0_enc_support(adev)))
1258 0 : amdgpu_fence_process(&adev->uvd.inst->ring_enc[0]);
1259 : else
1260 : int_handled = false;
1261 : break;
1262 : case 120:
1263 0 : if (likely(uvd_v6_0_enc_support(adev)))
1264 0 : amdgpu_fence_process(&adev->uvd.inst->ring_enc[1]);
1265 : else
1266 : int_handled = false;
1267 : break;
1268 : }
1269 :
1270 0 : if (!int_handled)
1271 0 : DRM_ERROR("Unhandled interrupt: %d %d\n",
1272 : entry->src_id, entry->src_data[0]);
1273 :
1274 0 : return 0;
1275 : }
1276 :
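       : /* program the coarse clock gates: the SUVD gates are always enabled,
       :  * the UVD gates only when @enable is set, and the VCPU gate only when
       :  * UVD powergating is supported */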
1277 0 : static void uvd_v6_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
1278 : {
1279 : uint32_t data1, data3;
1280 :
1281 0 : data1 = RREG32(mmUVD_SUVD_CGC_GATE);
1282 0 : data3 = RREG32(mmUVD_CGC_GATE);
1283 :
1284 0 : data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
1285 : UVD_SUVD_CGC_GATE__SIT_MASK |
1286 : UVD_SUVD_CGC_GATE__SMP_MASK |
1287 : UVD_SUVD_CGC_GATE__SCM_MASK |
1288 : UVD_SUVD_CGC_GATE__SDB_MASK |
1289 : UVD_SUVD_CGC_GATE__SRE_H264_MASK |
1290 : UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
1291 : UVD_SUVD_CGC_GATE__SIT_H264_MASK |
1292 : UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
1293 : UVD_SUVD_CGC_GATE__SCM_H264_MASK |
1294 : UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
1295 : UVD_SUVD_CGC_GATE__SDB_H264_MASK |
1296 : UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;
1297 :
1298 0 : if (enable) {
1299 0 : data3 |= (UVD_CGC_GATE__SYS_MASK |
1300 : UVD_CGC_GATE__UDEC_MASK |
1301 : UVD_CGC_GATE__MPEG2_MASK |
1302 : UVD_CGC_GATE__RBC_MASK |
1303 : UVD_CGC_GATE__LMI_MC_MASK |
1304 : UVD_CGC_GATE__LMI_UMC_MASK |
1305 : UVD_CGC_GATE__IDCT_MASK |
1306 : UVD_CGC_GATE__MPRD_MASK |
1307 : UVD_CGC_GATE__MPC_MASK |
1308 : UVD_CGC_GATE__LBSI_MASK |
1309 : UVD_CGC_GATE__LRBBM_MASK |
1310 : UVD_CGC_GATE__UDEC_RE_MASK |
1311 : UVD_CGC_GATE__UDEC_CM_MASK |
1312 : UVD_CGC_GATE__UDEC_IT_MASK |
1313 : UVD_CGC_GATE__UDEC_DB_MASK |
1314 : UVD_CGC_GATE__UDEC_MP_MASK |
1315 : UVD_CGC_GATE__WCB_MASK |
1316 : UVD_CGC_GATE__JPEG_MASK |
1317 : UVD_CGC_GATE__SCPU_MASK |
1318 : UVD_CGC_GATE__JPEG2_MASK);
 1319 : /* only when PG is enabled can we gate the clock to the VCPU */
1320 0 : if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
1321 0 : data3 |= UVD_CGC_GATE__VCPU_MASK;
1322 :
1323 0 : data3 &= ~UVD_CGC_GATE__REGS_MASK;
1324 : } else {
1325 : data3 = 0;
1326 : }
1327 :
1328 0 : WREG32(mmUVD_SUVD_CGC_GATE, data1);
1329 0 : WREG32(mmUVD_CGC_GATE, data3);
1330 0 : }
1331 :
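       : /* switch to dynamic (software controlled) clock gating: set the gate
       :  * delay timers and clear every per-block MODE override */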
1332 0 : static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev)
1333 : {
1334 : uint32_t data, data2;
1335 :
1336 0 : data = RREG32(mmUVD_CGC_CTRL);
1337 0 : data2 = RREG32(mmUVD_SUVD_CGC_CTRL);
1338 :
1339 :
1340 0 : data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
1341 : UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
1342 :
1343 :
1344 0 : data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
1345 : (1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
1346 : (4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));
1347 :
1348 0 : data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
1349 : UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
1350 : UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
1351 : UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
1352 : UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
1353 : UVD_CGC_CTRL__SYS_MODE_MASK |
1354 : UVD_CGC_CTRL__UDEC_MODE_MASK |
1355 : UVD_CGC_CTRL__MPEG2_MODE_MASK |
1356 : UVD_CGC_CTRL__REGS_MODE_MASK |
1357 : UVD_CGC_CTRL__RBC_MODE_MASK |
1358 : UVD_CGC_CTRL__LMI_MC_MODE_MASK |
1359 : UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
1360 : UVD_CGC_CTRL__IDCT_MODE_MASK |
1361 : UVD_CGC_CTRL__MPRD_MODE_MASK |
1362 : UVD_CGC_CTRL__MPC_MODE_MASK |
1363 : UVD_CGC_CTRL__LBSI_MODE_MASK |
1364 : UVD_CGC_CTRL__LRBBM_MODE_MASK |
1365 : UVD_CGC_CTRL__WCB_MODE_MASK |
1366 : UVD_CGC_CTRL__VCPU_MODE_MASK |
1367 : UVD_CGC_CTRL__JPEG_MODE_MASK |
1368 : UVD_CGC_CTRL__SCPU_MODE_MASK |
1369 : UVD_CGC_CTRL__JPEG2_MODE_MASK);
1370 0 : data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
1371 : UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
1372 : UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
1373 : UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
1374 : UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
1375 :
1376 0 : WREG32(mmUVD_CGC_CTRL, data);
1377 0 : WREG32(mmUVD_SUVD_CGC_CTRL, data2);
1378 0 : }
1379 :
1380 : #if 0
1381 : static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev)
1382 : {
1383 : uint32_t data, data1, cgc_flags, suvd_flags;
1384 :
1385 : data = RREG32(mmUVD_CGC_GATE);
1386 : data1 = RREG32(mmUVD_SUVD_CGC_GATE);
1387 :
1388 : cgc_flags = UVD_CGC_GATE__SYS_MASK |
1389 : UVD_CGC_GATE__UDEC_MASK |
1390 : UVD_CGC_GATE__MPEG2_MASK |
1391 : UVD_CGC_GATE__RBC_MASK |
1392 : UVD_CGC_GATE__LMI_MC_MASK |
1393 : UVD_CGC_GATE__IDCT_MASK |
1394 : UVD_CGC_GATE__MPRD_MASK |
1395 : UVD_CGC_GATE__MPC_MASK |
1396 : UVD_CGC_GATE__LBSI_MASK |
1397 : UVD_CGC_GATE__LRBBM_MASK |
1398 : UVD_CGC_GATE__UDEC_RE_MASK |
1399 : UVD_CGC_GATE__UDEC_CM_MASK |
1400 : UVD_CGC_GATE__UDEC_IT_MASK |
1401 : UVD_CGC_GATE__UDEC_DB_MASK |
1402 : UVD_CGC_GATE__UDEC_MP_MASK |
1403 : UVD_CGC_GATE__WCB_MASK |
1404 : UVD_CGC_GATE__VCPU_MASK |
1405 : UVD_CGC_GATE__SCPU_MASK |
1406 : UVD_CGC_GATE__JPEG_MASK |
1407 : UVD_CGC_GATE__JPEG2_MASK;
1408 :
1409 : suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
1410 : UVD_SUVD_CGC_GATE__SIT_MASK |
1411 : UVD_SUVD_CGC_GATE__SMP_MASK |
1412 : UVD_SUVD_CGC_GATE__SCM_MASK |
1413 : UVD_SUVD_CGC_GATE__SDB_MASK;
1414 :
1415 : data |= cgc_flags;
1416 : data1 |= suvd_flags;
1417 :
1418 : WREG32(mmUVD_CGC_GATE, data);
1419 : WREG32(mmUVD_SUVD_CGC_GATE, data1);
1420 : }
1421 : #endif
1422 :
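       : /* toggle medium grain clock gating via the UVD_CGC_MEM_CTRL memory
       :  * gates and the dynamic clock mode bit in UVD_CGC_CTRL */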
1423 0 : static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
1424 : bool enable)
1425 : {
1426 : u32 orig, data;
1427 :
1428 0 : if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
1429 0 : data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
1430 0 : data |= 0xfff;
1431 0 : WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
1432 :
1433 0 : orig = data = RREG32(mmUVD_CGC_CTRL);
1434 0 : data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
1435 0 : if (orig != data)
1436 0 : WREG32(mmUVD_CGC_CTRL, data);
1437 : } else {
1438 0 : data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
1439 0 : data &= ~0xfff;
1440 0 : WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
1441 :
1442 0 : orig = data = RREG32(mmUVD_CGC_CTRL);
1443 0 : data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
1444 0 : if (orig != data)
1445 0 : WREG32(mmUVD_CGC_CTRL, data);
1446 : }
1447 0 : }
1448 :
1449 0 : static int uvd_v6_0_set_clockgating_state(void *handle,
1450 : enum amd_clockgating_state state)
1451 : {
1452 0 : struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1453 0 : bool enable = (state == AMD_CG_STATE_GATE);
1454 :
1455 0 : if (enable) {
1456 : /* wait for STATUS to clear */
1457 0 : if (uvd_v6_0_wait_for_idle(handle))
1458 : return -EBUSY;
1459 0 : uvd_v6_0_enable_clock_gating(adev, true);
1460 : /* enable HW gates because UVD is idle */
1461 : /* uvd_v6_0_set_hw_clock_gating(adev); */
1462 : } else {
 1463 : /* disable HW gating and enable SW gating */
1464 0 : uvd_v6_0_enable_clock_gating(adev, false);
1465 : }
1466 0 : uvd_v6_0_set_sw_clock_gating(adev);
1467 0 : return 0;
1468 : }
1469 :
1470 0 : static int uvd_v6_0_set_powergating_state(void *handle,
1471 : enum amd_powergating_state state)
1472 : {
1473 : /* This doesn't actually powergate the UVD block.
1474 : * That's done in the dpm code via the SMC. This
1475 : * just re-inits the block as necessary. The actual
1476 : * gating still happens in the dpm code. We should
1477 : * revisit this when there is a cleaner line between
1478 : * the smc and the hw blocks
1479 : */
1480 0 : struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1481 0 : int ret = 0;
1482 :
1483 0 : WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);
1484 :
1485 0 : if (state == AMD_PG_STATE_GATE) {
1486 0 : uvd_v6_0_stop(adev);
1487 : } else {
1488 0 : ret = uvd_v6_0_start(adev);
1489 : if (ret)
1490 : goto out;
1491 : }
1492 :
1493 : out:
1494 0 : return ret;
1495 : }
1496 :
1497 0 : static void uvd_v6_0_get_clockgating_state(void *handle, u64 *flags)
1498 : {
1499 0 : struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1500 : int data;
1501 :
1502 0 : mutex_lock(&adev->pm.mutex);
1503 :
1504 0 : if (adev->flags & AMD_IS_APU)
1505 0 : data = RREG32_SMC(ixCURRENT_PG_STATUS_APU);
1506 : else
1507 0 : data = RREG32_SMC(ixCURRENT_PG_STATUS);
1508 :
1509 0 : if (data & CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
1510 0 : DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
1511 0 : goto out;
1512 : }
1513 :
1514 : /* AMD_CG_SUPPORT_UVD_MGCG */
1515 0 : data = RREG32(mmUVD_CGC_CTRL);
1516 0 : if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
1517 0 : *flags |= AMD_CG_SUPPORT_UVD_MGCG;
1518 :
1519 : out:
1520 0 : mutex_unlock(&adev->pm.mutex);
1521 0 : }
1522 :
1523 : static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
1524 : .name = "uvd_v6_0",
1525 : .early_init = uvd_v6_0_early_init,
1526 : .late_init = NULL,
1527 : .sw_init = uvd_v6_0_sw_init,
1528 : .sw_fini = uvd_v6_0_sw_fini,
1529 : .hw_init = uvd_v6_0_hw_init,
1530 : .hw_fini = uvd_v6_0_hw_fini,
1531 : .suspend = uvd_v6_0_suspend,
1532 : .resume = uvd_v6_0_resume,
1533 : .is_idle = uvd_v6_0_is_idle,
1534 : .wait_for_idle = uvd_v6_0_wait_for_idle,
1535 : .check_soft_reset = uvd_v6_0_check_soft_reset,
1536 : .pre_soft_reset = uvd_v6_0_pre_soft_reset,
1537 : .soft_reset = uvd_v6_0_soft_reset,
1538 : .post_soft_reset = uvd_v6_0_post_soft_reset,
1539 : .set_clockgating_state = uvd_v6_0_set_clockgating_state,
1540 : .set_powergating_state = uvd_v6_0_set_powergating_state,
1541 : .get_clockgating_state = uvd_v6_0_get_clockgating_state,
1542 : };
1543 :
1544 : static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
1545 : .type = AMDGPU_RING_TYPE_UVD,
1546 : .align_mask = 0xf,
1547 : .support_64bit_ptrs = false,
1548 : .no_user_fence = true,
1549 : .get_rptr = uvd_v6_0_ring_get_rptr,
1550 : .get_wptr = uvd_v6_0_ring_get_wptr,
1551 : .set_wptr = uvd_v6_0_ring_set_wptr,
1552 : .parse_cs = amdgpu_uvd_ring_parse_cs,
1553 : .emit_frame_size =
1554 : 6 + /* hdp invalidate */
1555 : 10 + /* uvd_v6_0_ring_emit_pipeline_sync */
1556 : 14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
1557 : .emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
1558 : .emit_ib = uvd_v6_0_ring_emit_ib,
1559 : .emit_fence = uvd_v6_0_ring_emit_fence,
1560 : .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
1561 : .test_ring = uvd_v6_0_ring_test_ring,
1562 : .test_ib = amdgpu_uvd_ring_test_ib,
1563 : .insert_nop = uvd_v6_0_ring_insert_nop,
1564 : .pad_ib = amdgpu_ring_generic_pad_ib,
1565 : .begin_use = amdgpu_uvd_ring_begin_use,
1566 : .end_use = amdgpu_uvd_ring_end_use,
1567 : .emit_wreg = uvd_v6_0_ring_emit_wreg,
1568 : };
1569 :
1570 : static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
1571 : .type = AMDGPU_RING_TYPE_UVD,
1572 : .align_mask = 0xf,
1573 : .support_64bit_ptrs = false,
1574 : .no_user_fence = true,
1575 : .get_rptr = uvd_v6_0_ring_get_rptr,
1576 : .get_wptr = uvd_v6_0_ring_get_wptr,
1577 : .set_wptr = uvd_v6_0_ring_set_wptr,
1578 : .emit_frame_size =
1579 : 6 + /* hdp invalidate */
1580 : 10 + /* uvd_v6_0_ring_emit_pipeline_sync */
1581 : VI_FLUSH_GPU_TLB_NUM_WREG * 6 + 8 + /* uvd_v6_0_ring_emit_vm_flush */
1582 : 14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
1583 : .emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
1584 : .emit_ib = uvd_v6_0_ring_emit_ib,
1585 : .emit_fence = uvd_v6_0_ring_emit_fence,
1586 : .emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
1587 : .emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
1588 : .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
1589 : .test_ring = uvd_v6_0_ring_test_ring,
1590 : .test_ib = amdgpu_uvd_ring_test_ib,
1591 : .insert_nop = uvd_v6_0_ring_insert_nop,
1592 : .pad_ib = amdgpu_ring_generic_pad_ib,
1593 : .begin_use = amdgpu_uvd_ring_begin_use,
1594 : .end_use = amdgpu_uvd_ring_end_use,
1595 : .emit_wreg = uvd_v6_0_ring_emit_wreg,
1596 : };
1597 :
1598 : static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {
1599 : .type = AMDGPU_RING_TYPE_UVD_ENC,
1600 : .align_mask = 0x3f,
1601 : .nop = HEVC_ENC_CMD_NO_OP,
1602 : .support_64bit_ptrs = false,
1603 : .no_user_fence = true,
1604 : .get_rptr = uvd_v6_0_enc_ring_get_rptr,
1605 : .get_wptr = uvd_v6_0_enc_ring_get_wptr,
1606 : .set_wptr = uvd_v6_0_enc_ring_set_wptr,
1607 : .emit_frame_size =
1608 : 4 + /* uvd_v6_0_enc_ring_emit_pipeline_sync */
1609 : 5 + /* uvd_v6_0_enc_ring_emit_vm_flush */
1610 : 5 + 5 + /* uvd_v6_0_enc_ring_emit_fence x2 vm fence */
1611 : 1, /* uvd_v6_0_enc_ring_insert_end */
1612 : .emit_ib_size = 5, /* uvd_v6_0_enc_ring_emit_ib */
1613 : .emit_ib = uvd_v6_0_enc_ring_emit_ib,
1614 : .emit_fence = uvd_v6_0_enc_ring_emit_fence,
1615 : .emit_vm_flush = uvd_v6_0_enc_ring_emit_vm_flush,
1616 : .emit_pipeline_sync = uvd_v6_0_enc_ring_emit_pipeline_sync,
1617 : .test_ring = uvd_v6_0_enc_ring_test_ring,
1618 : .test_ib = uvd_v6_0_enc_ring_test_ib,
1619 : .insert_nop = amdgpu_ring_insert_nop,
1620 : .insert_end = uvd_v6_0_enc_ring_insert_end,
1621 : .pad_ib = amdgpu_ring_generic_pad_ib,
1622 : .begin_use = amdgpu_uvd_ring_begin_use,
1623 : .end_use = amdgpu_uvd_ring_end_use,
1624 : };
1625 :
1626 : static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
1627 : {
1628 0 : if (adev->asic_type >= CHIP_POLARIS10) {
1629 0 : adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_vm_funcs;
1630 0 : DRM_INFO("UVD is enabled in VM mode\n");
1631 : } else {
1632 0 : adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_phys_funcs;
1633 0 : DRM_INFO("UVD is enabled in physical mode\n");
1634 : }
1635 : }
1636 :
1637 : static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev)
1638 : {
1639 : int i;
1640 :
1641 0 : for (i = 0; i < adev->uvd.num_enc_rings; ++i)
1642 0 : adev->uvd.inst->ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs;
1643 :
1644 0 : DRM_INFO("UVD ENC is enabled in VM mode\n");
1645 : }
1646 :
1647 : static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
1648 : .set = uvd_v6_0_set_interrupt_state,
1649 : .process = uvd_v6_0_process_interrupt,
1650 : };
1651 :
1652 : static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
1653 : {
1654 0 : if (uvd_v6_0_enc_support(adev))
1655 0 : adev->uvd.inst->irq.num_types = adev->uvd.num_enc_rings + 1;
1656 : else
1657 0 : adev->uvd.inst->irq.num_types = 1;
1658 :
1659 0 : adev->uvd.inst->irq.funcs = &uvd_v6_0_irq_funcs;
1660 : }
1661 :
1662 : const struct amdgpu_ip_block_version uvd_v6_0_ip_block =
1663 : {
1664 : .type = AMD_IP_BLOCK_TYPE_UVD,
1665 : .major = 6,
1666 : .minor = 0,
1667 : .rev = 0,
1668 : .funcs = &uvd_v6_0_ip_funcs,
1669 : };
1670 :
1671 : const struct amdgpu_ip_block_version uvd_v6_2_ip_block =
1672 : {
1673 : .type = AMD_IP_BLOCK_TYPE_UVD,
1674 : .major = 6,
1675 : .minor = 2,
1676 : .rev = 0,
1677 : .funcs = &uvd_v6_0_ip_funcs,
1678 : };
1679 :
1680 : const struct amdgpu_ip_block_version uvd_v6_3_ip_block =
1681 : {
1682 : .type = AMD_IP_BLOCK_TYPE_UVD,
1683 : .major = 6,
1684 : .minor = 3,
1685 : .rev = 0,
1686 : .funcs = &uvd_v6_0_ip_funcs,
1687 : };