Line data Source code
1 : /*
2 : * Copyright 2015 Advanced Micro Devices, Inc.
3 : *
4 : * Permission is hereby granted, free of charge, to any person obtaining a
5 : * copy of this software and associated documentation files (the "Software"),
6 : * to deal in the Software without restriction, including without limitation
7 : * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 : * and/or sell copies of the Software, and to permit persons to whom the
9 : * Software is furnished to do so, subject to the following conditions:
10 : *
11 : * The above copyright notice and this permission notice shall be included in
12 : * all copies or substantial portions of the Software.
13 : *
14 : * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 : * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 : * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 : * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 : * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 : * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 : * OTHER DEALINGS IN THE SOFTWARE.
21 : *
22 : * Authors: AMD
23 : *
24 : */
25 :
26 : #include "dm_services_types.h"
27 : #include "dc.h"
28 :
29 : #include "amdgpu.h"
30 : #include "amdgpu_dm.h"
31 : #include "amdgpu_dm_irq.h"
32 :
33 : /**
34 : * DOC: overview
35 : *
36 : * DM provides another layer of IRQ management on top of what the base driver
37 : * already provides. This is something that could be cleaned up, and is a
38 : * future TODO item.
39 : *
40 : * The base driver provides IRQ source registration with DRM, handler
41 : * registration into the base driver's IRQ table, and a handler callback
42 : * amdgpu_irq_handler(), which DRM calls when an interrupt fires. This
43 : * generic handler looks up the IRQ table and calls the respective
44 : * &amdgpu_irq_src_funcs.process hooks.
45 : *
46 : * What DM provides on top are two IRQ tables specifically for top-half and
47 : * bottom-half IRQ handling, with the bottom-half implementing workqueues:
48 : *
49 : * - &amdgpu_display_manager.irq_handler_list_high_tab
50 : * - &amdgpu_display_manager.irq_handler_list_low_tab
51 : *
52 : * They override the base driver's IRQ table, and the effect can be seen
53 : * in the hooks that DM provides for &amdgpu_irq_src_funcs.process: they are
54 : * all set to the DM generic handler amdgpu_dm_irq_handler(), which looks
55 : * up DM's IRQ tables. However, in order for the base driver to recognize
56 : * these hooks, DM still needs to register each IRQ with the base driver. See
57 : * dce110_register_irq_handlers() and dcn10_register_irq_handlers().
58 : *
59 : * To expose DC's hardware interrupt toggle to the base driver, DM implements
60 : * &amdgpu_irq_src_funcs.set hooks. The base driver calls them through
61 : * amdgpu_irq_update() to enable or disable the interrupt.
62 : */
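/*
 * Illustrative call flow, summarized from the code in this file (a sketch,
 * not code that exists here):
 *
 *	amdgpu_irq_handler()                       base driver, called by DRM
 *	  -> amdgpu_dm_irq_handler()               DM's .process hook
 *	       -> dc_interrupt_to_irq_source()     translate the IV entry to a dc_irq_source
 *	       -> dc_interrupt_ack()
 *	       -> amdgpu_dm_irq_immediate_work()   run high context handlers in ISR context
 *	       -> amdgpu_dm_irq_schedule_work()    queue low context handlers on a workqueue
 */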
63 :
64 : /******************************************************************************
65 : * Private declarations.
66 : *****************************************************************************/
67 :
68 : /**
69 : * struct amdgpu_dm_irq_handler_data - Data for DM interrupt handlers.
70 : *
71 : * @list: Linked list entry referencing the next/previous handler
72 : * @handler: Handler function
73 : * @handler_arg: Argument passed to the handler when triggered
74 : * @dm: DM which this handler belongs to
75 : * @irq_source: DC interrupt source that this handler is registered for
76 : * @work: work struct used to defer low context handlers to a workqueue
77 : */
78 : struct amdgpu_dm_irq_handler_data {
79 : struct list_head list;
80 : interrupt_handler handler;
81 : void *handler_arg;
82 :
83 : struct amdgpu_display_manager *dm;
84 : /* DAL IRQ source for which this handler is registered. */
85 : enum dc_irq_source irq_source;
86 : struct work_struct work;
87 : };
88 :
89 : #define DM_IRQ_TABLE_LOCK(adev, flags) \
90 : spin_lock_irqsave(&adev->dm.irq_handler_list_table_lock, flags)
91 :
92 : #define DM_IRQ_TABLE_UNLOCK(adev, flags) \
93 : spin_unlock_irqrestore(&adev->dm.irq_handler_list_table_lock, flags)
94 :
95 : /******************************************************************************
96 : * Private functions.
97 : *****************************************************************************/
98 :
99 : static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd,
100 : void (*ih)(void *),
101 : void *args,
102 : struct amdgpu_display_manager *dm)
103 : {
104 0 : hcd->handler = ih;
105 0 : hcd->handler_arg = args;
106 0 : hcd->dm = dm;
107 : }
108 :
109 : /**
110 : * dm_irq_work_func() - Handle an IRQ outside of the interrupt handler proper.
111 : *
112 : * @work: work struct embedded in the &struct amdgpu_dm_irq_handler_data to run
113 : */
114 0 : static void dm_irq_work_func(struct work_struct *work)
115 : {
116 0 : struct amdgpu_dm_irq_handler_data *handler_data =
117 0 : container_of(work, struct amdgpu_dm_irq_handler_data, work);
118 :
119 0 : handler_data->handler(handler_data->handler_arg);
120 :
121 : /* The handler called above is typically a DAL subcomponent which registered
122 : * for interrupt notification at INTERRUPT_LOW_IRQ_CONTEXT
123 : * (the most common use is the HPD interrupt). */
124 0 : }
125 :
126 : /*
127 : * Remove a handler and return a pointer to the handler list from which the
128 : * handler was removed.
129 : */
130 0 : static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
131 : void *ih,
132 : const struct dc_interrupt_params *int_params)
133 : {
134 : struct list_head *hnd_list;
135 : struct list_head *entry, *tmp;
136 : struct amdgpu_dm_irq_handler_data *handler;
137 : unsigned long irq_table_flags;
138 0 : bool handler_removed = false;
139 : enum dc_irq_source irq_source;
140 :
141 0 : DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
142 :
143 0 : irq_source = int_params->irq_source;
144 :
145 0 : switch (int_params->int_context) {
146 : case INTERRUPT_HIGH_IRQ_CONTEXT:
147 0 : hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
148 0 : break;
149 : case INTERRUPT_LOW_IRQ_CONTEXT:
150 : default:
151 0 : hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source];
152 0 : break;
153 : }
154 :
155 0 : list_for_each_safe(entry, tmp, hnd_list) {
156 :
157 0 : handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
158 : list);
159 :
160 0 : if (handler == NULL)
161 0 : continue;
162 :
163 0 : if (ih == handler->handler) {
164 : /* Found our handler. Remove it from the list. */
165 0 : list_del(&handler->list);
166 0 : handler_removed = true;
167 0 : break;
168 : }
169 : }
170 :
171 0 : DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
172 :
173 0 : if (handler_removed == false) {
174 : /* Not necessarily an error - caller may not
175 : * know the context. */
176 : return NULL;
177 : }
178 :
179 0 : kfree(handler);
180 :
181 0 : DRM_DEBUG_KMS(
182 : "DM_IRQ: removed irq handler: %p for: dal_src=%d, irq context=%d\n",
183 : ih, int_params->irq_source, int_params->int_context);
184 :
185 0 : return hnd_list;
186 : }
187 :
188 : /**
189 : * unregister_all_irq_handlers() - Cleans up handlers from the DM IRQ table
190 : * @adev: The base driver device containing the DM device
191 : *
192 : * Go through low and high context IRQ tables and deallocate handlers.
193 : */
194 0 : static void unregister_all_irq_handlers(struct amdgpu_device *adev)
195 : {
196 : struct list_head *hnd_list_low;
197 : struct list_head *hnd_list_high;
198 : struct list_head *entry, *tmp;
199 : struct amdgpu_dm_irq_handler_data *handler;
200 : unsigned long irq_table_flags;
201 : int i;
202 :
203 0 : DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
204 :
205 0 : for (i = 0; i < DAL_IRQ_SOURCES_NUMBER; i++) {
206 0 : hnd_list_low = &adev->dm.irq_handler_list_low_tab[i];
207 0 : hnd_list_high = &adev->dm.irq_handler_list_high_tab[i];
208 :
209 0 : list_for_each_safe(entry, tmp, hnd_list_low) {
210 :
211 0 : handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
212 : list);
213 :
214 0 : if (handler == NULL || handler->handler == NULL)
215 0 : continue;
216 :
217 0 : list_del(&handler->list);
218 0 : kfree(handler);
219 : }
220 :
221 0 : list_for_each_safe(entry, tmp, hnd_list_high) {
222 :
223 0 : handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
224 : list);
225 :
226 0 : if (handler == NULL || handler->handler == NULL)
227 0 : continue;
228 :
229 0 : list_del(&handler->list);
230 0 : kfree(handler);
231 : }
232 : }
233 :
234 0 : DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
235 0 : }
236 :
237 : static bool
238 0 : validate_irq_registration_params(struct dc_interrupt_params *int_params,
239 : void (*ih)(void *))
240 : {
241 0 : if (NULL == int_params || NULL == ih) {
242 0 : DRM_ERROR("DM_IRQ: invalid input!\n");
243 0 : return false;
244 : }
245 :
246 0 : if (int_params->int_context >= INTERRUPT_CONTEXT_NUMBER) {
247 0 : DRM_ERROR("DM_IRQ: invalid context: %d!\n",
248 : int_params->int_context);
249 0 : return false;
250 : }
251 :
252 0 : if (!DAL_VALID_IRQ_SRC_NUM(int_params->irq_source)) {
253 0 : DRM_ERROR("DM_IRQ: invalid irq_source: %d!\n",
254 : int_params->irq_source);
255 0 : return false;
256 : }
257 :
258 : return true;
259 : }
260 :
261 0 : static bool validate_irq_unregistration_params(enum dc_irq_source irq_source,
262 : irq_handler_idx handler_idx)
263 : {
264 0 : if (DAL_INVALID_IRQ_HANDLER_IDX == handler_idx) {
265 0 : DRM_ERROR("DM_IRQ: invalid handler_idx==NULL!\n");
266 0 : return false;
267 : }
268 :
269 0 : if (!DAL_VALID_IRQ_SRC_NUM(irq_source)) {
270 0 : DRM_ERROR("DM_IRQ: invalid irq_source:%d!\n", irq_source);
271 0 : return false;
272 : }
273 :
274 : return true;
275 : }
276 : /******************************************************************************
277 : * Public functions.
278 : *
279 : * Note: caller is responsible for input validation.
280 : *****************************************************************************/
281 :
282 : /**
283 : * amdgpu_dm_irq_register_interrupt() - Register a handler within DM.
284 : * @adev: The base driver device containing the DM device.
285 : * @int_params: Interrupt parameters containing the IRQ source and the handler context
286 : * @ih: Function pointer to the interrupt handler to register
287 : * @handler_args: Arguments passed to the handler when the interrupt occurs
288 : *
289 : * Register an interrupt handler for the given IRQ source, under the given
290 : * context. The context can either be high or low. High context handlers are
291 : * executed directly within ISR context, while low context is executed within a
292 : * workqueue, thereby allowing operations that sleep.
293 : *
294 : * Registered handlers are called in FIFO order, i.e. the handler that was
295 : * registered first is called first.
296 : *
297 : * Return: Handler data &struct amdgpu_dm_irq_handler_data containing the IRQ
298 : * source, handler function, and args; DAL_INVALID_IRQ_HANDLER_IDX on failure.
299 : */
300 0 : void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
301 : struct dc_interrupt_params *int_params,
302 : void (*ih)(void *),
303 : void *handler_args)
304 : {
305 : struct list_head *hnd_list;
306 : struct amdgpu_dm_irq_handler_data *handler_data;
307 : unsigned long irq_table_flags;
308 : enum dc_irq_source irq_source;
309 :
310 0 : if (false == validate_irq_registration_params(int_params, ih))
311 : return DAL_INVALID_IRQ_HANDLER_IDX;
312 :
313 0 : handler_data = kzalloc(sizeof(*handler_data), GFP_KERNEL);
314 0 : if (!handler_data) {
315 0 : DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
316 0 : return DAL_INVALID_IRQ_HANDLER_IDX;
317 : }
318 :
319 0 : init_handler_common_data(handler_data, ih, handler_args, &adev->dm);
320 :
321 0 : irq_source = int_params->irq_source;
322 :
323 0 : handler_data->irq_source = irq_source;
324 :
325 : /* Lock the list, add the handler. */
326 0 : DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
327 :
328 0 : switch (int_params->int_context) {
329 : case INTERRUPT_HIGH_IRQ_CONTEXT:
330 0 : hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
331 0 : break;
332 : case INTERRUPT_LOW_IRQ_CONTEXT:
333 : default:
334 0 : hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source];
335 0 : INIT_WORK(&handler_data->work, dm_irq_work_func);
336 0 : break;
337 : }
338 :
339 0 : list_add_tail(&handler_data->list, hnd_list);
340 :
341 0 : DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
342 :
343 : /* The returned pointer is stored by the code which requested the
344 : * registration.
345 : * Note that unregistration matches on the handler function pointer,
346 : * not on this returned pointer (see remove_irq_handler()). */
347 :
348 0 : DRM_DEBUG_KMS(
349 : "DM_IRQ: added irq handler: %p for: dal_src=%d, irq context=%d\n",
350 : handler_data,
351 : irq_source,
352 : int_params->int_context);
353 :
354 0 : return handler_data;
355 : }
356 :
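/*
 * Illustrative usage sketch (an assumption modeled on the HPD registration
 * done elsewhere in DM; my_hpd_handler and my_connector are hypothetical):
 *
 *	struct dc_interrupt_params int_params = {0};
 *
 *	int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
 *	int_params.irq_source = DC_IRQ_SOURCE_HPD1;
 *
 *	amdgpu_dm_irq_register_interrupt(adev, &int_params,
 *					 my_hpd_handler, my_connector);
 *
 * Unregistration later matches on the handler function pointer:
 *
 *	amdgpu_dm_irq_unregister_interrupt(adev, DC_IRQ_SOURCE_HPD1,
 *					   my_hpd_handler);
 */
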
357 : /**
358 : * amdgpu_dm_irq_unregister_interrupt() - Remove a handler from the DM IRQ table
359 : * @adev: The base driver device containing the DM device
360 : * @irq_source: IRQ source to remove the given handler from
361 : * @ih: Function pointer to the interrupt handler to unregister
362 : *
363 : * Go through both low and high context IRQ tables, and find the given handler
364 : * for the given irq source. If found, remove it. Otherwise, do nothing.
365 : */
366 0 : void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
367 : enum dc_irq_source irq_source,
368 : void *ih)
369 : {
370 : struct list_head *handler_list;
371 : struct dc_interrupt_params int_params;
372 : int i;
373 :
374 0 : if (false == validate_irq_unregistration_params(irq_source, ih))
375 0 : return;
376 :
377 0 : memset(&int_params, 0, sizeof(int_params));
378 :
379 0 : int_params.irq_source = irq_source;
380 :
381 0 : for (i = 0; i < INTERRUPT_CONTEXT_NUMBER; i++) {
382 :
383 0 : int_params.int_context = i;
384 :
385 0 : handler_list = remove_irq_handler(adev, ih, &int_params);
386 :
387 0 : if (handler_list != NULL)
388 : break;
389 : }
390 :
391 0 : if (handler_list == NULL) {
392 : /* If we got here, it means we searched all irq contexts
393 : * for this irq source, but the handler was not found. */
394 0 : DRM_ERROR(
395 : "DM_IRQ: failed to find irq handler:%p for irq_source:%d!\n",
396 : ih, irq_source);
397 : }
398 : }
399 :
400 : /**
401 : * amdgpu_dm_irq_init() - Initialize DM IRQ management
402 : * @adev: The base driver device containing the DM device
403 : *
404 : * Initialize DM's high and low context IRQ tables.
405 : *
406 : * The N by M table contains N IRQ sources, with M
407 : * &struct amdgpu_dm_irq_handler_data hooked together in a linked list. The
408 : * list_heads are initialized here. When an interrupt n is triggered, all m
409 : * handlers are called in sequence, FIFO according to registration order.
410 : *
411 : * Handlers in the low context table are deferred to a workqueue; see the
412 : * work member of &struct amdgpu_dm_irq_handler_data.
413 : */
414 0 : int amdgpu_dm_irq_init(struct amdgpu_device *adev)
415 : {
416 : int src;
417 : struct list_head *lh;
418 :
419 0 : DRM_DEBUG_KMS("DM_IRQ\n");
420 :
421 0 : spin_lock_init(&adev->dm.irq_handler_list_table_lock);
422 :
423 0 : for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
424 : /* low context handler list init */
425 0 : lh = &adev->dm.irq_handler_list_low_tab[src];
426 0 : INIT_LIST_HEAD(lh);
427 : /* high context handler init */
428 0 : INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]);
429 : }
430 :
431 0 : return 0;
432 : }
433 :
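/*
 * Illustrative layout after init (a sketch of the two tables, not a
 * declaration taken from this file):
 *
 *	irq_handler_list_low_tab[0 .. DAL_IRQ_SOURCES_NUMBER - 1]
 *	  [src] -> amdgpu_dm_irq_handler_data -> ...  (one list per IRQ source,
 *	           each entry carrying its own work_struct)
 *	irq_handler_list_high_tab[0 .. DAL_IRQ_SOURCES_NUMBER - 1]
 *	  [src] -> amdgpu_dm_irq_handler_data -> ...  (called directly in ISR context)
 */
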
434 : /**
435 : * amdgpu_dm_irq_fini() - Tear down DM IRQ management
436 : * @adev: The base driver device containing the DM device
437 : *
438 : * Flush all work within the low context IRQ table.
439 : */
440 0 : void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
441 : {
442 : int src;
443 : struct list_head *lh;
444 : struct list_head *entry, *tmp;
445 : struct amdgpu_dm_irq_handler_data *handler;
446 : unsigned long irq_table_flags;
447 :
448 0 : DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");
449 0 : for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
450 0 : DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
451 : /* The handler was removed from the table, which
452 : * means it is safe to flush all the 'work'
453 : * (because no code can schedule a new one). */
454 0 : lh = &adev->dm.irq_handler_list_low_tab[src];
455 0 : DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
456 :
457 0 : if (!list_empty(lh)) {
458 0 : list_for_each_safe(entry, tmp, lh) {
459 0 : handler = list_entry(
460 : entry,
461 : struct amdgpu_dm_irq_handler_data,
462 : list);
463 0 : flush_work(&handler->work);
464 : }
465 : }
466 : }
467 : /* Deallocate handlers from the table. */
468 0 : unregister_all_irq_handlers(adev);
469 0 : }
470 :
471 0 : int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
472 : {
473 : int src;
474 : struct list_head *hnd_list_h;
475 : struct list_head *hnd_list_l;
476 : unsigned long irq_table_flags;
477 : struct list_head *entry, *tmp;
478 : struct amdgpu_dm_irq_handler_data *handler;
479 :
480 0 : DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
481 :
482 0 : DRM_DEBUG_KMS("DM_IRQ: suspend\n");
483 :
484 : /*
485 : * Disable HW interrupts for HPD and HPDRX only, since FLIP and VBLANK
486 : * will be disabled from manage_dm_interrupts() when the CRTC is disabled.
487 : */
488 0 : for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
489 0 : hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
490 0 : hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
491 0 : if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
492 0 : dc_interrupt_set(adev->dm.dc, src, false);
493 :
494 0 : DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
495 :
496 0 : if (!list_empty(hnd_list_l)) {
497 0 : list_for_each_safe (entry, tmp, hnd_list_l) {
498 0 : handler = list_entry(
499 : entry,
500 : struct amdgpu_dm_irq_handler_data,
501 : list);
502 0 : flush_work(&handler->work);
503 : }
504 : }
505 0 : DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
506 : }
507 :
508 0 : DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
509 0 : return 0;
510 : }
511 :
512 0 : int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
513 : {
514 : int src;
515 : struct list_head *hnd_list_h, *hnd_list_l;
516 : unsigned long irq_table_flags;
517 :
518 0 : DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
519 :
520 0 : DRM_DEBUG_KMS("DM_IRQ: early resume\n");
521 :
522 : /* Re-enable the HPD RX (short pulse) HW interrupts */
523 0 : for (src = DC_IRQ_SOURCE_HPD1RX; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
524 0 : hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
525 0 : hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
526 0 : if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
527 0 : dc_interrupt_set(adev->dm.dc, src, true);
528 : }
529 :
530 0 : DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
531 :
532 0 : return 0;
533 : }
534 :
535 0 : int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
536 : {
537 : int src;
538 : struct list_head *hnd_list_h, *hnd_list_l;
539 : unsigned long irq_table_flags;
540 :
541 0 : DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
542 :
543 0 : DRM_DEBUG_KMS("DM_IRQ: resume\n");
544 :
545 : /*
546 : * Re-enable HW interrupts for HPD only, since FLIP and VBLANK
547 : * will be enabled from manage_dm_interrupts() when the CRTC is enabled.
548 : */
549 0 : for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6; src++) {
550 0 : hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
551 0 : hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
552 0 : if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
553 0 : dc_interrupt_set(adev->dm.dc, src, true);
554 : }
555 :
556 0 : DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
557 0 : return 0;
558 : }
559 :
560 : /*
561 : * amdgpu_dm_irq_schedule_work - schedule low context work for "irq_source";
562 : * if every handler's work is already queued, queue a copy of the first instead.
563 : */
564 0 : static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
565 : enum dc_irq_source irq_source)
566 : {
567 0 : struct list_head *handler_list = &adev->dm.irq_handler_list_low_tab[irq_source];
568 : struct amdgpu_dm_irq_handler_data *handler_data;
569 0 : bool work_queued = false;
570 :
571 0 : if (list_empty(handler_list))
572 : return;
573 :
574 0 : list_for_each_entry (handler_data, handler_list, list) {
575 0 : if (queue_work(system_highpri_wq, &handler_data->work)) {
576 : work_queued = true;
577 : break;
578 : }
579 : }
580 :
581 0 : if (!work_queued) {
582 : struct amdgpu_dm_irq_handler_data *handler_data_add;
583 : /* Get the amdgpu_dm_irq_handler_data of the first item in handler_list. */
584 0 : handler_data = container_of(handler_list->next, struct amdgpu_dm_irq_handler_data, list);
585 :
586 : /* Allocate a new amdgpu_dm_irq_handler_data. */
587 0 : handler_data_add = kzalloc(sizeof(*handler_data), GFP_ATOMIC);
588 0 : if (!handler_data_add) {
589 0 : DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
590 0 : return;
591 : }
592 :
593 : /* Copy members from handler_data into the new amdgpu_dm_irq_handler_data. */
594 0 : handler_data_add->handler = handler_data->handler;
595 0 : handler_data_add->handler_arg = handler_data->handler_arg;
596 0 : handler_data_add->dm = handler_data->dm;
597 0 : handler_data_add->irq_source = irq_source;
598 :
599 0 : list_add_tail(&handler_data_add->list, handler_list);
600 :
601 0 : INIT_WORK(&handler_data_add->work, dm_irq_work_func);
602 :
603 0 : if (queue_work(system_highpri_wq, &handler_data_add->work))
604 0 : DRM_DEBUG("Queued work for handling interrupt from "
605 : "display for IRQ source %d\n",
606 : irq_source);
607 : else
608 0 : DRM_ERROR("Failed to queue work for handling interrupt "
609 : "from display for IRQ source %d\n",
610 : irq_source);
611 : }
612 : }
613 :
614 : /*
615 : * amdgpu_dm_irq_immediate_work - call the high context handlers for
616 : * "irq_source" immediately, without sending them to a workqueue.
617 : */
618 0 : static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev,
619 : enum dc_irq_source irq_source)
620 : {
621 : struct amdgpu_dm_irq_handler_data *handler_data;
622 : unsigned long irq_table_flags;
623 :
624 0 : DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
625 :
626 0 : list_for_each_entry(handler_data,
627 : &adev->dm.irq_handler_list_high_tab[irq_source],
628 : list) {
629 : /* Call a subcomponent which registered for immediate
630 : * interrupt notification */
631 0 : handler_data->handler(handler_data->handler_arg);
632 : }
633 :
634 0 : DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
635 0 : }
636 :
637 : /**
638 : * amdgpu_dm_irq_handler - Generic DM IRQ handler
639 : * @adev: amdgpu base driver device containing the DM device
640 : * @source: Unused
641 : * @entry: Data about the triggered interrupt
642 : *
643 : * Calls all registered high context handlers immediately and schedules work
644 : * for the low context handlers. The DM IRQ tables are used to find them.
645 : */
646 0 : static int amdgpu_dm_irq_handler(struct amdgpu_device *adev,
647 : struct amdgpu_irq_src *source,
648 : struct amdgpu_iv_entry *entry)
649 : {
650 :
651 0 : enum dc_irq_source src =
652 0 : dc_interrupt_to_irq_source(
653 : adev->dm.dc,
654 : entry->src_id,
655 : entry->src_data[0]);
656 :
657 0 : dc_interrupt_ack(adev->dm.dc, src);
658 :
659 : /* Call high irq work immediately */
660 0 : amdgpu_dm_irq_immediate_work(adev, src);
661 : /* Schedule low irq work */
662 0 : amdgpu_dm_irq_schedule_work(adev, src);
663 :
664 0 : return 0;
665 : }
666 :
667 : static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned type)
668 : {
669 : switch (type) {
670 : case AMDGPU_HPD_1:
671 : return DC_IRQ_SOURCE_HPD1;
672 : case AMDGPU_HPD_2:
673 : return DC_IRQ_SOURCE_HPD2;
674 : case AMDGPU_HPD_3:
675 : return DC_IRQ_SOURCE_HPD3;
676 : case AMDGPU_HPD_4:
677 : return DC_IRQ_SOURCE_HPD4;
678 : case AMDGPU_HPD_5:
679 : return DC_IRQ_SOURCE_HPD5;
680 : case AMDGPU_HPD_6:
681 : return DC_IRQ_SOURCE_HPD6;
682 : default:
683 : return DC_IRQ_SOURCE_INVALID;
684 : }
685 : }
686 :
687 0 : static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev,
688 : struct amdgpu_irq_src *source,
689 : unsigned type,
690 : enum amdgpu_interrupt_state state)
691 : {
692 0 : enum dc_irq_source src = amdgpu_dm_hpd_to_dal_irq_source(type);
693 0 : bool st = (state == AMDGPU_IRQ_STATE_ENABLE);
694 :
695 0 : dc_interrupt_set(adev->dm.dc, src, st);
696 0 : return 0;
697 : }
698 :
699 0 : static inline int dm_irq_state(struct amdgpu_device *adev,
700 : struct amdgpu_irq_src *source,
701 : unsigned crtc_id,
702 : enum amdgpu_interrupt_state state,
703 : const enum irq_type dal_irq_type,
704 : const char *func)
705 : {
706 : bool st;
707 : enum dc_irq_source irq_source;
708 :
709 0 : struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc_id];
710 :
711 0 : if (!acrtc) {
712 0 : DRM_ERROR(
713 : "%s: crtc is NULL at id :%d\n",
714 : func,
715 : crtc_id);
716 : return 0;
717 : }
718 :
719 0 : if (acrtc->otg_inst == -1)
720 : return 0;
721 :
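/*
 * dc_irq_source values for a given IRQ type are assumed to be laid out
 * contiguously per OTG instance, so the per-CRTC source is obtained by
 * offsetting the type's base source with the OTG instance (e.g. a VBLANK
 * request for OTG 1 would select the second VBLANK source).
 */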
722 0 : irq_source = dal_irq_type + acrtc->otg_inst;
723 :
724 0 : st = (state == AMDGPU_IRQ_STATE_ENABLE);
725 :
726 0 : dc_interrupt_set(adev->dm.dc, irq_source, st);
727 : return 0;
728 : }
729 :
730 0 : static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev,
731 : struct amdgpu_irq_src *source,
732 : unsigned crtc_id,
733 : enum amdgpu_interrupt_state state)
734 : {
735 0 : return dm_irq_state(
736 : adev,
737 : source,
738 : crtc_id,
739 : state,
740 : IRQ_TYPE_PFLIP,
741 : __func__);
742 : }
743 :
744 0 : static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev,
745 : struct amdgpu_irq_src *source,
746 : unsigned crtc_id,
747 : enum amdgpu_interrupt_state state)
748 : {
749 0 : return dm_irq_state(
750 : adev,
751 : source,
752 : crtc_id,
753 : state,
754 : IRQ_TYPE_VBLANK,
755 : __func__);
756 : }
757 :
758 0 : static int amdgpu_dm_set_vline0_irq_state(struct amdgpu_device *adev,
759 : struct amdgpu_irq_src *source,
760 : unsigned int crtc_id,
761 : enum amdgpu_interrupt_state state)
762 : {
763 0 : return dm_irq_state(
764 : adev,
765 : source,
766 : crtc_id,
767 : state,
768 : IRQ_TYPE_VLINE0,
769 : __func__);
770 : }
771 :
772 0 : static int amdgpu_dm_set_dmub_outbox_irq_state(struct amdgpu_device *adev,
773 : struct amdgpu_irq_src *source,
774 : unsigned int crtc_id,
775 : enum amdgpu_interrupt_state state)
776 : {
777 0 : enum dc_irq_source irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX;
778 0 : bool st = (state == AMDGPU_IRQ_STATE_ENABLE);
779 :
780 0 : dc_interrupt_set(adev->dm.dc, irq_source, st);
781 0 : return 0;
782 : }
783 :
784 0 : static int amdgpu_dm_set_vupdate_irq_state(struct amdgpu_device *adev,
785 : struct amdgpu_irq_src *source,
786 : unsigned int crtc_id,
787 : enum amdgpu_interrupt_state state)
788 : {
789 0 : return dm_irq_state(
790 : adev,
791 : source,
792 : crtc_id,
793 : state,
794 : IRQ_TYPE_VUPDATE,
795 : __func__);
796 : }
797 :
798 0 : static int amdgpu_dm_set_dmub_trace_irq_state(struct amdgpu_device *adev,
799 : struct amdgpu_irq_src *source,
800 : unsigned int type,
801 : enum amdgpu_interrupt_state state)
802 : {
803 0 : enum dc_irq_source irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX0;
804 0 : bool st = (state == AMDGPU_IRQ_STATE_ENABLE);
805 :
806 0 : dc_interrupt_set(adev->dm.dc, irq_source, st);
807 0 : return 0;
808 : }
809 :
810 : static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = {
811 : .set = amdgpu_dm_set_crtc_irq_state,
812 : .process = amdgpu_dm_irq_handler,
813 : };
814 :
815 : static const struct amdgpu_irq_src_funcs dm_vline0_irq_funcs = {
816 : .set = amdgpu_dm_set_vline0_irq_state,
817 : .process = amdgpu_dm_irq_handler,
818 : };
819 :
820 : static const struct amdgpu_irq_src_funcs dm_dmub_outbox_irq_funcs = {
821 : .set = amdgpu_dm_set_dmub_outbox_irq_state,
822 : .process = amdgpu_dm_irq_handler,
823 : };
824 :
825 : static const struct amdgpu_irq_src_funcs dm_vupdate_irq_funcs = {
826 : .set = amdgpu_dm_set_vupdate_irq_state,
827 : .process = amdgpu_dm_irq_handler,
828 : };
829 :
830 : static const struct amdgpu_irq_src_funcs dm_dmub_trace_irq_funcs = {
831 : .set = amdgpu_dm_set_dmub_trace_irq_state,
832 : .process = amdgpu_dm_irq_handler,
833 : };
834 :
835 : static const struct amdgpu_irq_src_funcs dm_pageflip_irq_funcs = {
836 : .set = amdgpu_dm_set_pflip_irq_state,
837 : .process = amdgpu_dm_irq_handler,
838 : };
839 :
840 : static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = {
841 : .set = amdgpu_dm_set_hpd_irq_state,
842 : .process = amdgpu_dm_irq_handler,
843 : };
844 :
845 0 : void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
846 : {
847 0 : adev->crtc_irq.num_types = adev->mode_info.num_crtc;
848 0 : adev->crtc_irq.funcs = &dm_crtc_irq_funcs;
849 :
850 0 : adev->vline0_irq.num_types = adev->mode_info.num_crtc;
851 0 : adev->vline0_irq.funcs = &dm_vline0_irq_funcs;
852 :
853 0 : adev->dmub_outbox_irq.num_types = 1;
854 0 : adev->dmub_outbox_irq.funcs = &dm_dmub_outbox_irq_funcs;
855 :
856 0 : adev->vupdate_irq.num_types = adev->mode_info.num_crtc;
857 0 : adev->vupdate_irq.funcs = &dm_vupdate_irq_funcs;
858 :
859 0 : adev->dmub_trace_irq.num_types = 1;
860 0 : adev->dmub_trace_irq.funcs = &dm_dmub_trace_irq_funcs;
861 :
862 0 : adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
863 0 : adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs;
864 :
865 0 : adev->hpd_irq.num_types = adev->mode_info.num_hpd;
866 0 : adev->hpd_irq.funcs = &dm_hpd_irq_funcs;
867 0 : }
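
/*
 * Note that setting the funcs above is only half of the wiring: each source
 * still has to be added to the base driver's own IRQ table (see the DOC
 * comment at the top of this file). A sketch of that step, assuming the
 * usual amdgpu_irq_add_id() signature with hypothetical client/src ids:
 *
 *	amdgpu_irq_add_id(adev, client_id, src_id, &adev->crtc_irq);
 */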
868 0 : void amdgpu_dm_outbox_init(struct amdgpu_device *adev)
869 : {
870 0 : dc_interrupt_set(adev->dm.dc,
871 : DC_IRQ_SOURCE_DMCUB_OUTBOX,
872 : true);
873 0 : }
874 :
875 : /**
876 : * amdgpu_dm_hpd_init - hpd setup callback.
877 : *
878 : * @adev: amdgpu_device pointer
879 : *
880 : * Set up the hpd pins used by the card (evergreen+).
881 : * Enable the pin, set the polarity, and enable the hpd interrupts.
882 : */
883 0 : void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
884 : {
885 0 : struct drm_device *dev = adev_to_drm(adev);
886 : struct drm_connector *connector;
887 : struct drm_connector_list_iter iter;
888 :
889 0 : drm_connector_list_iter_begin(dev, &iter);
890 0 : drm_for_each_connector_iter(connector, &iter) {
891 0 : struct amdgpu_dm_connector *amdgpu_dm_connector =
892 0 : to_amdgpu_dm_connector(connector);
893 :
894 0 : const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;
895 :
896 0 : if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
897 0 : dc_interrupt_set(adev->dm.dc,
898 : dc_link->irq_source_hpd,
899 : true);
900 : }
901 :
902 0 : if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
903 0 : dc_interrupt_set(adev->dm.dc,
904 : dc_link->irq_source_hpd_rx,
905 : true);
906 : }
907 : }
908 0 : drm_connector_list_iter_end(&iter);
909 0 : }
910 :
911 : /**
912 : * amdgpu_dm_hpd_fini - hpd tear down callback.
913 : *
914 : * @adev: amdgpu_device pointer
915 : *
916 : * Tear down the hpd pins used by the card (evergreen+).
917 : * Disable the hpd interrupts.
918 : */
919 0 : void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
920 : {
921 0 : struct drm_device *dev = adev_to_drm(adev);
922 : struct drm_connector *connector;
923 : struct drm_connector_list_iter iter;
924 :
925 0 : drm_connector_list_iter_begin(dev, &iter);
926 0 : drm_for_each_connector_iter(connector, &iter) {
927 0 : struct amdgpu_dm_connector *amdgpu_dm_connector =
928 0 : to_amdgpu_dm_connector(connector);
929 0 : const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;
930 :
931 0 : if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
932 0 : dc_interrupt_set(adev->dm.dc,
933 : dc_link->irq_source_hpd,
934 : false);
935 : }
936 :
937 0 : if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
938 0 : dc_interrupt_set(adev->dm.dc,
939 : dc_link->irq_source_hpd_rx,
940 : false);
941 : }
942 : }
943 0 : drm_connector_list_iter_end(&iter);
944 0 : }