LCOV - code coverage report
Current view: top level - drivers/gpu/drm/amd/amdgpu - amdgpu_irq.c (source / functions)
Test: coverage.info
Date: 2022-12-09 01:23:36
                     Hit   Total   Coverage
Lines:                 0     224      0.0 %
Functions:             0      24      0.0 %

          Line data    Source code
       1             : /*
       2             :  * Copyright 2008 Advanced Micro Devices, Inc.
       3             :  * Copyright 2008 Red Hat Inc.
       4             :  * Copyright 2009 Jerome Glisse.
       5             :  *
       6             :  * Permission is hereby granted, free of charge, to any person obtaining a
       7             :  * copy of this software and associated documentation files (the "Software"),
       8             :  * to deal in the Software without restriction, including without limitation
       9             :  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
      10             :  * and/or sell copies of the Software, and to permit persons to whom the
      11             :  * Software is furnished to do so, subject to the following conditions:
      12             :  *
      13             :  * The above copyright notice and this permission notice shall be included in
      14             :  * all copies or substantial portions of the Software.
      15             :  *
      16             :  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
      17             :  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
      18             :  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
      19             :  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
      20             :  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
      21             :  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
      22             :  * OTHER DEALINGS IN THE SOFTWARE.
      23             :  *
      24             :  * Authors: Dave Airlie
      25             :  *          Alex Deucher
      26             :  *          Jerome Glisse
      27             :  */
      28             : 
      29             : /**
      30             :  * DOC: Interrupt Handling
      31             :  *
      32             :  * Interrupts generated within GPU hardware raise interrupt requests that are
       33             :  * passed to the amdgpu IRQ handler, which is responsible for detecting the
       34             :  * source and type of the interrupt and dispatching the matching handlers. If
       35             :  * handling an interrupt requires calling kernel functions that may sleep,
       36             :  * processing is dispatched to work handlers.
      37             :  *
       38             :  * If MSI functionality is not disabled by the module parameter, MSI
       39             :  * support will be enabled.
      40             :  *
      41             :  * For GPU interrupt sources that may be driven by another driver, IRQ domain
      42             :  * support is used (with mapping between virtual and hardware IRQs).
      43             :  */
      44             : 
      45             : #include <linux/irq.h>
      46             : #include <linux/pci.h>
      47             : 
      48             : #include <drm/drm_crtc_helper.h>
      49             : #include <drm/drm_vblank.h>
      50             : #include <drm/amdgpu_drm.h>
      51             : #include <drm/drm_drv.h>
      52             : #include "amdgpu.h"
      53             : #include "amdgpu_ih.h"
      54             : #include "atom.h"
      55             : #include "amdgpu_connectors.h"
      56             : #include "amdgpu_trace.h"
      57             : #include "amdgpu_amdkfd.h"
      58             : #include "amdgpu_ras.h"
      59             : 
      60             : #include <linux/pm_runtime.h>
      61             : 
      62             : #ifdef CONFIG_DRM_AMD_DC
      63             : #include "amdgpu_dm_irq.h"
      64             : #endif
      65             : 
      66             : #define AMDGPU_WAIT_IDLE_TIMEOUT 200
      67             : 
      68             : const char *soc15_ih_clientid_name[] = {
      69             :         "IH",
      70             :         "SDMA2 or ACP",
      71             :         "ATHUB",
      72             :         "BIF",
      73             :         "SDMA3 or DCE",
      74             :         "SDMA4 or ISP",
      75             :         "VMC1 or PCIE0",
      76             :         "RLC",
      77             :         "SDMA0",
      78             :         "SDMA1",
      79             :         "SE0SH",
      80             :         "SE1SH",
      81             :         "SE2SH",
      82             :         "SE3SH",
      83             :         "VCN1 or UVD1",
      84             :         "THM",
      85             :         "VCN or UVD",
      86             :         "SDMA5 or VCE0",
      87             :         "VMC",
      88             :         "SDMA6 or XDMA",
      89             :         "GRBM_CP",
      90             :         "ATS",
      91             :         "ROM_SMUIO",
      92             :         "DF",
      93             :         "SDMA7 or VCE1",
      94             :         "PWR",
      95             :         "reserved",
      96             :         "UTCL2",
      97             :         "EA",
      98             :         "UTCL2LOG",
      99             :         "MP0",
     100             :         "MP1"
     101             : };
     102             : 
     103             : /**
     104             :  * amdgpu_hotplug_work_func - work handler for display hotplug event
     105             :  *
     106             :  * @work: work struct pointer
     107             :  *
     108             :  * This is the hotplug event work handler (all ASICs).
     109             :  * The work gets scheduled from the IRQ handler if there
     110             :  * was a hotplug interrupt.  It walks through the connector table
      111             :  * and calls the hotplug handler for each connector. After this, it sends
     112             :  * a DRM hotplug event to alert userspace.
     113             :  *
      114             :  * This design approach is required in order to defer hotplug event handling
      115             :  * from the IRQ handler to a work handler because the hotplug handler has to
      116             :  * use mutexes, which cannot be locked in an IRQ handler (since &mutex_lock
      117             :  * may sleep).
     118             :  */
     119           0 : static void amdgpu_hotplug_work_func(struct work_struct *work)
     120             : {
     121           0 :         struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
     122             :                                                   hotplug_work);
     123           0 :         struct drm_device *dev = adev_to_drm(adev);
     124           0 :         struct drm_mode_config *mode_config = &dev->mode_config;
     125             :         struct drm_connector *connector;
     126             :         struct drm_connector_list_iter iter;
     127             : 
     128           0 :         mutex_lock(&mode_config->mutex);
     129           0 :         drm_connector_list_iter_begin(dev, &iter);
     130           0 :         drm_for_each_connector_iter(connector, &iter)
     131           0 :                 amdgpu_connector_hotplug(connector);
     132           0 :         drm_connector_list_iter_end(&iter);
     133           0 :         mutex_unlock(&mode_config->mutex);
     134             :         /* Just fire off a uevent and let userspace tell us what to do */
     135           0 :         drm_helper_hpd_irq_event(dev);
     136           0 : }
     137             : 
     138             : /**
     139             :  * amdgpu_irq_disable_all - disable *all* interrupts
     140             :  *
     141             :  * @adev: amdgpu device pointer
     142             :  *
     143             :  * Disable all types of interrupts from all sources.
     144             :  */
     145           0 : void amdgpu_irq_disable_all(struct amdgpu_device *adev)
     146             : {
     147             :         unsigned long irqflags;
     148             :         unsigned i, j, k;
     149             :         int r;
     150             : 
     151           0 :         spin_lock_irqsave(&adev->irq.lock, irqflags);
     152           0 :         for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
     153           0 :                 if (!adev->irq.client[i].sources)
     154           0 :                         continue;
     155             : 
     156           0 :                 for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
     157           0 :                         struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
     158             : 
     159           0 :                         if (!src || !src->funcs->set || !src->num_types)
     160           0 :                                 continue;
     161             : 
     162           0 :                         for (k = 0; k < src->num_types; ++k) {
     163           0 :                                 atomic_set(&src->enabled_types[k], 0);
     164           0 :                                 r = src->funcs->set(adev, src, k,
     165             :                                                     AMDGPU_IRQ_STATE_DISABLE);
     166           0 :                                 if (r)
     167           0 :                                         DRM_ERROR("error disabling interrupt (%d)\n",
     168             :                                                   r);
     169             :                         }
     170             :                 }
     171             :         }
     172           0 :         spin_unlock_irqrestore(&adev->irq.lock, irqflags);
     173           0 : }
     174             : 
     175             : /**
     176             :  * amdgpu_irq_handler - IRQ handler
     177             :  *
     178             :  * @irq: IRQ number (unused)
     179             :  * @arg: pointer to DRM device
     180             :  *
     181             :  * IRQ handler for amdgpu driver (all ASICs).
     182             :  *
     183             :  * Returns:
     184             :  * result of handling the IRQ, as defined by &irqreturn_t
     185             :  */
     186           0 : static irqreturn_t amdgpu_irq_handler(int irq, void *arg)
     187             : {
     188           0 :         struct drm_device *dev = (struct drm_device *) arg;
     189           0 :         struct amdgpu_device *adev = drm_to_adev(dev);
     190             :         irqreturn_t ret;
     191             : 
     192           0 :         ret = amdgpu_ih_process(adev, &adev->irq.ih);
     193           0 :         if (ret == IRQ_HANDLED)
     194           0 :                 pm_runtime_mark_last_busy(dev->dev);
     195             : 
     196           0 :         amdgpu_ras_interrupt_fatal_error_handler(adev);
     197             : 
     198           0 :         return ret;
     199             : }
     200             : 
     201             : /**
      202             :  * amdgpu_irq_handle_ih1 - kick off processing for IH1
     203             :  *
     204             :  * @work: work structure in struct amdgpu_irq
     205             :  *
      206             :  * Kick off processing of IH ring 1.
     207             :  */
     208           0 : static void amdgpu_irq_handle_ih1(struct work_struct *work)
     209             : {
     210           0 :         struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
     211             :                                                   irq.ih1_work);
     212             : 
     213           0 :         amdgpu_ih_process(adev, &adev->irq.ih1);
     214           0 : }
     215             : 
     216             : /**
      217             :  * amdgpu_irq_handle_ih2 - kick off processing for IH2
     218             :  *
     219             :  * @work: work structure in struct amdgpu_irq
     220             :  *
      221             :  * Kick off processing of IH ring 2.
     222             :  */
     223           0 : static void amdgpu_irq_handle_ih2(struct work_struct *work)
     224             : {
     225           0 :         struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
     226             :                                                   irq.ih2_work);
     227             : 
     228           0 :         amdgpu_ih_process(adev, &adev->irq.ih2);
     229           0 : }
     230             : 
     231             : /**
      232             :  * amdgpu_irq_handle_ih_soft - kick off processing for ih_soft
     233             :  *
     234             :  * @work: work structure in struct amdgpu_irq
     235             :  *
      236             :  * Kick off processing of the IH soft ring.
     237             :  */
     238           0 : static void amdgpu_irq_handle_ih_soft(struct work_struct *work)
     239             : {
     240           0 :         struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
     241             :                                                   irq.ih_soft_work);
     242             : 
     243           0 :         amdgpu_ih_process(adev, &adev->irq.ih_soft);
     244           0 : }
     245             : 
     246             : /**
     247             :  * amdgpu_msi_ok - check whether MSI functionality is enabled
     248             :  *
     249             :  * @adev: amdgpu device pointer (unused)
     250             :  *
     251             :  * Checks whether MSI functionality has been disabled via module parameter
     252             :  * (all ASICs).
     253             :  *
     254             :  * Returns:
     255             :  * *true* if MSIs are allowed to be enabled or *false* otherwise
     256             :  */
     257             : static bool amdgpu_msi_ok(struct amdgpu_device *adev)
     258             : {
     259           0 :         if (amdgpu_msi == 1)
     260             :                 return true;
     261           0 :         else if (amdgpu_msi == 0)
     262             :                 return false;
     263             : 
     264             :         return true;
     265             : }
     266             : 
     267           0 : static void amdgpu_restore_msix(struct amdgpu_device *adev)
     268             : {
     269             :         u16 ctrl;
     270             : 
     271           0 :         pci_read_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
     272           0 :         if (!(ctrl & PCI_MSIX_FLAGS_ENABLE))
     273           0 :                 return;
     274             : 
     275             :         /* VF FLR */
     276           0 :         ctrl &= ~PCI_MSIX_FLAGS_ENABLE;
     277           0 :         pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
     278           0 :         ctrl |= PCI_MSIX_FLAGS_ENABLE;
     279           0 :         pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
     280             : }
     281             : 
     282             : /**
     283             :  * amdgpu_irq_init - initialize interrupt handling
     284             :  *
     285             :  * @adev: amdgpu device pointer
     286             :  *
      287             :  * Sets up the work functions for hotplug and IH ring processing, enables MSI
      288             :  * functionality, initializes vblank handling and installs the IRQ handler.
     289             :  *
     290             :  * Returns:
     291             :  * 0 on success or error code on failure
     292             :  */
     293           0 : int amdgpu_irq_init(struct amdgpu_device *adev)
     294             : {
     295           0 :         int r = 0;
     296             :         unsigned int irq;
     297             : 
     298           0 :         spin_lock_init(&adev->irq.lock);
     299             : 
     300             :         /* Enable MSI if not disabled by module parameter */
     301           0 :         adev->irq.msi_enabled = false;
     302             : 
     303           0 :         if (amdgpu_msi_ok(adev)) {
     304           0 :                 int nvec = pci_msix_vec_count(adev->pdev);
     305             :                 unsigned int flags;
     306             : 
     307           0 :                 if (nvec <= 0) {
     308             :                         flags = PCI_IRQ_MSI;
     309             :                 } else {
     310           0 :                         flags = PCI_IRQ_MSI | PCI_IRQ_MSIX;
     311             :                 }
     312             :                 /* we only need one vector */
     313           0 :                 nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags);
     314           0 :                 if (nvec > 0) {
     315           0 :                         adev->irq.msi_enabled = true;
     316             :                         dev_dbg(adev->dev, "using MSI/MSI-X.\n");
     317             :                 }
     318             :         }
     319             : 
     320           0 :         if (!amdgpu_device_has_dc_support(adev)) {
     321           0 :                 if (!adev->enable_virtual_display)
     322             :                         /* Disable vblank IRQs aggressively for power-saving */
     323             :                         /* XXX: can this be enabled for DC? */
     324           0 :                         adev_to_drm(adev)->vblank_disable_immediate = true;
     325             : 
     326           0 :                 r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
     327           0 :                 if (r)
     328             :                         return r;
     329             : 
     330             :                 /* Pre-DCE11 */
     331           0 :                 INIT_WORK(&adev->hotplug_work,
     332             :                                 amdgpu_hotplug_work_func);
     333             :         }
     334             : 
     335           0 :         INIT_WORK(&adev->irq.ih1_work, amdgpu_irq_handle_ih1);
     336           0 :         INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2);
     337           0 :         INIT_WORK(&adev->irq.ih_soft_work, amdgpu_irq_handle_ih_soft);
     338             : 
     339             :         /* Use vector 0 for MSI-X. */
     340           0 :         r = pci_irq_vector(adev->pdev, 0);
     341           0 :         if (r < 0)
     342             :                 return r;
     343           0 :         irq = r;
     344             : 
     345             :         /* PCI devices require shared interrupts. */
     346           0 :         r = request_irq(irq, amdgpu_irq_handler, IRQF_SHARED, adev_to_drm(adev)->driver->name,
     347           0 :                         adev_to_drm(adev));
     348           0 :         if (r) {
     349           0 :                 if (!amdgpu_device_has_dc_support(adev))
     350           0 :                         flush_work(&adev->hotplug_work);
     351             :                 return r;
     352             :         }
     353           0 :         adev->irq.installed = true;
     354           0 :         adev->irq.irq = irq;
     355           0 :         adev_to_drm(adev)->max_vblank_count = 0x00ffffff;
     356             : 
     357           0 :         DRM_DEBUG("amdgpu: irq initialized.\n");
     358           0 :         return 0;
     359             : }
     360             : 
     361             : 
     362           0 : void amdgpu_irq_fini_hw(struct amdgpu_device *adev)
     363             : {
     364           0 :         if (adev->irq.installed) {
     365           0 :                 free_irq(adev->irq.irq, adev_to_drm(adev));
     366           0 :                 adev->irq.installed = false;
     367           0 :                 if (adev->irq.msi_enabled)
     368           0 :                         pci_free_irq_vectors(adev->pdev);
     369             : 
     370           0 :                 if (!amdgpu_device_has_dc_support(adev))
     371           0 :                         flush_work(&adev->hotplug_work);
     372             :         }
     373             : 
     374           0 :         amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft);
     375           0 :         amdgpu_ih_ring_fini(adev, &adev->irq.ih);
     376           0 :         amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
     377           0 :         amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
     378           0 : }
     379             : 
     380             : /**
     381             :  * amdgpu_irq_fini_sw - shut down interrupt handling
     382             :  *
     383             :  * @adev: amdgpu device pointer
     384             :  *
      385             :  * Tears down the software interrupt state: frees the enabled_types arrays of
      386             :  * all registered IRQ sources and the per-client source tables
      387             :  * (all ASICs).
     388             :  */
     389           0 : void amdgpu_irq_fini_sw(struct amdgpu_device *adev)
     390             : {
     391             :         unsigned i, j;
     392             : 
     393           0 :         for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
     394           0 :                 if (!adev->irq.client[i].sources)
     395           0 :                         continue;
     396             : 
     397           0 :                 for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
     398           0 :                         struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
     399             : 
     400           0 :                         if (!src)
     401           0 :                                 continue;
     402             : 
     403           0 :                         kfree(src->enabled_types);
     404           0 :                         src->enabled_types = NULL;
     405             :                 }
     406           0 :                 kfree(adev->irq.client[i].sources);
     407           0 :                 adev->irq.client[i].sources = NULL;
     408             :         }
     409           0 : }
     410             : 
     411             : /**
     412             :  * amdgpu_irq_add_id - register IRQ source
     413             :  *
     414             :  * @adev: amdgpu device pointer
     415             :  * @client_id: client id
     416             :  * @src_id: source id
     417             :  * @source: IRQ source pointer
     418             :  *
     419             :  * Registers IRQ source on a client.
     420             :  *
     421             :  * Returns:
     422             :  * 0 on success or error code otherwise
     423             :  */
     424           0 : int amdgpu_irq_add_id(struct amdgpu_device *adev,
     425             :                       unsigned client_id, unsigned src_id,
     426             :                       struct amdgpu_irq_src *source)
     427             : {
     428           0 :         if (client_id >= AMDGPU_IRQ_CLIENTID_MAX)
     429             :                 return -EINVAL;
     430             : 
     431           0 :         if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
     432             :                 return -EINVAL;
     433             : 
     434           0 :         if (!source->funcs)
     435             :                 return -EINVAL;
     436             : 
     437           0 :         if (!adev->irq.client[client_id].sources) {
     438           0 :                 adev->irq.client[client_id].sources =
     439           0 :                         kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
     440             :                                 sizeof(struct amdgpu_irq_src *),
     441             :                                 GFP_KERNEL);
     442           0 :                 if (!adev->irq.client[client_id].sources)
     443             :                         return -ENOMEM;
     444             :         }
     445             : 
     446           0 :         if (adev->irq.client[client_id].sources[src_id] != NULL)
     447             :                 return -EINVAL;
     448             : 
     449           0 :         if (source->num_types && !source->enabled_types) {
     450             :                 atomic_t *types;
     451             : 
     452           0 :                 types = kcalloc(source->num_types, sizeof(atomic_t),
     453             :                                 GFP_KERNEL);
     454           0 :                 if (!types)
     455             :                         return -ENOMEM;
     456             : 
     457           0 :                 source->enabled_types = types;
     458             :         }
     459             : 
     460           0 :         adev->irq.client[client_id].sources[src_id] = source;
     461           0 :         return 0;
     462             : }
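
   A minimal sketch (hypothetical, not part of this file or its coverage data) of
   how an IP block typically supplies the funcs callbacks and then registers its
   source with amdgpu_irq_add_id() during init. The foo names, FOO_IRQ_SRC_ID and
   the adev->foo.irq member are placeholders, not real driver symbols.

        static int foo_set_irq_state(struct amdgpu_device *adev,
                                     struct amdgpu_irq_src *src,
                                     unsigned type,
                                     enum amdgpu_interrupt_state state)
        {
                /* program the block's interrupt-enable bit according to state */
                return 0;
        }

        static int foo_process_irq(struct amdgpu_device *adev,
                                   struct amdgpu_irq_src *src,
                                   struct amdgpu_iv_entry *entry)
        {
                /* handle the decoded IV entry; return 1 to mark it handled */
                return 1;
        }

        static const struct amdgpu_irq_src_funcs foo_irq_funcs = {
                .set = foo_set_irq_state,
                .process = foo_process_irq,
        };

        static int foo_sw_init(struct amdgpu_device *adev)
        {
                adev->foo.irq.num_types = 1;          /* hypothetical member */
                adev->foo.irq.funcs = &foo_irq_funcs;

                /* FOO_IRQ_SRC_ID is a placeholder source id for this sketch */
                return amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY,
                                         FOO_IRQ_SRC_ID, &adev->foo.irq);
        }
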
     463             : 
     464             : /**
     465             :  * amdgpu_irq_dispatch - dispatch IRQ to IP blocks
     466             :  *
     467             :  * @adev: amdgpu device pointer
     468             :  * @ih: interrupt ring instance
     469             :  *
     470             :  * Dispatches IRQ to IP blocks.
     471             :  */
     472           0 : void amdgpu_irq_dispatch(struct amdgpu_device *adev,
     473             :                          struct amdgpu_ih_ring *ih)
     474             : {
     475           0 :         u32 ring_index = ih->rptr >> 2;
     476             :         struct amdgpu_iv_entry entry;
     477             :         unsigned client_id, src_id;
     478             :         struct amdgpu_irq_src *src;
     479           0 :         bool handled = false;
     480             :         int r;
     481             : 
     482           0 :         entry.ih = ih;
     483           0 :         entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
     484           0 :         amdgpu_ih_decode_iv(adev, &entry);
     485             : 
     486           0 :         trace_amdgpu_iv(ih - &adev->irq.ih, &entry);
     487             : 
     488           0 :         client_id = entry.client_id;
     489           0 :         src_id = entry.src_id;
     490             : 
     491           0 :         if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
     492           0 :                 DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);
     493             : 
     494           0 :         } else  if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
     495           0 :                 DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
     496             : 
     497           0 :         } else if ((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) &&
     498           0 :                    adev->irq.virq[src_id]) {
     499           0 :                 generic_handle_domain_irq(adev->irq.domain, src_id);
     500             : 
     501           0 :         } else if (!adev->irq.client[client_id].sources) {
     502           0 :                 DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
     503             :                           client_id, src_id);
     504             : 
     505           0 :         } else if ((src = adev->irq.client[client_id].sources[src_id])) {
     506           0 :                 r = src->funcs->process(adev, src, &entry);
     507           0 :                 if (r < 0)
     508           0 :                         DRM_ERROR("error processing interrupt (%d)\n", r);
     509           0 :                 else if (r)
     510           0 :                         handled = true;
     511             : 
     512             :         } else {
     513           0 :                 DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
     514             :         }
     515             : 
     516             :         /* Send it to amdkfd as well if it isn't already handled */
     517           0 :         if (!handled)
     518           0 :                 amdgpu_amdkfd_interrupt(adev, entry.iv_entry);
     519             : 
     520           0 :         if (amdgpu_ih_ts_after(ih->processed_timestamp, entry.timestamp))
     521           0 :                 ih->processed_timestamp = entry.timestamp;
     522           0 : }
     523             : 
     524             : /**
     525             :  * amdgpu_irq_delegate - delegate IV to soft IH ring
     526             :  *
     527             :  * @adev: amdgpu device pointer
     528             :  * @entry: IV entry
     529             :  * @num_dw: size of IV
     530             :  *
     531             :  * Delegate the IV to the soft IH ring and schedule processing of it. Used
     532             :  * if the hardware delegation to IH1 or IH2 doesn't work for some reason.
     533             :  */
     534           0 : void amdgpu_irq_delegate(struct amdgpu_device *adev,
     535             :                          struct amdgpu_iv_entry *entry,
     536             :                          unsigned int num_dw)
     537             : {
     538           0 :         amdgpu_ih_ring_write(&adev->irq.ih_soft, entry->iv_entry, num_dw);
     539           0 :         schedule_work(&adev->irq.ih_soft_work);
     540           0 : }
     541             : 
     542             : /**
     543             :  * amdgpu_irq_update - update hardware interrupt state
     544             :  *
     545             :  * @adev: amdgpu device pointer
     546             :  * @src: interrupt source pointer
     547             :  * @type: type of interrupt
     548             :  *
     549             :  * Updates interrupt state for the specific source (all ASICs).
     550             :  */
     551           0 : int amdgpu_irq_update(struct amdgpu_device *adev,
     552             :                              struct amdgpu_irq_src *src, unsigned type)
     553             : {
     554             :         unsigned long irqflags;
     555             :         enum amdgpu_interrupt_state state;
     556             :         int r;
     557             : 
     558           0 :         spin_lock_irqsave(&adev->irq.lock, irqflags);
     559             : 
      560             :         /* We need to determine the state after taking the lock, otherwise
      561             :            we might disable an interrupt that was just enabled */
     562           0 :         if (amdgpu_irq_enabled(adev, src, type))
     563             :                 state = AMDGPU_IRQ_STATE_ENABLE;
     564             :         else
     565           0 :                 state = AMDGPU_IRQ_STATE_DISABLE;
     566             : 
     567           0 :         r = src->funcs->set(adev, src, type, state);
     568           0 :         spin_unlock_irqrestore(&adev->irq.lock, irqflags);
     569           0 :         return r;
     570             : }
     571             : 
     572             : /**
     573             :  * amdgpu_irq_gpu_reset_resume_helper - update interrupt states on all sources
     574             :  *
     575             :  * @adev: amdgpu device pointer
     576             :  *
     577             :  * Updates state of all types of interrupts on all sources on resume after
     578             :  * reset.
     579             :  */
     580           0 : void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
     581             : {
     582             :         int i, j, k;
     583             : 
     584           0 :         if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
     585           0 :                 amdgpu_restore_msix(adev);
     586             : 
     587           0 :         for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
     588           0 :                 if (!adev->irq.client[i].sources)
     589           0 :                         continue;
     590             : 
     591           0 :                 for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
     592           0 :                         struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
     593             : 
     594           0 :                         if (!src || !src->funcs || !src->funcs->set)
     595           0 :                                 continue;
     596           0 :                         for (k = 0; k < src->num_types; k++)
     597           0 :                                 amdgpu_irq_update(adev, src, k);
     598             :                 }
     599             :         }
     600           0 : }
     601             : 
     602             : /**
     603             :  * amdgpu_irq_get - enable interrupt
     604             :  *
     605             :  * @adev: amdgpu device pointer
     606             :  * @src: interrupt source pointer
     607             :  * @type: type of interrupt
     608             :  *
     609             :  * Enables specified type of interrupt on the specified source (all ASICs).
     610             :  *
     611             :  * Returns:
     612             :  * 0 on success or error code otherwise
     613             :  */
     614           0 : int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
     615             :                    unsigned type)
     616             : {
     617           0 :         if (!adev->irq.installed)
     618             :                 return -ENOENT;
     619             : 
     620           0 :         if (type >= src->num_types)
     621             :                 return -EINVAL;
     622             : 
     623           0 :         if (!src->enabled_types || !src->funcs->set)
     624             :                 return -EINVAL;
     625             : 
     626           0 :         if (atomic_inc_return(&src->enabled_types[type]) == 1)
     627           0 :                 return amdgpu_irq_update(adev, src, type);
     628             : 
     629             :         return 0;
     630             : }
     631             : 
     632             : /**
     633             :  * amdgpu_irq_put - disable interrupt
     634             :  *
     635             :  * @adev: amdgpu device pointer
     636             :  * @src: interrupt source pointer
     637             :  * @type: type of interrupt
     638             :  *
      639             :  * Disables specified type of interrupt on the specified source (all ASICs).
     640             :  *
     641             :  * Returns:
     642             :  * 0 on success or error code otherwise
     643             :  */
     644           0 : int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
     645             :                    unsigned type)
     646             : {
     647           0 :         if (!adev->irq.installed)
     648             :                 return -ENOENT;
     649             : 
     650           0 :         if (type >= src->num_types)
     651             :                 return -EINVAL;
     652             : 
     653           0 :         if (!src->enabled_types || !src->funcs->set)
     654             :                 return -EINVAL;
     655             : 
     656           0 :         if (atomic_dec_and_test(&src->enabled_types[type]))
     657           0 :                 return amdgpu_irq_update(adev, src, type);
     658             : 
     659             :         return 0;
     660             : }
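
   Continuing the hypothetical foo sketch above: callers take and drop per-type
   references, and the hardware state is only reprogrammed on the first get and
   the last put. Typical usage might look like this (names are placeholders):

        /* first reference of type 0 programs the source to ENABLE */
        r = amdgpu_irq_get(adev, &adev->foo.irq, 0);
        if (r)
                return r;

        /* ... interrupt in use ... */

        /* dropping the last reference programs the source to DISABLE */
        amdgpu_irq_put(adev, &adev->foo.irq, 0);
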
     661             : 
     662             : /**
     663             :  * amdgpu_irq_enabled - check whether interrupt is enabled or not
     664             :  *
     665             :  * @adev: amdgpu device pointer
     666             :  * @src: interrupt source pointer
     667             :  * @type: type of interrupt
     668             :  *
     669             :  * Checks whether the given type of interrupt is enabled on the given source.
     670             :  *
     671             :  * Returns:
     672             :  * *true* if interrupt is enabled, *false* if interrupt is disabled or on
     673             :  * invalid parameters
     674             :  */
     675           0 : bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
     676             :                         unsigned type)
     677             : {
     678           0 :         if (!adev->irq.installed)
     679             :                 return false;
     680             : 
     681           0 :         if (type >= src->num_types)
     682             :                 return false;
     683             : 
     684           0 :         if (!src->enabled_types || !src->funcs->set)
     685             :                 return false;
     686             : 
     687           0 :         return !!atomic_read(&src->enabled_types[type]);
     688             : }
     689             : 
     690             : /* XXX: Generic IRQ handling */
     691           0 : static void amdgpu_irq_mask(struct irq_data *irqd)
     692             : {
     693             :         /* XXX */
     694           0 : }
     695             : 
     696           0 : static void amdgpu_irq_unmask(struct irq_data *irqd)
     697             : {
     698             :         /* XXX */
     699           0 : }
     700             : 
     701             : /* amdgpu hardware interrupt chip descriptor */
     702             : static struct irq_chip amdgpu_irq_chip = {
     703             :         .name = "amdgpu-ih",
     704             :         .irq_mask = amdgpu_irq_mask,
     705             :         .irq_unmask = amdgpu_irq_unmask,
     706             : };
     707             : 
     708             : /**
     709             :  * amdgpu_irqdomain_map - create mapping between virtual and hardware IRQ numbers
     710             :  *
     711             :  * @d: amdgpu IRQ domain pointer (unused)
     712             :  * @irq: virtual IRQ number
     713             :  * @hwirq: hardware irq number
     714             :  *
     715             :  * Current implementation assigns simple interrupt handler to the given virtual
     716             :  * IRQ.
     717             :  *
     718             :  * Returns:
     719             :  * 0 on success or error code otherwise
     720             :  */
     721           0 : static int amdgpu_irqdomain_map(struct irq_domain *d,
     722             :                                 unsigned int irq, irq_hw_number_t hwirq)
     723             : {
     724           0 :         if (hwirq >= AMDGPU_MAX_IRQ_SRC_ID)
     725             :                 return -EPERM;
     726             : 
     727           0 :         irq_set_chip_and_handler(irq,
     728             :                                  &amdgpu_irq_chip, handle_simple_irq);
     729           0 :         return 0;
     730             : }
     731             : 
     732             : /* Implementation of methods for amdgpu IRQ domain */
     733             : static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
     734             :         .map = amdgpu_irqdomain_map,
     735             : };
     736             : 
     737             : /**
     738             :  * amdgpu_irq_add_domain - create a linear IRQ domain
     739             :  *
     740             :  * @adev: amdgpu device pointer
     741             :  *
     742             :  * Creates an IRQ domain for GPU interrupt sources
     743             :  * that may be driven by another driver (e.g., ACP).
     744             :  *
     745             :  * Returns:
     746             :  * 0 on success or error code otherwise
     747             :  */
     748           0 : int amdgpu_irq_add_domain(struct amdgpu_device *adev)
     749             : {
     750           0 :         adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
     751             :                                                  &amdgpu_hw_irqdomain_ops, adev);
     752           0 :         if (!adev->irq.domain) {
     753           0 :                 DRM_ERROR("GPU irq add domain failed\n");
     754           0 :                 return -ENODEV;
     755             :         }
     756             : 
     757             :         return 0;
     758             : }
     759             : 
     760             : /**
     761             :  * amdgpu_irq_remove_domain - remove the IRQ domain
     762             :  *
     763             :  * @adev: amdgpu device pointer
     764             :  *
     765             :  * Removes the IRQ domain for GPU interrupt sources
     766             :  * that may be driven by another driver (e.g., ACP).
     767             :  */
     768           0 : void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
     769             : {
     770           0 :         if (adev->irq.domain) {
     771           0 :                 irq_domain_remove(adev->irq.domain);
     772           0 :                 adev->irq.domain = NULL;
     773             :         }
     774           0 : }
     775             : 
     776             : /**
      777             :  * amdgpu_irq_create_mapping - create mapping between an IH src_id and a Linux IRQ
     778             :  *
     779             :  * @adev: amdgpu device pointer
     780             :  * @src_id: IH source id
     781             :  *
      782             :  * Creates a mapping between a domain IRQ (GPU IH src id) and a Linux IRQ.
     783             :  * Use this for components that generate a GPU interrupt, but are driven
     784             :  * by a different driver (e.g., ACP).
     785             :  *
     786             :  * Returns:
      787             :  * Linux IRQ number (0 if the mapping could not be created)
     788             :  */
     789           0 : unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id)
     790             : {
     791           0 :         adev->irq.virq[src_id] = irq_create_mapping(adev->irq.domain, src_id);
     792             : 
     793           0 :         return adev->irq.virq[src_id];
     794             : }
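
   A hedged sketch of how a client driver for a GPU-hosted device (e.g. ACP)
   might consume the IRQ domain: map its IH src_id to a Linux IRQ and request it
   through the regular kernel API. FOO_IH_SRC_ID, foo_client_irq_handler and
   foo_data are hypothetical names used only for illustration.

        unsigned int virq = amdgpu_irq_create_mapping(adev, FOO_IH_SRC_ID);

        if (!virq)
                return -ENODEV;

        /* standard Linux IRQ request on the mapped virtual IRQ */
        r = request_irq(virq, foo_client_irq_handler, 0, "foo-client", foo_data);
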

Generated by: LCOV version 1.14