LCOV - code coverage report
Current view: top level - kernel/irq - msi.c (source / functions) Hit Total Coverage
Test: coverage.info Lines: 0 344 0.0 %
Date: 2022-12-09 01:23:36 Functions: 0 44 0.0 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * Copyright (C) 2014 Intel Corp.
       4             :  * Author: Jiang Liu <jiang.liu@linux.intel.com>
       5             :  *
       6             :  * This file is licensed under GPLv2.
       7             :  *
       8             :  * This file contains common code to support Message Signaled Interrupts for
       9             :  * PCI compatible and non PCI compatible devices.
      10             :  */
      11             : #include <linux/types.h>
      12             : #include <linux/device.h>
      13             : #include <linux/irq.h>
      14             : #include <linux/irqdomain.h>
      15             : #include <linux/msi.h>
      16             : #include <linux/slab.h>
      17             : #include <linux/sysfs.h>
      18             : #include <linux/pci.h>
      19             : 
      20             : #include "internals.h"
      21             : 
      22             : static inline int msi_sysfs_create_group(struct device *dev);
      23             : 
      24             : /**
      25             :  * msi_alloc_desc - Allocate an initialized msi_desc
      26             :  * @dev:        Pointer to the device for which this is allocated
      27             :  * @nvec:       The number of vectors used in this entry
      28             :  * @affinity:   Optional pointer to an affinity mask array size of @nvec
      29             :  *
      30             :  * If @affinity is not %NULL then an affinity array[@nvec] is allocated
      31             :  * and the affinity masks and flags from @affinity are copied.
      32             :  *
      33             :  * Return: pointer to allocated &msi_desc on success or %NULL on failure
      34             :  */
      35           0 : static struct msi_desc *msi_alloc_desc(struct device *dev, int nvec,
      36             :                                         const struct irq_affinity_desc *affinity)
      37             : {
      38           0 :         struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL);
      39             : 
      40           0 :         if (!desc)
      41             :                 return NULL;
      42             : 
      43           0 :         desc->dev = dev;
      44           0 :         desc->nvec_used = nvec;
      45           0 :         if (affinity) {
      46           0 :                 desc->affinity = kmemdup(affinity, nvec * sizeof(*desc->affinity), GFP_KERNEL);
      47           0 :                 if (!desc->affinity) {
      48           0 :                         kfree(desc);
      49           0 :                         return NULL;
      50             :                 }
      51             :         }
      52             :         return desc;
      53             : }
      54             : 
/* Free a descriptor allocated by msi_alloc_desc() including its affinity array */
static void msi_free_desc(struct msi_desc *desc)
{
	/* The affinity array must be released before the descriptor itself */
	kfree(desc->affinity);
	kfree(desc);
}
      60             : 
      61           0 : static int msi_insert_desc(struct msi_device_data *md, struct msi_desc *desc, unsigned int index)
      62             : {
      63             :         int ret;
      64             : 
      65           0 :         desc->msi_index = index;
      66           0 :         ret = xa_insert(&md->__store, index, desc, GFP_KERNEL);
      67           0 :         if (ret)
      68             :                 msi_free_desc(desc);
      69           0 :         return ret;
      70             : }
      71             : 
/**
 * msi_add_msi_desc - Allocate and initialize a MSI descriptor
 * @dev:	Pointer to the device for which the descriptor is allocated
 * @init_desc:	Pointer to an MSI descriptor to initialize the new descriptor
 *
 * Must be called with the MSI descriptor mutex held, see msi_lock_descs().
 *
 * Return: 0 on success or an appropriate failure code.
 */
int msi_add_msi_desc(struct device *dev, struct msi_desc *init_desc)
{
	struct msi_desc *desc;

	lockdep_assert_held(&dev->msi.data->mutex);

	desc = msi_alloc_desc(dev, init_desc->nvec_used, init_desc->affinity);
	if (!desc)
		return -ENOMEM;

	/* Copy type specific data to the new descriptor. */
	desc->pci = init_desc->pci;
	/* msi_insert_desc() frees @desc on failure */
	return msi_insert_desc(dev->msi.data, desc, init_desc->msi_index);
}
      93             : 
/**
 * msi_add_simple_msi_descs - Allocate and initialize MSI descriptors
 * @dev:	Pointer to the device for which the descriptors are allocated
 * @index:	Index for the first MSI descriptor
 * @ndesc:	Number of descriptors to allocate
 *
 * Must be called with the MSI descriptor mutex held, see msi_lock_descs().
 *
 * Return: 0 on success or an appropriate failure code.
 */
static int msi_add_simple_msi_descs(struct device *dev, unsigned int index, unsigned int ndesc)
{
	unsigned int idx, last = index + ndesc - 1;
	struct msi_desc *desc;
	int ret;

	lockdep_assert_held(&dev->msi.data->mutex);

	for (idx = index; idx <= last; idx++) {
		desc = msi_alloc_desc(dev, 1, NULL);
		if (!desc)
			goto fail_mem;
		/* msi_insert_desc() frees @desc on failure */
		ret = msi_insert_desc(dev->msi.data, desc, idx);
		if (ret)
			goto fail;
	}
	return 0;

fail_mem:
	ret = -ENOMEM;
fail:
	/*
	 * Release the descriptors inserted so far. Indices which were never
	 * inserted are simply absent from the xarray, and the
	 * MSI_DESC_NOTASSOCIATED filter only matches descriptors which have
	 * no Linux interrupt number assigned yet.
	 */
	msi_free_msi_descs_range(dev, MSI_DESC_NOTASSOCIATED, index, last);
	return ret;
}
     126             : 
     127           0 : static bool msi_desc_match(struct msi_desc *desc, enum msi_desc_filter filter)
     128             : {
     129           0 :         switch (filter) {
     130             :         case MSI_DESC_ALL:
     131             :                 return true;
     132             :         case MSI_DESC_NOTASSOCIATED:
     133           0 :                 return !desc->irq;
     134             :         case MSI_DESC_ASSOCIATED:
     135           0 :                 return !!desc->irq;
     136             :         }
     137           0 :         WARN_ON_ONCE(1);
     138             :         return false;
     139             : }
     140             : 
/**
 * msi_free_msi_descs_range - Free MSI descriptors of a device
 * @dev:		Device to free the descriptors
 * @filter:		Descriptor state filter
 * @first_index:	Index to start freeing from
 * @last_index:		Last index to be freed (inclusive)
 *
 * Must be called with the MSI descriptor mutex held, see msi_lock_descs().
 */
void msi_free_msi_descs_range(struct device *dev, enum msi_desc_filter filter,
			      unsigned int first_index, unsigned int last_index)
{
	struct xarray *xa = &dev->msi.data->__store;
	struct msi_desc *desc;
	unsigned long idx;

	lockdep_assert_held(&dev->msi.data->mutex);

	xa_for_each_range(xa, idx, desc, first_index, last_index) {
		if (msi_desc_match(desc, filter)) {
			/* Erasing the current entry during xarray iteration is safe */
			xa_erase(xa, idx);
			msi_free_desc(desc);
		}
	}
}
     164             : 
/* Copy the cached MSI message of @entry into @msg */
void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	*msg = entry->msg;
}
     169             : 
/*
 * get_cached_msi_msg - Copy the cached MSI message of interrupt @irq into @msg
 *
 * NOTE(review): irq_get_msi_desc() can return NULL when @irq has no MSI
 * descriptor; the result is dereferenced unconditionally, so callers are
 * presumably required to pass a valid MSI interrupt number — confirm.
 */
void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = irq_get_msi_desc(irq);

	__get_cached_msi_msg(entry, msg);
}
EXPORT_SYMBOL_GPL(get_cached_msi_msg);
     177             : 
/* devres release callback: tear down the MSI device data when @dev goes away */
static void msi_device_data_release(struct device *dev, void *res)
{
	struct msi_device_data *md = res;

	/* All descriptors should have been freed before the device is destroyed */
	WARN_ON_ONCE(!xa_empty(&md->__store));
	xa_destroy(&md->__store);
	dev->msi.data = NULL;
}
     186             : 
/**
 * msi_setup_device_data - Setup MSI device data
 * @dev:	Device for which MSI device data should be set up
 *
 * Return: 0 on success, appropriate error code otherwise
 *
 * This can be called more than once for @dev. If the MSI device data is
 * already allocated the call succeeds. The allocated memory is
 * automatically released when the device is destroyed.
 */
int msi_setup_device_data(struct device *dev)
{
	struct msi_device_data *md;
	int ret;

	/* Already set up? Nothing to do. */
	if (dev->msi.data)
		return 0;

	md = devres_alloc(msi_device_data_release, sizeof(*md), GFP_KERNEL);
	if (!md)
		return -ENOMEM;

	ret = msi_sysfs_create_group(dev);
	if (ret) {
		devres_free(md);
		return ret;
	}

	xa_init(&md->__store);
	mutex_init(&md->mutex);
	dev->msi.data = md;
	/* Hand ownership to devres; released via msi_device_data_release() */
	devres_add(dev, md);
	return 0;
}
     221             : 
/**
 * msi_lock_descs - Lock the MSI descriptor storage of a device
 * @dev:	Device to operate on
 *
 * Pairs with msi_unlock_descs().
 */
void msi_lock_descs(struct device *dev)
{
	mutex_lock(&dev->msi.data->mutex);
}
EXPORT_SYMBOL_GPL(msi_lock_descs);
     231             : 
/**
 * msi_unlock_descs - Unlock the MSI descriptor storage of a device
 * @dev:	Device to operate on
 */
void msi_unlock_descs(struct device *dev)
{
	/* Invalidate the index which was cached by the iterator */
	dev->msi.data->__iter_idx = MSI_MAX_INDEX;
	mutex_unlock(&dev->msi.data->mutex);
}
EXPORT_SYMBOL_GPL(msi_unlock_descs);
     243             : 
/*
 * Find the next descriptor matching @filter, starting the search at the
 * cached iterator index md->__iter_idx. On a match the iterator index
 * points at the match; when the search is exhausted the index is parked
 * at MSI_MAX_INDEX so msi_next_desc() terminates.
 */
static struct msi_desc *msi_find_desc(struct msi_device_data *md, enum msi_desc_filter filter)
{
	struct msi_desc *desc;

	xa_for_each_start(&md->__store, md->__iter_idx, desc, md->__iter_idx) {
		if (msi_desc_match(desc, filter))
			return desc;
	}
	md->__iter_idx = MSI_MAX_INDEX;
	return NULL;
}
     255             : 
/**
 * msi_first_desc - Get the first MSI descriptor of a device
 * @dev:	Device to operate on
 * @filter:	Descriptor state filter
 *
 * Must be called with the MSI descriptor mutex held, i.e. msi_lock_descs()
 * must be invoked before the call.
 *
 * Return: Pointer to the first MSI descriptor matching the search
 *	   criteria, NULL if none found.
 */
struct msi_desc *msi_first_desc(struct device *dev, enum msi_desc_filter filter)
{
	struct msi_device_data *md = dev->msi.data;

	if (WARN_ON_ONCE(!md))
		return NULL;

	lockdep_assert_held(&md->mutex);

	/* Restart the cached iterator at index 0 */
	md->__iter_idx = 0;
	return msi_find_desc(md, filter);
}
EXPORT_SYMBOL_GPL(msi_first_desc);
     280             : 
/**
 * msi_next_desc - Get the next MSI descriptor of a device
 * @dev:	Device to operate on
 * @filter:	Descriptor state filter
 *
 * The first invocation of msi_next_desc() has to be preceded by a
 * successful invocation of msi_first_desc(). Consecutive invocations are
 * only valid if the previous one was successful. All these operations have
 * to be done within the same MSI mutex held region.
 *
 * Return: Pointer to the next MSI descriptor matching the search
 *	   criteria, NULL if none found.
 */
struct msi_desc *msi_next_desc(struct device *dev, enum msi_desc_filter filter)
{
	struct msi_device_data *md = dev->msi.data;

	if (WARN_ON_ONCE(!md))
		return NULL;

	lockdep_assert_held(&md->mutex);

	/* Iteration already ran off the end (or msi_unlock_descs() reset it) */
	if (md->__iter_idx >= (unsigned long)MSI_MAX_INDEX)
		return NULL;

	md->__iter_idx++;
	return msi_find_desc(md, filter);
}
EXPORT_SYMBOL_GPL(msi_next_desc);
     309             : 
/**
 * msi_get_virq - Return Linux interrupt number of a MSI interrupt
 * @dev:	Device to operate on
 * @index:	MSI interrupt index to look for (0-based)
 *
 * Return: The Linux interrupt number on success (> 0), 0 if not found
 */
unsigned int msi_get_virq(struct device *dev, unsigned int index)
{
	struct msi_desc *desc;
	unsigned int ret = 0;
	bool pcimsi;

	if (!dev->msi.data)
		return 0;

	/* With PCI/MSI enabled all vectors live in descriptor 0 (see below) */
	pcimsi = dev_is_pci(dev) ? to_pci_dev(dev)->msi_enabled : false;

	msi_lock_descs(dev);
	desc = xa_load(&dev->msi.data->__store, pcimsi ? 0 : index);
	if (desc && desc->irq) {
		/*
		 * PCI-MSI has only one descriptor for multiple interrupts.
		 * PCI-MSIX and platform MSI use a descriptor per
		 * interrupt.
		 */
		if (pcimsi) {
			/* Vectors are consecutive Linux interrupt numbers */
			if (index < desc->nvec_used)
				ret = desc->irq + index;
		} else {
			ret = desc->irq;
		}
	}
	msi_unlock_descs(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(msi_get_virq);
     347             : 
     348             : #ifdef CONFIG_SYSFS
/* The "msi_irqs" sysfs group; per-interrupt files are added at runtime */
static struct attribute *msi_dev_attrs[] = {
	NULL
};

static const struct attribute_group msi_irqs_group = {
	.name	= "msi_irqs",
	.attrs	= msi_dev_attrs,
};
     357             : 
/* Register the (initially empty) msi_irqs group; removal is devres managed */
static inline int msi_sysfs_create_group(struct device *dev)
{
	return devm_device_add_group(dev, &msi_irqs_group);
}
     362             : 
     363           0 : static ssize_t msi_mode_show(struct device *dev, struct device_attribute *attr,
     364             :                              char *buf)
     365             : {
     366             :         /* MSI vs. MSIX is per device not per interrupt */
     367           0 :         bool is_msix = dev_is_pci(dev) ? to_pci_dev(dev)->msix_enabled : false;
     368             : 
     369           0 :         return sysfs_emit(buf, "%s\n", is_msix ? "msix" : "msi");
     370             : }
     371             : 
/* Remove and free the sysfs attributes attached to @desc, if any */
static void msi_sysfs_remove_desc(struct device *dev, struct msi_desc *desc)
{
	struct device_attribute *attrs = desc->sysfs_attrs;
	int i;

	if (!attrs)
		return;

	desc->sysfs_attrs = NULL;
	for (i = 0; i < desc->nvec_used; i++) {
		/* ->show is only set when the file was actually added */
		if (attrs[i].show)
			sysfs_remove_file_from_group(&dev->kobj, &attrs[i].attr, msi_irqs_group.name);
		/* kfree(NULL) is a no-op for names never allocated */
		kfree(attrs[i].attr.name);
	}
	kfree(attrs);
}
     388             : 
/* Create one sysfs file per Linux interrupt number of @desc */
static int msi_sysfs_populate_desc(struct device *dev, struct msi_desc *desc)
{
	struct device_attribute *attrs;
	int ret, i;

	/* kcalloc() zeroes the array, which the cleanup path relies on */
	attrs = kcalloc(desc->nvec_used, sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	desc->sysfs_attrs = attrs;
	for (i = 0; i < desc->nvec_used; i++) {
		sysfs_attr_init(&attrs[i].attr);
		/* The file is named after the Linux interrupt number */
		attrs[i].attr.name = kasprintf(GFP_KERNEL, "%d", desc->irq + i);
		if (!attrs[i].attr.name) {
			ret = -ENOMEM;
			goto fail;
		}

		attrs[i].attr.mode = 0444;
		attrs[i].show = msi_mode_show;

		ret = sysfs_add_file_to_group(&dev->kobj, &attrs[i].attr, msi_irqs_group.name);
		if (ret) {
			/* Clear ->show so cleanup does not remove a file which was never added */
			attrs[i].show = NULL;
			goto fail;
		}
	}
	return 0;

fail:
	msi_sysfs_remove_desc(dev, desc);
	return ret;
}
     422             : 
     423             : #ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS
/**
 * msi_device_populate_sysfs - Populate msi_irqs sysfs entries for a device
 * @dev:	The device (PCI, platform etc) which will get sysfs entries
 *
 * Return: 0 on success, error code from msi_sysfs_populate_desc() otherwise
 */
int msi_device_populate_sysfs(struct device *dev)
{
	struct msi_desc *desc;
	int ret;

	msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED) {
		/* Skip descriptors which are already populated */
		if (desc->sysfs_attrs)
			continue;
		ret = msi_sysfs_populate_desc(dev, desc);
		if (ret)
			return ret;
	}
	return 0;
}
     442             : 
/**
 * msi_device_destroy_sysfs - Destroy msi_irqs sysfs entries for a device
 * @dev:		The device (PCI, platform etc) for which to remove
 *			sysfs entries
 */
void msi_device_destroy_sysfs(struct device *dev)
{
	struct msi_desc *desc;

	msi_for_each_desc(desc, dev, MSI_DESC_ALL)
		msi_sysfs_remove_desc(dev, desc);
}
#endif /* CONFIG_PCI_MSI_ARCH_FALLBACKS */
#else /* CONFIG_SYSFS */
/* Stubs for builds without sysfs support */
static inline int msi_sysfs_create_group(struct device *dev) { return 0; }
static inline int msi_sysfs_populate_desc(struct device *dev, struct msi_desc *desc) { return 0; }
static inline void msi_sysfs_remove_desc(struct device *dev, struct msi_desc *desc) { }
#endif /* !CONFIG_SYSFS */
     461             : 
     462             : #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
/* Let the underlying irq chip write the composed MSI message(s) */
static inline void irq_chip_write_msi_msg(struct irq_data *data,
					  struct msi_msg *msg)
{
	data->chip->irq_write_msi_msg(data, msg);
}
     468             : 
/* Sanity check the second (level trigger) message against the domain's capabilities */
static void msi_check_level(struct irq_domain *domain, struct msi_msg *msg)
{
	struct msi_domain_info *info = domain->host_data;

	/*
	 * If the MSI provider has messed with the second message and
	 * not advertised that it is level-capable, signal the breakage.
	 */
	WARN_ON(!((info->flags & MSI_FLAG_LEVEL_CAPABLE) &&
		  (info->chip->flags & IRQCHIP_SUPPORTS_LEVEL_MSI)) &&
		(msg[1].address_lo || msg[1].address_hi || msg[1].data));
}
     481             : 
/**
 * msi_domain_set_affinity - Generic affinity setter function for MSI domains
 * @irq_data:	The irq data associated to the interrupt
 * @mask:	The affinity mask to set
 * @force:	Flag to enforce setting (disable online checks)
 *
 * Intended to be used by MSI interrupt controllers which are
 * implemented with hierarchical domains.
 *
 * Return: IRQ_SET_MASK_* result code
 */
int msi_domain_set_affinity(struct irq_data *irq_data,
			    const struct cpumask *mask, bool force)
{
	struct irq_data *parent = irq_data->parent_data;
	struct msi_msg msg[2] = { [1] = { }, };
	int ret;

	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE) {
		/* The target changed; recompose and rewrite the MSI message */
		BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
		msi_check_level(irq_data->domain, msg);
		irq_chip_write_msi_msg(irq_data, msg);
	}

	return ret;
}
     509             : 
/* irq_domain ->activate() callback: compose and write the initial MSI message */
static int msi_domain_activate(struct irq_domain *domain,
			       struct irq_data *irq_data, bool early)
{
	struct msi_msg msg[2] = { [1] = { }, };

	BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
	msi_check_level(irq_data->domain, msg);
	irq_chip_write_msi_msg(irq_data, msg);
	return 0;
}
     520             : 
     521           0 : static void msi_domain_deactivate(struct irq_domain *domain,
     522             :                                   struct irq_data *irq_data)
     523             : {
     524             :         struct msi_msg msg[2];
     525             : 
     526           0 :         memset(msg, 0, sizeof(msg));
     527           0 :         irq_chip_write_msi_msg(irq_data, msg);
     528           0 : }
     529             : 
     530           0 : static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
     531             :                             unsigned int nr_irqs, void *arg)
     532             : {
     533           0 :         struct msi_domain_info *info = domain->host_data;
     534           0 :         struct msi_domain_ops *ops = info->ops;
     535           0 :         irq_hw_number_t hwirq = ops->get_hwirq(info, arg);
     536             :         int i, ret;
     537             : 
     538           0 :         if (irq_find_mapping(domain, hwirq) > 0)
     539             :                 return -EEXIST;
     540             : 
     541           0 :         if (domain->parent) {
     542           0 :                 ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
     543           0 :                 if (ret < 0)
     544             :                         return ret;
     545             :         }
     546             : 
     547           0 :         for (i = 0; i < nr_irqs; i++) {
     548           0 :                 ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg);
     549           0 :                 if (ret < 0) {
     550           0 :                         if (ops->msi_free) {
     551           0 :                                 for (i--; i > 0; i--)
     552           0 :                                         ops->msi_free(domain, info, virq + i);
     553             :                         }
     554           0 :                         irq_domain_free_irqs_top(domain, virq, nr_irqs);
     555           0 :                         return ret;
     556             :                 }
     557             :         }
     558             : 
     559             :         return 0;
     560             : }
     561             : 
/* irq_domain ->free() callback: undo msi_domain_alloc() */
static void msi_domain_free(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs)
{
	struct msi_domain_info *info = domain->host_data;
	int i;

	if (info->ops->msi_free) {
		for (i = 0; i < nr_irqs; i++)
			info->ops->msi_free(domain, info, virq + i);
	}
	irq_domain_free_irqs_top(domain, virq, nr_irqs);
}
     574             : 
/* Generic irq domain callbacks for MSI based interrupt domains */
static const struct irq_domain_ops msi_domain_ops = {
	.alloc		= msi_domain_alloc,
	.free		= msi_domain_free,
	.activate	= msi_domain_activate,
	.deactivate	= msi_domain_deactivate,
};
     581             : 
/* Default ops->get_hwirq(): take the hardware interrupt number from @arg */
static irq_hw_number_t msi_domain_ops_get_hwirq(struct msi_domain_info *info,
						msi_alloc_info_t *arg)
{
	return arg->hwirq;
}
     587             : 
/* Default ops->msi_prepare(): just clear the allocation info */
static int msi_domain_ops_prepare(struct irq_domain *domain, struct device *dev,
				  int nvec, msi_alloc_info_t *arg)
{
	memset(arg, 0, sizeof(*arg));
	return 0;
}
     594             : 
/* Default set_desc: record the descriptor in the allocation info */
static void msi_domain_ops_set_desc(msi_alloc_info_t *arg,
				    struct msi_desc *desc)
{
	arg->desc = desc;
}
     600             : 
     601           0 : static int msi_domain_ops_init(struct irq_domain *domain,
     602             :                                struct msi_domain_info *info,
     603             :                                unsigned int virq, irq_hw_number_t hwirq,
     604             :                                msi_alloc_info_t *arg)
     605             : {
     606           0 :         irq_domain_set_hwirq_and_chip(domain, virq, hwirq, info->chip,
     607             :                                       info->chip_data);
     608           0 :         if (info->handler && info->handler_name) {
     609           0 :                 __irq_set_handler(virq, info->handler, 0, info->handler_name);
     610           0 :                 if (info->handler_data)
     611           0 :                         irq_set_handler_data(virq, info->handler_data);
     612             :         }
     613           0 :         return 0;
     614             : }
     615             : 
/* Default msi_check: no restrictions, always succeeds */
static int msi_domain_ops_check(struct irq_domain *domain,
				struct msi_domain_info *info,
				struct device *dev)
{
	return 0;
}
     622             : 
/*
 * Fallback callbacks which msi_domain_update_dom_ops() uses to fill in
 * any hooks a domain did not provide itself.
 */
static struct msi_domain_ops msi_domain_ops_default = {
	.get_hwirq		= msi_domain_ops_get_hwirq,
	.msi_init		= msi_domain_ops_init,
	.msi_check		= msi_domain_ops_check,
	.msi_prepare		= msi_domain_ops_prepare,
	.set_desc		= msi_domain_ops_set_desc,
	.domain_alloc_irqs	= __msi_domain_alloc_irqs,
	.domain_free_irqs	= __msi_domain_free_irqs,
};
     632             : 
     633           0 : static void msi_domain_update_dom_ops(struct msi_domain_info *info)
     634             : {
     635           0 :         struct msi_domain_ops *ops = info->ops;
     636             : 
     637           0 :         if (ops == NULL) {
     638           0 :                 info->ops = &msi_domain_ops_default;
     639             :                 return;
     640             :         }
     641             : 
     642           0 :         if (ops->domain_alloc_irqs == NULL)
     643           0 :                 ops->domain_alloc_irqs = msi_domain_ops_default.domain_alloc_irqs;
     644           0 :         if (ops->domain_free_irqs == NULL)
     645           0 :                 ops->domain_free_irqs = msi_domain_ops_default.domain_free_irqs;
     646             : 
     647           0 :         if (!(info->flags & MSI_FLAG_USE_DEF_DOM_OPS))
     648             :                 return;
     649             : 
     650           0 :         if (ops->get_hwirq == NULL)
     651           0 :                 ops->get_hwirq = msi_domain_ops_default.get_hwirq;
     652           0 :         if (ops->msi_init == NULL)
     653           0 :                 ops->msi_init = msi_domain_ops_default.msi_init;
     654           0 :         if (ops->msi_check == NULL)
     655           0 :                 ops->msi_check = msi_domain_ops_default.msi_check;
     656           0 :         if (ops->msi_prepare == NULL)
     657           0 :                 ops->msi_prepare = msi_domain_ops_default.msi_prepare;
     658           0 :         if (ops->set_desc == NULL)
     659           0 :                 ops->set_desc = msi_domain_ops_default.set_desc;
     660             : }
     661             : 
     662           0 : static void msi_domain_update_chip_ops(struct msi_domain_info *info)
     663             : {
     664           0 :         struct irq_chip *chip = info->chip;
     665             : 
     666           0 :         BUG_ON(!chip || !chip->irq_mask || !chip->irq_unmask);
     667           0 :         if (!chip->irq_set_affinity)
     668           0 :                 chip->irq_set_affinity = msi_domain_set_affinity;
     669           0 : }
     670             : 
     671             : /**
     672             :  * msi_create_irq_domain - Create an MSI interrupt domain
     673             :  * @fwnode:     Optional fwnode of the interrupt controller
     674             :  * @info:       MSI domain info
     675             :  * @parent:     Parent irq domain
     676             :  *
     677             :  * Return: pointer to the created &struct irq_domain or %NULL on failure
     678             :  */
     679           0 : struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
     680             :                                          struct msi_domain_info *info,
     681             :                                          struct irq_domain *parent)
     682             : {
     683             :         struct irq_domain *domain;
     684             : 
     685           0 :         msi_domain_update_dom_ops(info);
     686           0 :         if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
     687           0 :                 msi_domain_update_chip_ops(info);
     688             : 
     689           0 :         domain = irq_domain_create_hierarchy(parent, IRQ_DOMAIN_FLAG_MSI, 0,
     690             :                                              fwnode, &msi_domain_ops, info);
     691             : 
     692           0 :         if (domain && !domain->name && info->chip)
     693           0 :                 domain->name = info->chip->name;
     694             : 
     695           0 :         return domain;
     696             : }
     697             : 
     698           0 : int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
     699             :                             int nvec, msi_alloc_info_t *arg)
     700             : {
     701           0 :         struct msi_domain_info *info = domain->host_data;
     702           0 :         struct msi_domain_ops *ops = info->ops;
     703             :         int ret;
     704             : 
     705           0 :         ret = ops->msi_check(domain, info, dev);
     706           0 :         if (ret == 0)
     707           0 :                 ret = ops->msi_prepare(domain, dev, nvec, arg);
     708             : 
     709           0 :         return ret;
     710             : }
     711             : 
/*
 * msi_domain_populate_irqs - Bind a preallocated range of Linux interrupt
 * numbers to fresh MSI descriptors on @dev and allocate the hierarchy
 * resources for each of them.
 *
 * @virq_base:	First Linux interrupt number of the range
 * @nvec:	Number of interrupts in the range
 * @arg:	Allocation info handed to set_desc() and the hierarchy alloc
 *
 * On failure, all interrupts allocated so far are freed again and the
 * descriptors for the whole range are removed.
 *
 * Return: %0 on success or a negative error code.
 */
int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
			     int virq_base, int nvec, msi_alloc_info_t *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	struct msi_desc *desc;
	int ret, virq;

	msi_lock_descs(dev);
	/* Create one simple descriptor per interrupt, indexed by virq */
	ret = msi_add_simple_msi_descs(dev, virq_base, nvec);
	if (ret)
		goto unlock;

	for (virq = virq_base; virq < virq_base + nvec; virq++) {
		/* Descriptor was just added above, so the lookup succeeds */
		desc = xa_load(&dev->msi.data->__store, virq);
		desc->irq = virq;

		ops->set_desc(arg, desc);
		ret = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
		if (ret)
			goto fail;

		irq_set_msi_desc(virq, desc);
	}
	msi_unlock_descs(dev);
	return 0;

fail:
	/* Free only the interrupts which were successfully allocated ... */
	for (--virq; virq >= virq_base; virq--)
		irq_domain_free_irqs_common(domain, virq, 1);
	/* ... but remove the descriptors for the entire range */
	msi_free_msi_descs_range(dev, MSI_DESC_ALL, virq_base, virq_base + nvec - 1);
unlock:
	msi_unlock_descs(dev);
	return ret;
}
     747             : 
     748             : /*
     749             :  * Carefully check whether the device can use reservation mode. If
     750             :  * reservation mode is enabled then the early activation will assign a
     751             :  * dummy vector to the device. If the PCI/MSI device does not support
     752             :  * masking of the entry then this can result in spurious interrupts when
     753             :  * the device driver is not absolutely careful. But even then a malfunction
     754             :  * of the hardware could result in a spurious interrupt on the dummy vector
     755             :  * and render the device unusable. If the entry can be masked then the core
     756             :  * logic will prevent the spurious interrupt and reservation mode can be
     757             :  * used. For now reservation mode is restricted to PCI/MSI.
     758             :  */
     759           0 : static bool msi_check_reservation_mode(struct irq_domain *domain,
     760             :                                        struct msi_domain_info *info,
     761             :                                        struct device *dev)
     762             : {
     763             :         struct msi_desc *desc;
     764             : 
     765           0 :         switch(domain->bus_token) {
     766             :         case DOMAIN_BUS_PCI_MSI:
     767             :         case DOMAIN_BUS_VMD_MSI:
     768             :                 break;
     769             :         default:
     770             :                 return false;
     771             :         }
     772             : 
     773           0 :         if (!(info->flags & MSI_FLAG_MUST_REACTIVATE))
     774             :                 return false;
     775             : 
     776           0 :         if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_ignore_mask)
     777             :                 return false;
     778             : 
     779             :         /*
     780             :          * Checking the first MSI descriptor is sufficient. MSIX supports
     781             :          * masking and MSI does so when the can_mask attribute is set.
     782             :          */
     783           0 :         desc = msi_first_desc(dev, MSI_DESC_ALL);
     784           0 :         return desc->pci.msi_attrib.is_msix || desc->pci.msi_attrib.can_mask;
     785             : }
     786             : 
     787             : static int msi_handle_pci_fail(struct irq_domain *domain, struct msi_desc *desc,
     788             :                                int allocated)
     789             : {
     790           0 :         switch(domain->bus_token) {
     791             :         case DOMAIN_BUS_PCI_MSI:
     792             :         case DOMAIN_BUS_VMD_MSI:
     793             :                 if (IS_ENABLED(CONFIG_PCI_MSI))
     794             :                         break;
     795             :                 fallthrough;
     796             :         default:
     797             :                 return -ENOSPC;
     798             :         }
     799             : 
     800             :         /* Let a failed PCI multi MSI allocation retry */
     801           0 :         if (desc->nvec_used > 1)
     802             :                 return 1;
     803             : 
     804             :         /* If there was a successful allocation let the caller know */
     805           0 :         return allocated ? allocated : -ENOSPC;
     806             : }
     807             : 
/* Flags for msi_init_virq() describing how each interrupt is set up */
#define VIRQ_CAN_RESERVE	0x01	/* Reservation mode usable (see msi_check_reservation_mode()) */
#define VIRQ_ACTIVATE		0x02	/* Activate the interrupt early (MSI_FLAG_ACTIVATE_EARLY) */
#define VIRQ_NOMASK_QUIRK	0x04	/* X86 affinity quirk in reservation mode */
     811             : 
     812           0 : static int msi_init_virq(struct irq_domain *domain, int virq, unsigned int vflags)
     813             : {
     814           0 :         struct irq_data *irqd = irq_domain_get_irq_data(domain, virq);
     815             :         int ret;
     816             : 
     817           0 :         if (!(vflags & VIRQ_CAN_RESERVE)) {
     818           0 :                 irqd_clr_can_reserve(irqd);
     819           0 :                 if (vflags & VIRQ_NOMASK_QUIRK)
     820           0 :                         irqd_set_msi_nomask_quirk(irqd);
     821             :         }
     822             : 
     823           0 :         if (!(vflags & VIRQ_ACTIVATE))
     824             :                 return 0;
     825             : 
     826           0 :         ret = irq_domain_activate_irq(irqd, vflags & VIRQ_CAN_RESERVE);
     827           0 :         if (ret)
     828             :                 return ret;
     829             :         /*
     830             :          * If the interrupt uses reservation mode, clear the activated bit
     831             :          * so request_irq() will assign the final vector.
     832             :          */
     833           0 :         if (vflags & VIRQ_CAN_RESERVE)
     834           0 :                 irqd_clr_activated(irqd);
     835             :         return 0;
     836             : }
     837             : 
/*
 * Default interrupt allocation for MSI domains: prepare the allocation
 * info, then walk all not yet associated descriptors of @dev and allocate
 * desc->nvec_used interrupts for each of them.
 *
 * Note: on failure this returns without unwinding. Interrupts allocated so
 * far are torn down by the caller (msi_domain_alloc_irqs_descs_locked())
 * via the domain_free_irqs() path.
 */
int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
			    int nvec)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	msi_alloc_info_t arg = { };
	unsigned int vflags = 0;
	struct msi_desc *desc;
	int allocated = 0;
	int i, ret, virq;

	ret = msi_domain_prepare_irqs(domain, dev, nvec, &arg);
	if (ret)
		return ret;

	/*
	 * This flag is set by the PCI layer as we need to activate
	 * the MSI entries before the PCI layer enables MSI in the
	 * card. Otherwise the card latches a random msi message.
	 */
	if (info->flags & MSI_FLAG_ACTIVATE_EARLY)
		vflags |= VIRQ_ACTIVATE;

	/*
	 * Interrupt can use a reserved vector and will not occupy
	 * a real device vector until the interrupt is requested.
	 */
	if (msi_check_reservation_mode(domain, info, dev)) {
		vflags |= VIRQ_CAN_RESERVE;
		/*
		 * MSI affinity setting requires a special quirk (X86) when
		 * reservation mode is active.
		 */
		if (domain->flags & IRQ_DOMAIN_MSI_NOMASK_QUIRK)
			vflags |= VIRQ_NOMASK_QUIRK;
	}

	msi_for_each_desc(desc, dev, MSI_DESC_NOTASSOCIATED) {
		ops->set_desc(&arg, desc);

		virq = __irq_domain_alloc_irqs(domain, -1, desc->nvec_used,
					       dev_to_node(dev), &arg, false,
					       desc->affinity);
		/* A failed PCI multi-MSI allocation may be retried (return 1) */
		if (virq < 0)
			return msi_handle_pci_fail(domain, desc, allocated);

		for (i = 0; i < desc->nvec_used; i++) {
			irq_set_msi_desc_off(virq, i, desc);
			irq_debugfs_copy_devname(virq + i, dev);
			ret = msi_init_virq(domain, virq + i, vflags);
			if (ret)
				return ret;
		}
		if (info->flags & MSI_FLAG_DEV_SYSFS) {
			ret = msi_sysfs_populate_desc(dev, desc);
			if (ret)
				return ret;
		}
		allocated++;
	}
	return 0;
}
     900             : 
     901             : static int msi_domain_add_simple_msi_descs(struct msi_domain_info *info,
     902             :                                            struct device *dev,
     903             :                                            unsigned int num_descs)
     904             : {
     905           0 :         if (!(info->flags & MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS))
     906             :                 return 0;
     907             : 
     908           0 :         return msi_add_simple_msi_descs(dev, 0, num_descs);
     909             : }
     910             : 
     911             : /**
     912             :  * msi_domain_alloc_irqs_descs_locked - Allocate interrupts from a MSI interrupt domain
     913             :  * @domain:     The domain to allocate from
     914             :  * @dev:        Pointer to device struct of the device for which the interrupts
     915             :  *              are allocated
     916             :  * @nvec:       The number of interrupts to allocate
     917             :  *
     918             :  * Must be invoked from within a msi_lock_descs() / msi_unlock_descs()
     919             :  * pair. Use this for MSI irqdomains which implement their own vector
     920             :  * allocation/free.
     921             :  *
     922             :  * Return: %0 on success or an error code.
     923             :  */
     924           0 : int msi_domain_alloc_irqs_descs_locked(struct irq_domain *domain, struct device *dev,
     925             :                                        int nvec)
     926             : {
     927           0 :         struct msi_domain_info *info = domain->host_data;
     928           0 :         struct msi_domain_ops *ops = info->ops;
     929             :         int ret;
     930             : 
     931             :         lockdep_assert_held(&dev->msi.data->mutex);
     932             : 
     933           0 :         ret = msi_domain_add_simple_msi_descs(info, dev, nvec);
     934           0 :         if (ret)
     935             :                 return ret;
     936             : 
     937           0 :         ret = ops->domain_alloc_irqs(domain, dev, nvec);
     938           0 :         if (ret)
     939           0 :                 msi_domain_free_irqs_descs_locked(domain, dev);
     940             :         return ret;
     941             : }
     942             : 
     943             : /**
     944             :  * msi_domain_alloc_irqs - Allocate interrupts from a MSI interrupt domain
     945             :  * @domain:     The domain to allocate from
     946             :  * @dev:        Pointer to device struct of the device for which the interrupts
     947             :  *              are allocated
     948             :  * @nvec:       The number of interrupts to allocate
     949             :  *
     950             :  * Return: %0 on success or an error code.
     951             :  */
     952           0 : int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, int nvec)
     953             : {
     954             :         int ret;
     955             : 
     956           0 :         msi_lock_descs(dev);
     957           0 :         ret = msi_domain_alloc_irqs_descs_locked(domain, dev, nvec);
     958           0 :         msi_unlock_descs(dev);
     959           0 :         return ret;
     960             : }
     961             : 
/*
 * Default interrupt free for MSI domains: deactivate and free every
 * interrupt which has a descriptor association on @dev.
 */
void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
{
	struct msi_domain_info *info = domain->host_data;
	struct irq_data *irqd;
	struct msi_desc *desc;
	int i;

	/* Only handle MSI entries which have an interrupt associated */
	msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED) {
		/* Make sure all interrupts are deactivated */
		for (i = 0; i < desc->nvec_used; i++) {
			irqd = irq_domain_get_irq_data(domain, desc->irq + i);
			if (irqd && irqd_is_activated(irqd))
				irq_domain_deactivate_irq(irqd);
		}

		irq_domain_free_irqs(desc->irq, desc->nvec_used);
		if (info->flags & MSI_FLAG_DEV_SYSFS)
			msi_sysfs_remove_desc(dev, desc);
		/* Mark the descriptor as no longer associated */
		desc->irq = 0;
	}
}
     984             : 
     985             : static void msi_domain_free_msi_descs(struct msi_domain_info *info,
     986             :                                       struct device *dev)
     987             : {
     988           0 :         if (info->flags & MSI_FLAG_FREE_MSI_DESCS)
     989             :                 msi_free_msi_descs(dev);
     990             : }
     991             : 
     992             : /**
     993             :  * msi_domain_free_irqs_descs_locked - Free interrupts from a MSI interrupt @domain associated to @dev
     994             :  * @domain:     The domain to managing the interrupts
     995             :  * @dev:        Pointer to device struct of the device for which the interrupts
     996             :  *              are free
     997             :  *
     998             :  * Must be invoked from within a msi_lock_descs() / msi_unlock_descs()
     999             :  * pair. Use this for MSI irqdomains which implement their own vector
    1000             :  * allocation.
    1001             :  */
    1002           0 : void msi_domain_free_irqs_descs_locked(struct irq_domain *domain, struct device *dev)
    1003             : {
    1004           0 :         struct msi_domain_info *info = domain->host_data;
    1005           0 :         struct msi_domain_ops *ops = info->ops;
    1006             : 
    1007             :         lockdep_assert_held(&dev->msi.data->mutex);
    1008             : 
    1009           0 :         ops->domain_free_irqs(domain, dev);
    1010           0 :         msi_domain_free_msi_descs(info, dev);
    1011           0 : }
    1012             : 
    1013             : /**
    1014             :  * msi_domain_free_irqs - Free interrupts from a MSI interrupt @domain associated to @dev
    1015             :  * @domain:     The domain to managing the interrupts
    1016             :  * @dev:        Pointer to device struct of the device for which the interrupts
    1017             :  *              are free
    1018             :  */
    1019           0 : void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
    1020             : {
    1021           0 :         msi_lock_descs(dev);
    1022           0 :         msi_domain_free_irqs_descs_locked(domain, dev);
    1023           0 :         msi_unlock_descs(dev);
    1024           0 : }
    1025             : 
    1026             : /**
    1027             :  * msi_get_domain_info - Get the MSI interrupt domain info for @domain
    1028             :  * @domain:     The interrupt domain to retrieve data from
    1029             :  *
    1030             :  * Return: the pointer to the msi_domain_info stored in @domain->host_data.
    1031             :  */
    1032           0 : struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain)
    1033             : {
    1034           0 :         return (struct msi_domain_info *)domain->host_data;
    1035             : }
    1036             : 
    1037             : #endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */

Generated by: LCOV version 1.14