LCOV - code coverage report
Current view: top level - drivers/pci - pci.c (source / functions) Hit Total Coverage
Test: coverage.info Lines: 6 2017 0.3 %
Date: 2022-12-09 01:23:36 Functions: 2 242 0.8 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * PCI Bus Services, see include/linux/pci.h for further explanation.
       4             :  *
       5             :  * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
       6             :  * David Mosberger-Tang
       7             :  *
       8             :  * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
       9             :  */
      10             : 
      11             : #include <linux/acpi.h>
      12             : #include <linux/kernel.h>
      13             : #include <linux/delay.h>
      14             : #include <linux/dmi.h>
      15             : #include <linux/init.h>
      16             : #include <linux/msi.h>
      17             : #include <linux/of.h>
      18             : #include <linux/pci.h>
      19             : #include <linux/pm.h>
      20             : #include <linux/slab.h>
      21             : #include <linux/module.h>
      22             : #include <linux/spinlock.h>
      23             : #include <linux/string.h>
      24             : #include <linux/log2.h>
      25             : #include <linux/logic_pio.h>
      26             : #include <linux/pm_wakeup.h>
      27             : #include <linux/interrupt.h>
      28             : #include <linux/device.h>
      29             : #include <linux/pm_runtime.h>
      30             : #include <linux/pci_hotplug.h>
      31             : #include <linux/vmalloc.h>
      32             : #include <asm/dma.h>
      33             : #include <linux/aer.h>
      34             : #include <linux/bitfield.h>
      35             : #include "pci.h"
      36             : 
/* Protects the global PCI slot list against concurrent updates. */
DEFINE_MUTEX(pci_slot_mutex);

/* Printable names for the PCI power states. */
const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

/* NOTE(review): set by quirk/arch code elsewhere — not written in this file. */
int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

/* NOTE(review): set by quirk/arch code elsewhere — not written in this file. */
int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

/* Global lower bound on the D3hot transition delay, in milliseconds. */
unsigned int pci_pm_d3hot_delay;

static void pci_pme_list_scan(struct work_struct *work);

/* Devices whose PME status is polled periodically by pci_pme_work. */
static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

/* One entry on pci_pme_list per polled device. */
struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000 /* How long between PME checks */
      64             : 
      65             : static void pci_dev_d3_sleep(struct pci_dev *dev)
      66             : {
      67           0 :         unsigned int delay = dev->d3hot_delay;
      68             : 
      69           0 :         if (delay < pci_pm_d3hot_delay)
      70           0 :                 delay = pci_pm_d3hot_delay;
      71             : 
      72           0 :         if (delay)
      73           0 :                 msleep(delay);
      74             : }
      75             : 
      76           0 : bool pci_reset_supported(struct pci_dev *dev)
      77             : {
      78           0 :         return dev->reset_methods[0] != 0;
      79             : }
      80             : 
#ifdef CONFIG_PCI_DOMAINS
/* Non-zero when the platform supports multiple PCI domains (segments). */
int pci_domains_supported = 1;
#endif

/* Default window sizes reserved behind CardBus bridges. */
#define DEFAULT_CARDBUS_IO_SIZE         (256)
#define DEFAULT_CARDBUS_MEM_SIZE        (64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

/* Default window sizes reserved behind hotplug bridges. */
#define DEFAULT_HOTPLUG_IO_SIZE         (256)
#define DEFAULT_HOTPLUG_MMIO_SIZE       (2*1024*1024)
#define DEFAULT_HOTPLUG_MMIO_PREF_SIZE  (2*1024*1024)
/* hpiosize=nn can override this */
unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
/*
 * pci=hpmmiosize=nnM overrides non-prefetchable MMIO size,
 * pci=hpmmioprefsize=nnM overrides prefetchable MMIO size;
 * pci=hpmemsize=nnM overrides both
 */
unsigned long pci_hotplug_mmio_size = DEFAULT_HOTPLUG_MMIO_SIZE;
unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE;

/* Number of bus numbers reserved for a hotplug bridge by default. */
#define DEFAULT_HOTPLUG_BUS_SIZE        1
unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;


/* PCIe MPS/MRRS strategy; can be overridden by kernel command-line param */
#ifdef CONFIG_PCIE_BUS_TUNE_OFF
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
#elif defined CONFIG_PCIE_BUS_SAFE
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE;
#elif defined CONFIG_PCIE_BUS_PERFORMANCE
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE;
#elif defined CONFIG_PCIE_BUS_PEER2PEER
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PEER2PEER;
#else
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
#endif

/*
 * The default CLS is used if arch didn't set CLS explicitly and not
 * all pci devices agree on the same value.  Arch can override either
 * the dfl or actual value as it sees fit.  Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/* If set, the PCIe ATS capability will not be used. */
static bool pcie_ats_disabled;

/* If set, the PCI config space of each device is printed during boot. */
bool pci_early_dump;
     144             : 
/**
 * pci_ats_disabled - report whether use of the PCIe ATS capability is off
 *
 * Returns true if ATS has been disabled system-wide (the static
 * pcie_ats_disabled flag is set; presumably via a kernel parameter
 * handled elsewhere — confirm against the command-line parsing code).
 */
bool pci_ats_disabled(void)
{
	return pcie_ats_disabled;
}
EXPORT_SYMBOL_GPL(pci_ats_disabled);
     150             : 
     151             : /* Disable bridge_d3 for all PCIe ports */
     152             : static bool pci_bridge_d3_disable;
     153             : /* Force bridge_d3 for all PCIe ports */
     154             : static bool pci_bridge_d3_force;
     155             : 
     156           0 : static int __init pcie_port_pm_setup(char *str)
     157             : {
     158           0 :         if (!strcmp(str, "off"))
     159           0 :                 pci_bridge_d3_disable = true;
     160           0 :         else if (!strcmp(str, "force"))
     161           0 :                 pci_bridge_d3_force = true;
     162           0 :         return 1;
     163             : }
     164             : __setup("pcie_port_pm=", pcie_port_pm_setup);
     165             : 
     166             : /* Time to wait after a reset for device to become responsive */
     167             : #define PCIE_RESET_READY_POLL_MS 60000
     168             : 
     169             : /**
     170             :  * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
     171             :  * @bus: pointer to PCI bus structure to search
     172             :  *
     173             :  * Given a PCI bus, returns the highest PCI bus number present in the set
     174             :  * including the given PCI bus and its list of child PCI buses.
     175             :  */
     176           0 : unsigned char pci_bus_max_busnr(struct pci_bus *bus)
     177             : {
     178             :         struct pci_bus *tmp;
     179             :         unsigned char max, n;
     180             : 
     181           0 :         max = bus->busn_res.end;
     182           0 :         list_for_each_entry(tmp, &bus->children, node) {
     183           0 :                 n = pci_bus_max_busnr(tmp);
     184           0 :                 if (n > max)
     185           0 :                         max = n;
     186             :         }
     187           0 :         return max;
     188             : }
     189             : EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
     190             : 
     191             : /**
     192             :  * pci_status_get_and_clear_errors - return and clear error bits in PCI_STATUS
     193             :  * @pdev: the PCI device
     194             :  *
     195             :  * Returns error bits set in PCI_STATUS and clears them.
     196             :  */
     197           0 : int pci_status_get_and_clear_errors(struct pci_dev *pdev)
     198             : {
     199             :         u16 status;
     200             :         int ret;
     201             : 
     202           0 :         ret = pci_read_config_word(pdev, PCI_STATUS, &status);
     203           0 :         if (ret != PCIBIOS_SUCCESSFUL)
     204             :                 return -EIO;
     205             : 
     206           0 :         status &= PCI_STATUS_ERROR_BITS;
     207           0 :         if (status)
     208           0 :                 pci_write_config_word(pdev, PCI_STATUS, status);
     209             : 
     210           0 :         return status;
     211             : }
     212             : EXPORT_SYMBOL_GPL(pci_status_get_and_clear_errors);
     213             : 
     214             : #ifdef CONFIG_HAS_IOMEM
     215           0 : static void __iomem *__pci_ioremap_resource(struct pci_dev *pdev, int bar,
     216             :                                             bool write_combine)
     217             : {
     218           0 :         struct resource *res = &pdev->resource[bar];
     219           0 :         resource_size_t start = res->start;
     220           0 :         resource_size_t size = resource_size(res);
     221             : 
     222             :         /*
     223             :          * Make sure the BAR is actually a memory resource, not an IO resource
     224             :          */
     225           0 :         if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
     226           0 :                 pci_err(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
     227           0 :                 return NULL;
     228             :         }
     229             : 
     230           0 :         if (write_combine)
     231           0 :                 return ioremap_wc(start, size);
     232             : 
     233           0 :         return ioremap(start, size);
     234             : }
     235             : 
/**
 * pci_ioremap_bar - map a memory BAR into kernel virtual address space
 * @pdev: PCI device whose BAR is to be mapped
 * @bar: BAR number (index into pdev->resource[])
 *
 * Returns the mapped address, or NULL if the BAR is not a usable
 * memory resource.
 */
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	return __pci_ioremap_resource(pdev, bar, false);
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
     241             : 
/**
 * pci_ioremap_wc_bar - map a memory BAR with write-combining enabled
 * @pdev: PCI device whose BAR is to be mapped
 * @bar: BAR number (index into pdev->resource[])
 *
 * Like pci_ioremap_bar(), but the mapping uses ioremap_wc().  Returns
 * NULL if the BAR is not a usable memory resource.
 */
void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
{
	return __pci_ioremap_resource(pdev, bar, true);
}
EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
     247             : #endif
     248             : 
/**
 * pci_dev_str_match_path - test if a path string matches a device
 * @dev: the PCI device to test
 * @path: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) formatted as a
 * path of device/function addresses matches a PCI device. The string must
 * be of the form:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *
 * A path for a device can be obtained using 'lspci -t'.  Using a path
 * is more robust against bus renumbering than using only a single bus,
 * device and function address.
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if it fails to parse the string.
 */
static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
				  const char **endptr)
{
	int ret;
	unsigned int seg, bus, slot, func;
	char *wpath, *p;
	char end;

	/* This path element ends at the next ';' (or the terminating NUL). */
	*endptr = strchrnul(path, ';');

	/* Work on a writable copy so matched elements can be chopped off. */
	wpath = kmemdup_nul(path, *endptr - path, GFP_ATOMIC);
	if (!wpath)
		return -ENOMEM;

	/*
	 * Match tail-first: compare the last <device>.<func> element
	 * against @dev, then walk up to the upstream bridge and repeat
	 * with the preceding element.
	 */
	while (1) {
		p = strrchr(wpath, '/');
		if (!p)
			break;
		ret = sscanf(p, "/%x.%x%c", &slot, &func, &end);
		/* Exactly two conversions: trailing garbage (%c) is an error. */
		if (ret != 2) {
			ret = -EINVAL;
			goto free_and_exit;
		}

		if (dev->devfn != PCI_DEVFN(slot, func)) {
			ret = 0;
			goto free_and_exit;
		}

		/*
		 * Note: we don't need to get a reference to the upstream
		 * bridge because we hold a reference to the top level
		 * device which should hold a reference to the bridge,
		 * and so on.
		 */
		dev = pci_upstream_bridge(dev);
		if (!dev) {
			ret = 0;
			goto free_and_exit;
		}

		*p = 0;	/* drop the element just matched */
	}

	/* Only the root [<domain>:]<bus>:<device>.<func> element remains. */
	ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot,
		     &func, &end);
	if (ret != 4) {
		/* No domain given; default to segment 0 and re-parse. */
		seg = 0;
		ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end);
		if (ret != 3) {
			ret = -EINVAL;
			goto free_and_exit;
		}
	}

	ret = (seg == pci_domain_nr(dev->bus) &&
	       bus == dev->bus->number &&
	       dev->devfn == PCI_DEVFN(slot, func));

free_and_exit:
	kfree(wpath);
	return ret;
}
     331             : 
/**
 * pci_dev_str_match - test if a string matches a device
 * @dev: the PCI device to test
 * @p: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) matches a specified
 * PCI device. The string may be of one of the following formats:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *   pci:<vendor>:<device>[:<subvendor>:<subdevice>]
 *
 * The first format specifies a PCI bus/device/function address which
 * may change if new hardware is inserted, if motherboard firmware changes,
 * or due to changes caused in kernel parameters. If the domain is
 * left unspecified, it is taken to be 0.  In order to be robust against
 * bus renumbering issues, a path of PCI device/function numbers may be used
 * to address the specific device.  The path for a device can be determined
 * through the use of 'lspci -t'.
 *
 * The second format matches devices using IDs in the configuration
 * space which may match multiple devices in the system. A value of 0
 * for any field will match all devices. (Note: this differs from
 * in-kernel code that uses PCI_ANY_ID which is ~0; this is for
 * legacy reasons and convenience so users don't have to specify
 * FFFFFFFFs on the command line.)
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if the string cannot be parsed.
 */
static int pci_dev_str_match(struct pci_dev *dev, const char *p,
			     const char **endptr)
{
	int ret;
	int count;
	unsigned short vendor, device, subsystem_vendor, subsystem_device;

	if (strncmp(p, "pci:", 4) == 0) {
		/* PCI vendor/device (subvendor/subdevice) IDs are specified */
		p += 4;
		/* Try the four-ID form first, then fall back to vendor:device. */
		ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device,
			     &subsystem_vendor, &subsystem_device, &count);
		if (ret != 4) {
			ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count);
			if (ret != 2)
				return -EINVAL;

			/* Zero means "match any" for the subsystem IDs. */
			subsystem_vendor = 0;
			subsystem_device = 0;
		}

		p += count;

		/* A zero in any field acts as a wildcard. */
		if ((!vendor || vendor == dev->vendor) &&
		    (!device || device == dev->device) &&
		    (!subsystem_vendor ||
			    subsystem_vendor == dev->subsystem_vendor) &&
		    (!subsystem_device ||
			    subsystem_device == dev->subsystem_device))
			goto found;
	} else {
		/*
		 * PCI Bus, Device, Function IDs are specified
		 * (optionally, may include a path of devfns following it)
		 */
		ret = pci_dev_str_match_path(dev, p, &p);
		if (ret < 0)
			return ret;
		else if (ret)
			goto found;
	}

	*endptr = p;
	return 0;

found:
	*endptr = p;
	return 1;
}
     411             : 
/*
 * Walk the conventional capability list starting from the "next" pointer
 * stored at config offset @pos, looking for capability ID @cap.  @ttl
 * bounds the number of hops so a corrupt (cyclic) list cannot loop
 * forever; it is decremented in place.  Returns the offset of the
 * matching capability, or 0 if none is found.
 */
static u8 __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				  u8 pos, int cap, int *ttl)
{
	u8 id;
	u16 ent;

	/* Load the offset of the next capability structure. */
	pci_bus_read_config_byte(bus, devfn, pos, &pos);

	while ((*ttl)--) {
		/* Offsets below 0x40 lie inside the standard header - stop. */
		if (pos < 0x40)
			break;
		/* Capability structures are dword-aligned. */
		pos &= ~3;
		pci_bus_read_config_word(bus, devfn, pos, &ent);

		/* Low byte is the capability ID, high byte the next pointer. */
		id = ent & 0xff;
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos = (ent >> 8);
	}
	return 0;
}
     435             : 
     436             : static u8 __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
     437             :                               u8 pos, int cap)
     438             : {
     439           0 :         int ttl = PCI_FIND_CAP_TTL;
     440             : 
     441           0 :         return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
     442             : }
     443             : 
/**
 * pci_find_next_capability - walk the capability list past a given position
 * @dev: PCI device to query
 * @pos: config space offset of the current capability structure
 * @cap: capability code to search for
 *
 * Returns the offset of the next capability of type @cap after @pos,
 * or 0 if there is none.
 */
u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);
     450             : 
     451           0 : static u8 __pci_bus_find_cap_start(struct pci_bus *bus,
     452             :                                     unsigned int devfn, u8 hdr_type)
     453             : {
     454             :         u16 status;
     455             : 
     456           0 :         pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
     457           0 :         if (!(status & PCI_STATUS_CAP_LIST))
     458             :                 return 0;
     459             : 
     460           0 :         switch (hdr_type) {
     461             :         case PCI_HEADER_TYPE_NORMAL:
     462             :         case PCI_HEADER_TYPE_BRIDGE:
     463             :                 return PCI_CAPABILITY_LIST;
     464             :         case PCI_HEADER_TYPE_CARDBUS:
     465           0 :                 return PCI_CB_CAPABILITY_LIST;
     466             :         }
     467             : 
     468           0 :         return 0;
     469             : }
     470             : 
     471             : /**
     472             :  * pci_find_capability - query for devices' capabilities
     473             :  * @dev: PCI device to query
     474             :  * @cap: capability code
     475             :  *
     476             :  * Tell if a device supports a given PCI capability.
     477             :  * Returns the address of the requested capability structure within the
     478             :  * device's PCI configuration space or 0 in case the device does not
     479             :  * support it.  Possible values for @cap include:
     480             :  *
     481             :  *  %PCI_CAP_ID_PM           Power Management
     482             :  *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
     483             :  *  %PCI_CAP_ID_VPD          Vital Product Data
     484             :  *  %PCI_CAP_ID_SLOTID       Slot Identification
     485             :  *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
     486             :  *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
     487             :  *  %PCI_CAP_ID_PCIX         PCI-X
     488             :  *  %PCI_CAP_ID_EXP          PCI Express
     489             :  */
     490           0 : u8 pci_find_capability(struct pci_dev *dev, int cap)
     491             : {
     492             :         u8 pos;
     493             : 
     494           0 :         pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
     495           0 :         if (pos)
     496           0 :                 pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);
     497             : 
     498           0 :         return pos;
     499             : }
     500             : EXPORT_SYMBOL(pci_find_capability);
     501             : 
     502             : /**
     503             :  * pci_bus_find_capability - query for devices' capabilities
     504             :  * @bus: the PCI bus to query
     505             :  * @devfn: PCI device to query
     506             :  * @cap: capability code
     507             :  *
     508             :  * Like pci_find_capability() but works for PCI devices that do not have a
     509             :  * pci_dev structure set up yet.
     510             :  *
     511             :  * Returns the address of the requested capability structure within the
     512             :  * device's PCI configuration space or 0 in case the device does not
     513             :  * support it.
     514             :  */
     515           0 : u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
     516             : {
     517             :         u8 hdr_type, pos;
     518             : 
     519           0 :         pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
     520             : 
     521           0 :         pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
     522           0 :         if (pos)
     523           0 :                 pos = __pci_find_next_cap(bus, devfn, pos, cap);
     524             : 
     525           0 :         return pos;
     526             : }
     527             : EXPORT_SYMBOL(pci_bus_find_capability);
     528             : 
/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */
u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 start, int cap)
{
	u32 header;
	int ttl;
	u16 pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	/* Extended capabilities live only in the extended config space. */
	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (start)
		pos = start;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		/* "pos != start" skips the capability we started from. */
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		/* A next pointer inside the standard space ends the list. */
		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);
     580             : 
/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap include:
 *
 *  %PCI_EXT_CAP_ID_ERR         Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC          Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN         Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR         Power Budgeting
 */
u16 pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	/* Starting at 0 returns the first occurrence of @cap. */
	return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);
     600             : 
     601             : /**
     602             :  * pci_get_dsn - Read and return the 8-byte Device Serial Number
     603             :  * @dev: PCI device to query
     604             :  *
     605             :  * Looks up the PCI_EXT_CAP_ID_DSN and reads the 8 bytes of the Device Serial
     606             :  * Number.
     607             :  *
     608             :  * Returns the DSN, or zero if the capability does not exist.
     609             :  */
     610           0 : u64 pci_get_dsn(struct pci_dev *dev)
     611             : {
     612             :         u32 dword;
     613             :         u64 dsn;
     614             :         int pos;
     615             : 
     616           0 :         pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN);
     617           0 :         if (!pos)
     618             :                 return 0;
     619             : 
     620             :         /*
     621             :          * The Device Serial Number is two dwords offset 4 bytes from the
     622             :          * capability position. The specification says that the first dword is
     623             :          * the lower half, and the second dword is the upper half.
     624             :          */
     625           0 :         pos += 4;
     626           0 :         pci_read_config_dword(dev, pos, &dword);
     627           0 :         dsn = (u64)dword;
     628           0 :         pci_read_config_dword(dev, pos + 4, &dword);
     629           0 :         dsn |= ((u64)dword) << 32;
     630             : 
     631           0 :         return dsn;
     632             : }
     633             : EXPORT_SYMBOL_GPL(pci_get_dsn);
     634             : 
     635           0 : static u8 __pci_find_next_ht_cap(struct pci_dev *dev, u8 pos, int ht_cap)
     636             : {
     637           0 :         int rc, ttl = PCI_FIND_CAP_TTL;
     638             :         u8 cap, mask;
     639             : 
     640           0 :         if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
     641             :                 mask = HT_3BIT_CAP_MASK;
     642             :         else
     643           0 :                 mask = HT_5BIT_CAP_MASK;
     644             : 
     645           0 :         pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
     646             :                                       PCI_CAP_ID_HT, &ttl);
     647           0 :         while (pos) {
     648           0 :                 rc = pci_read_config_byte(dev, pos + 3, &cap);
     649           0 :                 if (rc != PCIBIOS_SUCCESSFUL)
     650             :                         return 0;
     651             : 
     652           0 :                 if ((cap & mask) == ht_cap)
     653             :                         return pos;
     654             : 
     655           0 :                 pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
     656           0 :                                               pos + PCI_CAP_LIST_NEXT,
     657             :                                               PCI_CAP_ID_HT, &ttl);
     658             :         }
     659             : 
     660             :         return 0;
     661             : }
     662             : 
/**
 * pci_find_next_ht_capability - query a device's HyperTransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: HyperTransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap)
{
        /* Resume the list walk from the Next pointer of the entry at @pos. */
        return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
     681             : 
     682             : /**
     683             :  * pci_find_ht_capability - query a device's HyperTransport capabilities
     684             :  * @dev: PCI device to query
     685             :  * @ht_cap: HyperTransport capability code
     686             :  *
     687             :  * Tell if a device supports a given HyperTransport capability.
     688             :  * Returns an address within the device's PCI configuration space
     689             :  * or 0 in case the device does not support the request capability.
     690             :  * The address points to the PCI capability, of type PCI_CAP_ID_HT,
     691             :  * which has a HyperTransport capability matching @ht_cap.
     692             :  */
     693           0 : u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
     694             : {
     695             :         u8 pos;
     696             : 
     697           0 :         pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
     698           0 :         if (pos)
     699           0 :                 pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
     700             : 
     701           0 :         return pos;
     702             : }
     703             : EXPORT_SYMBOL_GPL(pci_find_ht_capability);
     704             : 
     705             : /**
     706             :  * pci_find_vsec_capability - Find a vendor-specific extended capability
     707             :  * @dev: PCI device to query
     708             :  * @vendor: Vendor ID for which capability is defined
     709             :  * @cap: Vendor-specific capability ID
     710             :  *
     711             :  * If @dev has Vendor ID @vendor, search for a VSEC capability with
     712             :  * VSEC ID @cap. If found, return the capability offset in
     713             :  * config space; otherwise return 0.
     714             :  */
     715           0 : u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap)
     716             : {
     717           0 :         u16 vsec = 0;
     718             :         u32 header;
     719             : 
     720           0 :         if (vendor != dev->vendor)
     721             :                 return 0;
     722             : 
     723           0 :         while ((vsec = pci_find_next_ext_capability(dev, vsec,
     724             :                                                      PCI_EXT_CAP_ID_VNDR))) {
     725           0 :                 if (pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER,
     726           0 :                                           &header) == PCIBIOS_SUCCESSFUL &&
     727           0 :                     PCI_VNDR_HEADER_ID(header) == cap)
     728             :                         return vsec;
     729             :         }
     730             : 
     731             :         return 0;
     732             : }
     733             : EXPORT_SYMBOL_GPL(pci_find_vsec_capability);
     734             : 
     735             : /**
     736             :  * pci_find_dvsec_capability - Find DVSEC for vendor
     737             :  * @dev: PCI device to query
     738             :  * @vendor: Vendor ID to match for the DVSEC
     739             :  * @dvsec: Designated Vendor-specific capability ID
     740             :  *
     741             :  * If DVSEC has Vendor ID @vendor and DVSEC ID @dvsec return the capability
     742             :  * offset in config space; otherwise return 0.
     743             :  */
     744           0 : u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec)
     745             : {
     746             :         int pos;
     747             : 
     748           0 :         pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DVSEC);
     749           0 :         if (!pos)
     750             :                 return 0;
     751             : 
     752           0 :         while (pos) {
     753             :                 u16 v, id;
     754             : 
     755           0 :                 pci_read_config_word(dev, pos + PCI_DVSEC_HEADER1, &v);
     756           0 :                 pci_read_config_word(dev, pos + PCI_DVSEC_HEADER2, &id);
     757           0 :                 if (vendor == v && dvsec == id)
     758           0 :                         return pos;
     759             : 
     760           0 :                 pos = pci_find_next_ext_capability(dev, pos, PCI_EXT_CAP_ID_DVSEC);
     761             :         }
     762             : 
     763             :         return 0;
     764             : }
     765             : EXPORT_SYMBOL_GPL(pci_find_dvsec_capability);
     766             : 
     767             : /**
     768             :  * pci_find_parent_resource - return resource region of parent bus of given
     769             :  *                            region
     770             :  * @dev: PCI device structure contains resources to be searched
     771             :  * @res: child resource record for which parent is sought
     772             :  *
     773             :  * For given resource region of given device, return the resource region of
     774             :  * parent bus the given region is contained in.
     775             :  */
     776           0 : struct resource *pci_find_parent_resource(const struct pci_dev *dev,
     777             :                                           struct resource *res)
     778             : {
     779           0 :         const struct pci_bus *bus = dev->bus;
     780             :         struct resource *r;
     781             :         int i;
     782             : 
     783           0 :         pci_bus_for_each_resource(bus, r, i) {
     784           0 :                 if (!r)
     785           0 :                         continue;
     786           0 :                 if (resource_contains(r, res)) {
     787             : 
     788             :                         /*
     789             :                          * If the window is prefetchable but the BAR is
     790             :                          * not, the allocator made a mistake.
     791             :                          */
     792           0 :                         if (r->flags & IORESOURCE_PREFETCH &&
     793           0 :                             !(res->flags & IORESOURCE_PREFETCH))
     794             :                                 return NULL;
     795             : 
     796             :                         /*
     797             :                          * If we're below a transparent bridge, there may
     798             :                          * be both a positively-decoded aperture and a
     799             :                          * subtractively-decoded region that contain the BAR.
     800             :                          * We want the positively-decoded one, so this depends
     801             :                          * on pci_bus_for_each_resource() giving us those
     802             :                          * first.
     803             :                          */
     804           0 :                         return r;
     805             :                 }
     806             :         }
     807             :         return NULL;
     808             : }
     809             : EXPORT_SYMBOL(pci_find_parent_resource);
     810             : 
     811             : /**
     812             :  * pci_find_resource - Return matching PCI device resource
     813             :  * @dev: PCI device to query
     814             :  * @res: Resource to look for
     815             :  *
     816             :  * Goes over standard PCI resources (BARs) and checks if the given resource
     817             :  * is partially or fully contained in any of them. In that case the
     818             :  * matching resource is returned, %NULL otherwise.
     819             :  */
     820           0 : struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
     821             : {
     822             :         int i;
     823             : 
     824           0 :         for (i = 0; i < PCI_STD_NUM_BARS; i++) {
     825           0 :                 struct resource *r = &dev->resource[i];
     826             : 
     827           0 :                 if (r->start && resource_contains(r, res))
     828             :                         return r;
     829             :         }
     830             : 
     831             :         return NULL;
     832             : }
     833             : EXPORT_SYMBOL(pci_find_resource);
     834             : 
     835             : /**
     836             :  * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
     837             :  * @dev: the PCI device to operate on
     838             :  * @pos: config space offset of status word
     839             :  * @mask: mask of bit(s) to care about in status word
     840             :  *
     841             :  * Return 1 when mask bit(s) in status word clear, 0 otherwise.
     842             :  */
     843           0 : int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
     844             : {
     845             :         int i;
     846             : 
     847             :         /* Wait for Transaction Pending bit clean */
     848           0 :         for (i = 0; i < 4; i++) {
     849             :                 u16 status;
     850           0 :                 if (i)
     851           0 :                         msleep((1 << (i - 1)) * 100);
     852             : 
     853           0 :                 pci_read_config_word(dev, pos, &status);
     854           0 :                 if (!(status & mask))
     855           0 :                         return 1;
     856             :         }
     857             : 
     858             :         return 0;
     859             : }
     860             : 
/* Nonzero once any caller has asked for ACS to be enabled (see below). */
static int pci_acs_enable;

/**
 * pci_request_acs - ask for ACS to be enabled if supported
 */
void pci_request_acs(void)
{
        pci_acs_enable = 1;
}
     870             : 
/*
 * Device-specifier list naming the devices whose ACS redirect should be
 * disabled; presumably set from a kernel command-line parameter — the
 * setter is not visible in this part of the file.
 */
static const char *disable_acs_redir_param;

/**
 * pci_disable_acs_redir - disable ACS redirect capabilities
 * @dev: the PCI device
 *
 * For only devices specified in the disable_acs_redir parameter.
 */
static void pci_disable_acs_redir(struct pci_dev *dev)
{
        int ret = 0;
        const char *p;
        int pos;
        u16 ctrl;

        if (!disable_acs_redir_param)
                return;

        /*
         * Scan the ';'/',' separated specifier list; pci_dev_str_match()
         * advances @p past the specifier it consumed.  ret < 0 is a parse
         * error, ret == 1 means @dev matched this specifier.
         */
        p = disable_acs_redir_param;
        while (*p) {
                ret = pci_dev_str_match(dev, p, &p);
                if (ret < 0) {
                        pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n",
                                     disable_acs_redir_param);

                        break;
                } else if (ret == 1) {
                        /* Found a match */
                        break;
                }

                if (*p != ';' && *p != ',') {
                        /* End of param or invalid format */
                        break;
                }
                p++;
        }

        /* Only act when @dev matched one of the listed specifiers. */
        if (ret != 1)
                return;

        /*
         * NOTE(review): a 0 return here appears to mean a device-specific
         * quirk already handled the disable — confirm against the helper's
         * definition elsewhere in the tree.
         */
        if (!pci_dev_specific_disable_acs_redir(dev))
                return;

        pos = dev->acs_cap;
        if (!pos) {
                pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n");
                return;
        }

        pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);

        /* P2P Request & Completion Redirect */
        ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);

        pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);

        pci_info(dev, "disabled ACS redirect\n");
}
     930             : 
     931             : /**
     932             :  * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
     933             :  * @dev: the PCI device
     934             :  */
     935           0 : static void pci_std_enable_acs(struct pci_dev *dev)
     936             : {
     937             :         int pos;
     938             :         u16 cap;
     939             :         u16 ctrl;
     940             : 
     941           0 :         pos = dev->acs_cap;
     942           0 :         if (!pos)
     943           0 :                 return;
     944             : 
     945           0 :         pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
     946           0 :         pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
     947             : 
     948             :         /* Source Validation */
     949           0 :         ctrl |= (cap & PCI_ACS_SV);
     950             : 
     951             :         /* P2P Request Redirect */
     952           0 :         ctrl |= (cap & PCI_ACS_RR);
     953             : 
     954             :         /* P2P Completion Redirect */
     955           0 :         ctrl |= (cap & PCI_ACS_CR);
     956             : 
     957             :         /* Upstream Forwarding */
     958           0 :         ctrl |= (cap & PCI_ACS_UF);
     959             : 
     960             :         /* Enable Translation Blocking for external devices and noats */
     961           0 :         if (pci_ats_disabled() || dev->external_facing || dev->untrusted)
     962           0 :                 ctrl |= (cap & PCI_ACS_TB);
     963             : 
     964           0 :         pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
     965             : }
     966             : 
     967             : /**
     968             :  * pci_enable_acs - enable ACS if hardware support it
     969             :  * @dev: the PCI device
     970             :  */
     971           0 : static void pci_enable_acs(struct pci_dev *dev)
     972             : {
     973           0 :         if (!pci_acs_enable)
     974             :                 goto disable_acs_redir;
     975             : 
     976           0 :         if (!pci_dev_specific_enable_acs(dev))
     977             :                 goto disable_acs_redir;
     978             : 
     979           0 :         pci_std_enable_acs(dev);
     980             : 
     981             : disable_acs_redir:
     982             :         /*
     983             :          * Note: pci_disable_acs_redir() must be called even if ACS was not
     984             :          * enabled by the kernel because it may have been enabled by
     985             :          * platform firmware.  So if we are told to disable it, we should
     986             :          * always disable it after setting the kernel's default
     987             :          * preferences.
     988             :          */
     989           0 :         pci_disable_acs_redir(dev);
     990           0 : }
     991             : 
     992             : /**
     993             :  * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
     994             :  * @dev: PCI device to have its BARs restored
     995             :  *
     996             :  * Restore the BAR values for a given device, so as to make it
     997             :  * accessible by its driver.
     998             :  */
     999             : static void pci_restore_bars(struct pci_dev *dev)
    1000             : {
    1001             :         int i;
    1002             : 
    1003           0 :         for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
    1004           0 :                 pci_update_resource(dev, i);
    1005             : }
    1006             : 
    1007             : static inline bool platform_pci_power_manageable(struct pci_dev *dev)
    1008             : {
    1009             :         if (pci_use_mid_pm())
    1010             :                 return true;
    1011             : 
    1012           0 :         return acpi_pci_power_manageable(dev);
    1013             : }
    1014             : 
    1015             : static inline int platform_pci_set_power_state(struct pci_dev *dev,
    1016             :                                                pci_power_t t)
    1017             : {
    1018             :         if (pci_use_mid_pm())
    1019             :                 return mid_pci_set_power_state(dev, t);
    1020             : 
    1021           0 :         return acpi_pci_set_power_state(dev, t);
    1022             : }
    1023             : 
    1024             : static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
    1025             : {
    1026             :         if (pci_use_mid_pm())
    1027             :                 return mid_pci_get_power_state(dev);
    1028             : 
    1029           0 :         return acpi_pci_get_power_state(dev);
    1030             : }
    1031             : 
    1032             : static inline void platform_pci_refresh_power_state(struct pci_dev *dev)
    1033             : {
    1034             :         if (!pci_use_mid_pm())
    1035             :                 acpi_pci_refresh_power_state(dev);
    1036             : }
    1037             : 
    1038             : static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
    1039             : {
    1040             :         if (pci_use_mid_pm())
    1041             :                 return PCI_POWER_ERROR;
    1042             : 
    1043             :         return acpi_pci_choose_state(dev);
    1044             : }
    1045             : 
    1046             : static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
    1047             : {
    1048             :         if (pci_use_mid_pm())
    1049             :                 return PCI_POWER_ERROR;
    1050             : 
    1051           0 :         return acpi_pci_wakeup(dev, enable);
    1052             : }
    1053             : 
    1054             : static inline bool platform_pci_need_resume(struct pci_dev *dev)
    1055             : {
    1056             :         if (pci_use_mid_pm())
    1057             :                 return false;
    1058             : 
    1059           0 :         return acpi_pci_need_resume(dev);
    1060             : }
    1061             : 
    1062             : static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
    1063             : {
    1064             :         if (pci_use_mid_pm())
    1065             :                 return false;
    1066             : 
    1067           0 :         return acpi_pci_bridge_d3(dev);
    1068             : }
    1069             : 
/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *                           given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
        u16 pmcsr;
        bool need_restore = false;

        /* Check if we're already there */
        if (dev->current_state == state)
                return 0;

        /* No PM capability means we cannot program PMCSR at all. */
        if (!dev->pm_cap)
                return -EIO;

        /* Only D0..D3hot can be entered through PMCSR writes. */
        if (state < PCI_D0 || state > PCI_D3hot)
                return -EINVAL;

        /*
         * Validate transition: We can enter D0 from any state, but if
         * we're already in a low-power state, we can only go deeper.  E.g.,
         * we can go from D1 to D3, but we can't go directly from D3 to D1;
         * we'd have to go from D3 to D0, then to D1.
         */
        if (state != PCI_D0 && dev->current_state <= PCI_D3cold
            && dev->current_state > state) {
                pci_err(dev, "invalid power transition (from %s to %s)\n",
                        pci_power_name(dev->current_state),
                        pci_power_name(state));
                return -EINVAL;
        }

        /* Check if this device supports the desired state */
        if ((state == PCI_D1 && !dev->d1_support)
           || (state == PCI_D2 && !dev->d2_support))
                return -EIO;

        pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
        if (PCI_POSSIBLE_ERROR(pmcsr)) {
                /* All-ones read: device is likely in D3cold or gone. */
                pci_err(dev, "can't change power state from %s to %s (config space inaccessible)\n",
                        pci_power_name(dev->current_state),
                        pci_power_name(state));
                return -EIO;
        }

        /*
         * If we're (effectively) in D3, force entire word to 0.
         * This doesn't affect PME_Status, disables PME_En, and
         * sets PowerState to 0.
         */
        switch (dev->current_state) {
        case PCI_D0:
        case PCI_D1:
        case PCI_D2:
                pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
                pmcsr |= state;
                break;
        case PCI_D3hot:
        case PCI_D3cold:
        case PCI_UNKNOWN: /* Boot-up */
                /*
                 * Without No_Soft_Reset, leaving D3hot may reset the
                 * device, so remember to restore its BARs afterwards.
                 */
                if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
                 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
                        need_restore = true;
                fallthrough;    /* force to D0 */
        default:
                pmcsr = 0;
                break;
        }

        /* Enter specified state */
        pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

        /*
         * Mandatory power management transition delays; see PCI PM 1.1
         * 5.6.1 table 18
         */
        if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
                pci_dev_d3_sleep(dev);
        else if (state == PCI_D2 || dev->current_state == PCI_D2)
                udelay(PCI_PM_D2_DELAY);

        /* Read back: the device may have refused the transition. */
        pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
        dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
        if (dev->current_state != state)
                pci_info_ratelimited(dev, "refused to change power state from %s to %s\n",
                         pci_power_name(dev->current_state),
                         pci_power_name(state));

        /*
         * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
         * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
         * from D3hot to D0 _may_ perform an internal reset, thereby
         * going to "D0 Uninitialized" rather than "D0 Initialized".
         * For example, at least some versions of the 3c905B and the
         * 3c556B exhibit this behaviour.
         *
         * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
         * devices in a D3hot state at boot.  Consequently, we need to
         * restore at least the BARs so that the device will be
         * accessible to its driver.
         */
        if (need_restore)
                pci_restore_bars(dev);

        /* Let ASPM on the upstream link react to the new device state. */
        if (dev->bus->self)
                pcie_aspm_pm_state_change(dev->bus->self);

        return 0;
}
    1189             : 
    1190             : /**
    1191             :  * pci_update_current_state - Read power state of given device and cache it
    1192             :  * @dev: PCI device to handle.
    1193             :  * @state: State to cache in case the device doesn't have the PM capability
    1194             :  *
    1195             :  * The power state is read from the PMCSR register, which however is
    1196             :  * inaccessible in D3cold.  The platform firmware is therefore queried first
    1197             :  * to detect accessibility of the register.  In case the platform firmware
    1198             :  * reports an incorrect state or the device isn't power manageable by the
    1199             :  * platform at all, we try to detect D3cold by testing accessibility of the
    1200             :  * vendor ID in config space.
    1201             :  */
    1202           0 : void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
    1203             : {
    1204           0 :         if (platform_pci_get_power_state(dev) == PCI_D3cold ||
    1205           0 :             !pci_device_is_present(dev)) {
    1206           0 :                 dev->current_state = PCI_D3cold;
    1207           0 :         } else if (dev->pm_cap) {
    1208             :                 u16 pmcsr;
    1209             : 
    1210           0 :                 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
    1211           0 :                 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
    1212             :         } else {
    1213           0 :                 dev->current_state = state;
    1214             :         }
    1215           0 : }
    1216             : 
    1217             : /**
    1218             :  * pci_refresh_power_state - Refresh the given device's power state data
    1219             :  * @dev: Target PCI device.
    1220             :  *
 * Ask the platform to refresh the device's power state information and invoke
    1222             :  * pci_update_current_state() to update its current PCI power state.
    1223             :  */
void pci_refresh_power_state(struct pci_dev *dev)
{
	/*
	 * Let the platform firmware update its view of the device's power
	 * state first; pci_update_current_state() consults the platform
	 * before falling back to the PMCSR register.
	 */
	platform_pci_refresh_power_state(dev);
	pci_update_current_state(dev, dev->current_state);
}
    1229             : 
    1230             : /**
    1231             :  * pci_platform_power_transition - Use platform to change device power state
    1232             :  * @dev: PCI device to handle.
    1233             :  * @state: State to put the device into.
    1234             :  */
    1235           0 : int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
    1236             : {
    1237             :         int error;
    1238             : 
    1239           0 :         error = platform_pci_set_power_state(dev, state);
    1240             :         if (!error)
    1241             :                 pci_update_current_state(dev, state);
    1242           0 :         else if (!dev->pm_cap) /* Fall back to PCI_D0 */
    1243           0 :                 dev->current_state = PCI_D0;
    1244             : 
    1245           0 :         return error;
    1246             : }
    1247             : EXPORT_SYMBOL_GPL(pci_platform_power_transition);
    1248             : 
    1249           0 : static int pci_resume_one(struct pci_dev *pci_dev, void *ign)
    1250             : {
    1251           0 :         pm_request_resume(&pci_dev->dev);
    1252           0 :         return 0;
    1253             : }
    1254             : 
    1255             : /**
    1256             :  * pci_resume_bus - Walk given bus and runtime resume devices on it
    1257             :  * @bus: Top bus of the subtree to walk.
    1258             :  */
    1259           0 : void pci_resume_bus(struct pci_bus *bus)
    1260             : {
    1261           0 :         if (bus)
    1262           0 :                 pci_walk_bus(bus, pci_resume_one, NULL);
    1263           0 : }
    1264             : 
/*
 * Poll the device's config space after @reset_type until it responds,
 * sleeping with exponential backoff (1, 2, 4, ... ms) between reads.
 * @timeout is in milliseconds.  Returns 0 once the device answers a
 * config read, -ENOTTY if it never does within the timeout.
 */
static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
{
	int delay = 1;
	u32 id;

	/*
	 * After reset, the device should not silently discard config
	 * requests, but it may still indicate that it needs more time by
	 * responding to them with CRS completions.  The Root Port will
	 * generally synthesize ~0 (PCI_ERROR_RESPONSE) data to complete
	 * the read (except when CRS SV is enabled and the read was for the
	 * Vendor ID; in that case it synthesizes 0x0001 data).
	 *
	 * Wait for the device to return a non-CRS completion.  Read the
	 * Command register instead of Vendor ID so we don't have to
	 * contend with the CRS SV value.
	 */
	pci_read_config_dword(dev, PCI_COMMAND, &id);
	while (PCI_POSSIBLE_ERROR(id)) {
		if (delay > timeout) {
			/* delay - 1 == total time already slept */
			pci_warn(dev, "not ready %dms after %s; giving up\n",
				 delay - 1, reset_type);
			return -ENOTTY;
		}

		/* Start logging once we've waited more than a second */
		if (delay > 1000)
			pci_info(dev, "not ready %dms after %s; waiting\n",
				 delay - 1, reset_type);

		msleep(delay);
		delay *= 2;
		pci_read_config_dword(dev, PCI_COMMAND, &id);
	}

	if (delay > 1000)
		pci_info(dev, "ready %dms after %s\n", delay - 1,
			 reset_type);

	return 0;
}
    1305             : 
    1306             : /**
    1307             :  * pci_power_up - Put the given device into D0
    1308             :  * @dev: PCI device to power up
    1309             :  */
    1310           0 : int pci_power_up(struct pci_dev *dev)
    1311             : {
    1312           0 :         pci_platform_power_transition(dev, PCI_D0);
    1313             : 
    1314             :         /*
    1315             :          * Mandatory power management transition delays are handled in
    1316             :          * pci_pm_resume_noirq() and pci_pm_runtime_resume() of the
    1317             :          * corresponding bridge.
    1318             :          */
    1319           0 :         if (dev->runtime_d3cold) {
    1320             :                 /*
    1321             :                  * When powering on a bridge from D3cold, the whole hierarchy
    1322             :                  * may be powered on into D0uninitialized state, resume them to
    1323             :                  * give them a chance to suspend again
    1324             :                  */
    1325           0 :                 pci_resume_bus(dev->subordinate);
    1326             :         }
    1327             : 
    1328           0 :         return pci_raw_set_power_state(dev, PCI_D0);
    1329             : }
    1330             : 
    1331             : /**
    1332             :  * __pci_dev_set_current_state - Set current state of a PCI device
    1333             :  * @dev: Device to handle
    1334             :  * @data: pointer to state to be set
    1335             :  */
    1336           0 : static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
    1337             : {
    1338           0 :         pci_power_t state = *(pci_power_t *)data;
    1339             : 
    1340           0 :         dev->current_state = state;
    1341           0 :         return 0;
    1342             : }
    1343             : 
    1344             : /**
    1345             :  * pci_bus_set_current_state - Walk given bus and set current state of devices
    1346             :  * @bus: Top bus of the subtree to walk.
    1347             :  * @state: state to be set
    1348             :  */
    1349           0 : void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
    1350             : {
    1351           0 :         if (bus)
    1352           0 :                 pci_walk_bus(bus, __pci_dev_set_current_state, &state);
    1353           0 : }
    1354             : 
    1355             : /**
    1356             :  * pci_set_power_state - Set the power state of a PCI device
    1357             :  * @dev: PCI device to handle.
    1358             :  * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
    1359             :  *
    1360             :  * Transition a device to a new power state, using the platform firmware and/or
    1361             :  * the device's PCI PM registers.
    1362             :  *
    1363             :  * RETURN VALUE:
    1364             :  * -EINVAL if the requested state is invalid.
    1365             :  * -EIO if device does not support PCI PM or its PM capabilities register has a
    1366             :  * wrong version, or device doesn't support the requested state.
    1367             :  * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
    1368             :  * 0 if device already is in the requested state.
    1369             :  * 0 if the transition is to D3 but D3 is not supported.
    1370             :  * 0 if device's power state has been successfully changed.
    1371             :  */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* Bound the state we're entering */
	if (state > PCI_D3cold)
		state = PCI_D3cold;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))

		/*
		 * If the device or the parent bridge do not support PCI
		 * PM, ignore the request if we're doing anything other
		 * than putting it into D0 (which would only happen on
		 * boot).
		 */
		return 0;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	/* D0 has its own path: platform transition plus hierarchy resume */
	if (state == PCI_D0)
		return pci_power_up(dev);

	/*
	 * This device is quirked not to be put into D3, so don't put it in
	 * D3
	 */
	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	/*
	 * To put device in D3cold, we put device into D3hot in native
	 * way, then put device into D3cold with platform ops
	 */
	error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
					PCI_D3hot : state);

	/*
	 * If the platform step fails, report the outcome of the native
	 * PMCSR write above instead of the platform error.
	 */
	if (pci_platform_power_transition(dev, state))
		return error;

	/* Powering off a bridge may power off the whole hierarchy */
	if (state == PCI_D3cold)
		pci_bus_set_current_state(dev->subordinate, PCI_D3cold);

	return 0;
}
    1421             : EXPORT_SYMBOL(pci_set_power_state);
    1422             : 
    1423             : #define PCI_EXP_SAVE_REGS       7
    1424             : 
    1425             : static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
    1426             :                                                        u16 cap, bool extended)
    1427             : {
    1428             :         struct pci_cap_saved_state *tmp;
    1429             : 
    1430           0 :         hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
    1431           0 :                 if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
    1432             :                         return tmp;
    1433             :         }
    1434             :         return NULL;
    1435             : }
    1436             : 
    1437           0 : struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
    1438             : {
    1439           0 :         return _pci_find_saved_cap(dev, cap, false);
    1440             : }
    1441             : 
    1442           0 : struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
    1443             : {
    1444           0 :         return _pci_find_saved_cap(dev, cap, true);
    1445             : }
    1446             : 
/*
 * Snapshot the writable PCIe capability control registers into the
 * pre-allocated save buffer.  Returns 0 (also for non-PCIe devices),
 * -ENOMEM if no save buffer was registered.
 */
static int pci_save_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		pci_err(dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	/*
	 * PCI_EXP_SAVE_REGS words; the order here must match the write
	 * order in pci_restore_pcie_state().
	 */
	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_RTCTL,  &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}
    1473             : 
    1474           0 : void pci_bridge_reconfigure_ltr(struct pci_dev *dev)
    1475             : {
    1476             : #ifdef CONFIG_PCIEASPM
    1477             :         struct pci_dev *bridge;
    1478             :         u32 ctl;
    1479             : 
    1480           0 :         bridge = pci_upstream_bridge(dev);
    1481           0 :         if (bridge && bridge->ltr_path) {
    1482           0 :                 pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl);
    1483           0 :                 if (!(ctl & PCI_EXP_DEVCTL2_LTR_EN)) {
    1484             :                         pci_dbg(bridge, "re-enabling LTR\n");
    1485             :                         pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
    1486             :                                                  PCI_EXP_DEVCTL2_LTR_EN);
    1487             :                 }
    1488             :         }
    1489             : #endif
    1490           0 : }
    1491             : 
/*
 * Write back the PCIe capability control registers captured by
 * pci_save_pcie_state().  Silently does nothing if no state was saved.
 */
static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state)
		return;

	/*
	 * Downstream ports reset the LTR enable bit when link goes down.
	 * Check and re-configure the bit here before restoring device.
	 * PCIe r5.0, sec 7.5.3.16.
	 */
	pci_bridge_reconfigure_ltr(dev);

	/* Write order must match the read order in pci_save_pcie_state(). */
	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
}
    1518             : 
    1519           0 : static int pci_save_pcix_state(struct pci_dev *dev)
    1520             : {
    1521             :         int pos;
    1522             :         struct pci_cap_saved_state *save_state;
    1523             : 
    1524           0 :         pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
    1525           0 :         if (!pos)
    1526             :                 return 0;
    1527             : 
    1528           0 :         save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
    1529           0 :         if (!save_state) {
    1530           0 :                 pci_err(dev, "buffer not found in %s\n", __func__);
    1531           0 :                 return -ENOMEM;
    1532             :         }
    1533             : 
    1534           0 :         pci_read_config_word(dev, pos + PCI_X_CMD,
    1535           0 :                              (u16 *)save_state->cap.data);
    1536             : 
    1537           0 :         return 0;
    1538             : }
    1539             : 
    1540           0 : static void pci_restore_pcix_state(struct pci_dev *dev)
    1541             : {
    1542           0 :         int i = 0, pos;
    1543             :         struct pci_cap_saved_state *save_state;
    1544             :         u16 *cap;
    1545             : 
    1546           0 :         save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
    1547           0 :         pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
    1548           0 :         if (!save_state || !pos)
    1549             :                 return;
    1550           0 :         cap = (u16 *)&save_state->cap.data[0];
    1551             : 
    1552           0 :         pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
    1553             : }
    1554             : 
    1555           0 : static void pci_save_ltr_state(struct pci_dev *dev)
    1556             : {
    1557             :         int ltr;
    1558             :         struct pci_cap_saved_state *save_state;
    1559             :         u32 *cap;
    1560             : 
    1561           0 :         if (!pci_is_pcie(dev))
    1562             :                 return;
    1563             : 
    1564           0 :         ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
    1565           0 :         if (!ltr)
    1566             :                 return;
    1567             : 
    1568           0 :         save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
    1569           0 :         if (!save_state) {
    1570           0 :                 pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
    1571           0 :                 return;
    1572             :         }
    1573             : 
    1574             :         /* Some broken devices only support dword access to LTR */
    1575           0 :         cap = &save_state->cap.data[0];
    1576           0 :         pci_read_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap);
    1577             : }
    1578             : 
    1579           0 : static void pci_restore_ltr_state(struct pci_dev *dev)
    1580             : {
    1581             :         struct pci_cap_saved_state *save_state;
    1582             :         int ltr;
    1583             :         u32 *cap;
    1584             : 
    1585           0 :         save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
    1586           0 :         ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
    1587           0 :         if (!save_state || !ltr)
    1588             :                 return;
    1589             : 
    1590             :         /* Some broken devices only support dword access to LTR */
    1591           0 :         cap = &save_state->cap.data[0];
    1592           0 :         pci_write_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap);
    1593             : }
    1594             : 
    1595             : /**
    1596             :  * pci_save_state - save the PCI configuration space of a device before
    1597             :  *                  suspending
    1598             :  * @dev: PCI device that we're dealing with
    1599             :  */
int pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	/* Snapshot the 64-byte legacy config header as 16 dwords */
	for (i = 0; i < 16; i++) {
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
		pci_dbg(dev, "saving config space at offset %#x (reading %#x)\n",
			i * 4, dev->saved_config_space[i]);
	}
	dev->state_saved = true;

	/* PCIe/PCI-X capability saves can fail if no buffer was registered */
	i = pci_save_pcie_state(dev);
	if (i != 0)
		return i;

	i = pci_save_pcix_state(dev);
	if (i != 0)
		return i;

	/* The remaining capability saves are best-effort (void) */
	pci_save_ltr_state(dev);
	pci_save_dpc_state(dev);
	pci_save_aer_state(dev);
	pci_save_ptm_state(dev);
	return pci_save_vc_state(dev);
}
    1625             : EXPORT_SYMBOL(pci_save_state);
    1626             : 
/*
 * Restore one config-space dword at @offset to @saved_val, re-reading and
 * rewriting up to @retry times until it sticks.  @force writes even when
 * the current value already matches.  With @retry == 0 the value is
 * written once without any read-back verification.
 */
static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
				     u32 saved_val, int retry, bool force)
{
	u32 val;

	pci_read_config_dword(pdev, offset, &val);
	if (!force && val == saved_val)
		return;

	for (;;) {
		pci_dbg(pdev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
			offset, val, saved_val);
		pci_write_config_dword(pdev, offset, saved_val);
		if (retry-- <= 0)
			return;

		/* Verify the write took effect; retry after 1 ms if not */
		pci_read_config_dword(pdev, offset, &val);
		if (val == saved_val)
			return;

		mdelay(1);
	}
}
    1650             : 
/*
 * Restore the saved config dwords [@start, @end] (inclusive dword
 * indices).  Iterates from @end down to @start so that lower offsets
 * (e.g. the command register at dword 1) are written last; @retry and
 * @force are passed through to pci_restore_config_dword().
 */
static void pci_restore_config_space_range(struct pci_dev *pdev,
					   int start, int end, int retry,
					   bool force)
{
	int index;

	for (index = end; index >= start; index--)
		pci_restore_config_dword(pdev, 4 * index,
					 pdev->saved_config_space[index],
					 retry, force);
}
    1662             : 
/*
 * Restore the saved 64-byte config header, choosing dword ranges and
 * retry behavior according to the header type.
 */
static void pci_restore_config_space(struct pci_dev *pdev)
{
	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
		pci_restore_config_space_range(pdev, 10, 15, 0, false);
		/* Restore BARs before the command register. */
		pci_restore_config_space_range(pdev, 4, 9, 10, false);
		pci_restore_config_space_range(pdev, 0, 3, 0, false);
	} else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
		pci_restore_config_space_range(pdev, 12, 15, 0, false);

		/*
		 * Force rewriting of prefetch registers to avoid S3 resume
		 * issues on Intel PCI bridges that occur when these
		 * registers are not explicitly written.
		 */
		pci_restore_config_space_range(pdev, 9, 11, 0, true);
		pci_restore_config_space_range(pdev, 0, 8, 0, false);
	} else {
		/* Unknown header type: restore everything, highest first */
		pci_restore_config_space_range(pdev, 0, 15, 0, false);
	}
}
    1684             : 
    1685           0 : static void pci_restore_rebar_state(struct pci_dev *pdev)
    1686             : {
    1687             :         unsigned int pos, nbars, i;
    1688             :         u32 ctrl;
    1689             : 
    1690           0 :         pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
    1691           0 :         if (!pos)
    1692           0 :                 return;
    1693             : 
    1694           0 :         pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
    1695           0 :         nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
    1696             :                     PCI_REBAR_CTRL_NBAR_SHIFT;
    1697             : 
    1698           0 :         for (i = 0; i < nbars; i++, pos += 8) {
    1699             :                 struct resource *res;
    1700             :                 int bar_idx, size;
    1701             : 
    1702           0 :                 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
    1703           0 :                 bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
    1704           0 :                 res = pdev->resource + bar_idx;
    1705           0 :                 size = pci_rebar_bytes_to_size(resource_size(res));
    1706           0 :                 ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
    1707           0 :                 ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
    1708           0 :                 pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
    1709             :         }
    1710             : }
    1711             : 
    1712             : /**
    1713             :  * pci_restore_state - Restore the saved state of a PCI device
    1714             :  * @dev: PCI device that we're dealing with
    1715             :  */
void pci_restore_state(struct pci_dev *dev)
{
	if (!dev->state_saved)
		return;

	/*
	 * Restore max latencies (in the LTR capability) before enabling
	 * LTR itself (in the PCIe capability).
	 */
	pci_restore_ltr_state(dev);

	pci_restore_pcie_state(dev);
	pci_restore_pasid_state(dev);
	pci_restore_pri_state(dev);
	pci_restore_ats_state(dev);
	pci_restore_vc_state(dev);
	pci_restore_rebar_state(dev);
	pci_restore_dpc_state(dev);
	pci_restore_ptm_state(dev);

	/* Clear stale AER status before restoring AER control registers */
	pci_aer_clear_status(dev);
	pci_restore_aer_state(dev);

	/* Legacy config header (BARs, then command register) */
	pci_restore_config_space(dev);

	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);

	/* Restore ACS and IOV configuration state */
	pci_enable_acs(dev);
	pci_restore_iov_state(dev);

	/* A saved snapshot is consumed by restoring it */
	dev->state_saved = false;
}
    1750             : EXPORT_SYMBOL(pci_restore_state);
    1751             : 
/*
 * Opaque snapshot handed out by pci_store_saved_state(): a copy of the
 * saved config-space dwords followed by every saved capability packed
 * back-to-back.  The capability list is terminated by an entry whose
 * size field is zero (see pci_load_saved_state()).
 */
struct pci_saved_state {
        u32 config_space[16];                   /* copy of dev->saved_config_space */
        struct pci_cap_saved_data cap[];        /* packed entries, size==0 terminator */
};
    1756             : 
    1757             : /**
    1758             :  * pci_store_saved_state - Allocate and return an opaque struct containing
    1759             :  *                         the device saved state.
    1760             :  * @dev: PCI device that we're dealing with
    1761             :  *
    1762             :  * Return NULL if no state or error.
    1763             :  */
    1764           0 : struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
    1765             : {
    1766             :         struct pci_saved_state *state;
    1767             :         struct pci_cap_saved_state *tmp;
    1768             :         struct pci_cap_saved_data *cap;
    1769             :         size_t size;
    1770             : 
    1771           0 :         if (!dev->state_saved)
    1772             :                 return NULL;
    1773             : 
    1774           0 :         size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
    1775             : 
    1776           0 :         hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
    1777           0 :                 size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
    1778             : 
    1779           0 :         state = kzalloc(size, GFP_KERNEL);
    1780           0 :         if (!state)
    1781             :                 return NULL;
    1782             : 
    1783           0 :         memcpy(state->config_space, dev->saved_config_space,
    1784             :                sizeof(state->config_space));
    1785             : 
    1786           0 :         cap = state->cap;
    1787           0 :         hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
    1788           0 :                 size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
    1789           0 :                 memcpy(cap, &tmp->cap, len);
    1790           0 :                 cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
    1791             :         }
    1792             :         /* Empty cap_save terminates list */
    1793             : 
    1794             :         return state;
    1795             : }
    1796             : EXPORT_SYMBOL_GPL(pci_store_saved_state);
    1797             : 
    1798             : /**
    1799             :  * pci_load_saved_state - Reload the provided save state into struct pci_dev.
    1800             :  * @dev: PCI device that we're dealing with
    1801             :  * @state: Saved state returned from pci_store_saved_state()
    1802             :  */
    1803           0 : int pci_load_saved_state(struct pci_dev *dev,
    1804             :                          struct pci_saved_state *state)
    1805             : {
    1806             :         struct pci_cap_saved_data *cap;
    1807             : 
    1808           0 :         dev->state_saved = false;
    1809             : 
    1810           0 :         if (!state)
    1811             :                 return 0;
    1812             : 
    1813           0 :         memcpy(dev->saved_config_space, state->config_space,
    1814             :                sizeof(state->config_space));
    1815             : 
    1816           0 :         cap = state->cap;
    1817           0 :         while (cap->size) {
    1818             :                 struct pci_cap_saved_state *tmp;
    1819             : 
    1820           0 :                 tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
    1821           0 :                 if (!tmp || tmp->cap.size != cap->size)
    1822             :                         return -EINVAL;
    1823             : 
    1824           0 :                 memcpy(tmp->cap.data, cap->data, tmp->cap.size);
    1825           0 :                 cap = (struct pci_cap_saved_data *)((u8 *)cap +
    1826           0 :                        sizeof(struct pci_cap_saved_data) + cap->size);
    1827             :         }
    1828             : 
    1829           0 :         dev->state_saved = true;
    1830           0 :         return 0;
    1831             : }
    1832             : EXPORT_SYMBOL_GPL(pci_load_saved_state);
    1833             : 
    1834             : /**
    1835             :  * pci_load_and_free_saved_state - Reload the save state pointed to by state,
    1836             :  *                                 and free the memory allocated for it.
    1837             :  * @dev: PCI device that we're dealing with
    1838             :  * @state: Pointer to saved state returned from pci_store_saved_state()
    1839             :  */
    1840           0 : int pci_load_and_free_saved_state(struct pci_dev *dev,
    1841             :                                   struct pci_saved_state **state)
    1842             : {
    1843           0 :         int ret = pci_load_saved_state(dev, *state);
    1844           0 :         kfree(*state);
    1845           0 :         *state = NULL;
    1846           0 :         return ret;
    1847             : }
    1848             : EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
    1849             : 
/*
 * pcibios_enable_device - default arch hook used when enabling a device
 * @dev: the PCI device being enabled
 * @bars: bitmask of BARs that must be enabled
 *
 * Weak default; architectures may override it.  Simply enables the
 * requested I/O and memory resources.
 */
int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
{
        return pci_enable_resources(dev, bars);
}
    1854             : 
/*
 * do_pci_enable_device - low-level device enable
 * @dev: PCI device to enable
 * @bars: bitmask of BARs to enable
 *
 * Puts the device into D0, lets ASPM reconfigure the upstream link,
 * enables the requested resources via the arch hook, runs enable-time
 * fixups and, for devices using INTx, makes sure INTx is not masked.
 * Returns 0 on success or a negative error code.
 */
static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
        int err;
        struct pci_dev *bridge;
        u16 cmd;
        u8 pin;

        err = pci_set_power_state(dev, PCI_D0);
        if (err < 0 && err != -EIO)     /* -EIO is tolerated; presumably no PM support — see pci_set_power_state() */
                return err;

        bridge = pci_upstream_bridge(dev);
        if (bridge)
                pcie_aspm_powersave_config_link(bridge);

        err = pcibios_enable_device(dev, bars);
        if (err < 0)
                return err;
        pci_fixup_device(pci_fixup_enable, dev);

        /* Devices signalling via MSI/MSI-X don't need the INTx un-masking below */
        if (dev->msi_enabled || dev->msix_enabled)
                return 0;

        pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
        if (pin) {
                /* Device routes an interrupt pin: clear INTx Disable if set */
                pci_read_config_word(dev, PCI_COMMAND, &cmd);
                if (cmd & PCI_COMMAND_INTX_DISABLE)
                        pci_write_config_word(dev, PCI_COMMAND,
                                              cmd & ~PCI_COMMAND_INTX_DISABLE);
        }

        return 0;
}
    1888             : 
    1889             : /**
    1890             :  * pci_reenable_device - Resume abandoned device
    1891             :  * @dev: PCI device to be resumed
    1892             :  *
    1893             :  * NOTE: This function is a backend of pci_default_resume() and is not supposed
    1894             :  * to be called by normal code, write proper resume handler and use it instead.
    1895             :  */
    1896           0 : int pci_reenable_device(struct pci_dev *dev)
    1897             : {
    1898           0 :         if (pci_is_enabled(dev))
    1899           0 :                 return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
    1900             :         return 0;
    1901             : }
    1902             : EXPORT_SYMBOL(pci_reenable_device);
    1903             : 
    1904           0 : static void pci_enable_bridge(struct pci_dev *dev)
    1905             : {
    1906             :         struct pci_dev *bridge;
    1907             :         int retval;
    1908             : 
    1909           0 :         bridge = pci_upstream_bridge(dev);
    1910           0 :         if (bridge)
    1911           0 :                 pci_enable_bridge(bridge);
    1912             : 
    1913           0 :         if (pci_is_enabled(dev)) {
    1914           0 :                 if (!dev->is_busmaster)
    1915             :                         pci_set_master(dev);
    1916             :                 return;
    1917             :         }
    1918             : 
    1919           0 :         retval = pci_enable_device(dev);
    1920           0 :         if (retval)
    1921           0 :                 pci_err(dev, "Error enabling bridge (%d), continuing\n",
    1922             :                         retval);
    1923             :         pci_set_master(dev);
    1924             : }
    1925             : 
/*
 * pci_enable_device_flags - enable a device's resources matching @flags
 * @dev: PCI device to enable
 * @flags: IORESOURCE_* mask selecting which BAR types to enable
 *
 * Reference-counted backend for pci_enable_device{,_io,_mem}(); only the
 * first caller actually enables the hardware.
 */
static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
{
        struct pci_dev *bridge;
        int err;
        int i, bars = 0;

        /*
         * Power state could be unknown at this point, either due to a fresh
         * boot or a device removal call.  So get the current power state
         * so that things like MSI message writing will behave as expected
         * (e.g. if the device really is in D0 at enable time).
         */
        pci_update_current_state(dev, dev->current_state);

        if (atomic_inc_return(&dev->enable_cnt) > 1)
                return 0;               /* already enabled */

        /* Enable (and bus-master) all bridges above us first */
        bridge = pci_upstream_bridge(dev);
        if (bridge)
                pci_enable_bridge(bridge);

        /* only skip sriov related */
        for (i = 0; i <= PCI_ROM_RESOURCE; i++)
                if (dev->resource[i].flags & flags)
                        bars |= (1 << i);
        for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
                if (dev->resource[i].flags & flags)
                        bars |= (1 << i);

        err = do_pci_enable_device(dev, bars);
        if (err < 0)
                atomic_dec(&dev->enable_cnt);   /* roll back our reference on failure */
        return err;
}
    1960             : 
    1961             : /**
    1962             :  * pci_enable_device_io - Initialize a device for use with IO space
    1963             :  * @dev: PCI device to be initialized
    1964             :  *
    1965             :  * Initialize device before it's used by a driver. Ask low-level code
    1966             :  * to enable I/O resources. Wake up the device if it was suspended.
    1967             :  * Beware, this function can fail.
    1968             :  */
    1969           0 : int pci_enable_device_io(struct pci_dev *dev)
    1970             : {
    1971           0 :         return pci_enable_device_flags(dev, IORESOURCE_IO);
    1972             : }
    1973             : EXPORT_SYMBOL(pci_enable_device_io);
    1974             : 
    1975             : /**
    1976             :  * pci_enable_device_mem - Initialize a device for use with Memory space
    1977             :  * @dev: PCI device to be initialized
    1978             :  *
    1979             :  * Initialize device before it's used by a driver. Ask low-level code
    1980             :  * to enable Memory resources. Wake up the device if it was suspended.
    1981             :  * Beware, this function can fail.
    1982             :  */
    1983           0 : int pci_enable_device_mem(struct pci_dev *dev)
    1984             : {
    1985           0 :         return pci_enable_device_flags(dev, IORESOURCE_MEM);
    1986             : }
    1987             : EXPORT_SYMBOL(pci_enable_device_mem);
    1988             : 
    1989             : /**
    1990             :  * pci_enable_device - Initialize device before it's used by a driver.
    1991             :  * @dev: PCI device to be initialized
    1992             :  *
    1993             :  * Initialize device before it's used by a driver. Ask low-level code
    1994             :  * to enable I/O and memory. Wake up the device if it was suspended.
    1995             :  * Beware, this function can fail.
    1996             :  *
    1997             :  * Note we don't actually enable the device many times if we call
    1998             :  * this function repeatedly (we just increment the count).
    1999             :  */
    2000           0 : int pci_enable_device(struct pci_dev *dev)
    2001             : {
    2002           0 :         return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
    2003             : }
    2004             : EXPORT_SYMBOL(pci_enable_device);
    2005             : 
    2006             : /*
    2007             :  * Managed PCI resources.  This manages device on/off, INTx/MSI/MSI-X
    2008             :  * on/off and BAR regions.  pci_dev itself records MSI/MSI-X status, so
    2009             :  * there's no need to track it separately.  pci_devres is initialized
    2010             :  * when a device is enabled using managed PCI device enable interface.
    2011             :  */
    2012             : struct pci_devres {
    2013             :         unsigned int enabled:1;
    2014             :         unsigned int pinned:1;
    2015             :         unsigned int orig_intx:1;
    2016             :         unsigned int restore_intx:1;
    2017             :         unsigned int mwi:1;
    2018             :         u32 region_mask;
    2019             : };
    2020             : 
    2021           0 : static void pcim_release(struct device *gendev, void *res)
    2022             : {
    2023           0 :         struct pci_dev *dev = to_pci_dev(gendev);
    2024           0 :         struct pci_devres *this = res;
    2025             :         int i;
    2026             : 
    2027           0 :         for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
    2028           0 :                 if (this->region_mask & (1 << i))
    2029           0 :                         pci_release_region(dev, i);
    2030             : 
    2031           0 :         if (this->mwi)
    2032           0 :                 pci_clear_mwi(dev);
    2033             : 
    2034           0 :         if (this->restore_intx)
    2035           0 :                 pci_intx(dev, this->orig_intx);
    2036             : 
    2037           0 :         if (this->enabled && !this->pinned)
    2038           0 :                 pci_disable_device(dev);
    2039           0 : }
    2040             : 
    2041           0 : static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
    2042             : {
    2043             :         struct pci_devres *dr, *new_dr;
    2044             : 
    2045           0 :         dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
    2046           0 :         if (dr)
    2047             :                 return dr;
    2048             : 
    2049           0 :         new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
    2050           0 :         if (!new_dr)
    2051             :                 return NULL;
    2052           0 :         return devres_get(&pdev->dev, new_dr, NULL, NULL);
    2053             : }
    2054             : 
    2055             : static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
    2056             : {
    2057           0 :         if (pci_is_managed(pdev))
    2058           0 :                 return devres_find(&pdev->dev, pcim_release, NULL, NULL);
    2059             :         return NULL;
    2060             : }
    2061             : 
    2062             : /**
    2063             :  * pcim_enable_device - Managed pci_enable_device()
    2064             :  * @pdev: PCI device to be initialized
    2065             :  *
    2066             :  * Managed pci_enable_device().
    2067             :  */
    2068           0 : int pcim_enable_device(struct pci_dev *pdev)
    2069             : {
    2070             :         struct pci_devres *dr;
    2071             :         int rc;
    2072             : 
    2073           0 :         dr = get_pci_dr(pdev);
    2074           0 :         if (unlikely(!dr))
    2075             :                 return -ENOMEM;
    2076           0 :         if (dr->enabled)
    2077             :                 return 0;
    2078             : 
    2079           0 :         rc = pci_enable_device(pdev);
    2080           0 :         if (!rc) {
    2081           0 :                 pdev->is_managed = 1;
    2082           0 :                 dr->enabled = 1;
    2083             :         }
    2084             :         return rc;
    2085             : }
    2086             : EXPORT_SYMBOL(pcim_enable_device);
    2087             : 
    2088             : /**
    2089             :  * pcim_pin_device - Pin managed PCI device
    2090             :  * @pdev: PCI device to pin
    2091             :  *
    2092             :  * Pin managed PCI device @pdev.  Pinned device won't be disabled on
    2093             :  * driver detach.  @pdev must have been enabled with
    2094             :  * pcim_enable_device().
    2095             :  */
    2096           0 : void pcim_pin_device(struct pci_dev *pdev)
    2097             : {
    2098             :         struct pci_devres *dr;
    2099             : 
    2100           0 :         dr = find_pci_dr(pdev);
    2101           0 :         WARN_ON(!dr || !dr->enabled);
    2102           0 :         if (dr)
    2103           0 :                 dr->pinned = 1;
    2104           0 : }
    2105             : EXPORT_SYMBOL(pcim_pin_device);
    2106             : 
    2107             : /*
    2108             :  * pcibios_device_add - provide arch specific hooks when adding device dev
    2109             :  * @dev: the PCI device being added
    2110             :  *
    2111             :  * Permits the platform to provide architecture specific functionality when
    2112             :  * devices are added. This is the default implementation. Architecture
    2113             :  * implementations can override this.
    2114             :  */
    2115           0 : int __weak pcibios_device_add(struct pci_dev *dev)
    2116             : {
    2117           0 :         return 0;
    2118             : }
    2119             : 
    2120             : /**
    2121             :  * pcibios_release_device - provide arch specific hooks when releasing
    2122             :  *                          device dev
    2123             :  * @dev: the PCI device being released
    2124             :  *
    2125             :  * Permits the platform to provide architecture specific functionality when
    2126             :  * devices are released. This is the default implementation. Architecture
    2127             :  * implementations can override this.
    2128             :  */
    2129           0 : void __weak pcibios_release_device(struct pci_dev *dev) {}
    2130             : 
    2131             : /**
    2132             :  * pcibios_disable_device - disable arch specific PCI resources for device dev
    2133             :  * @dev: the PCI device to disable
    2134             :  *
    2135             :  * Disables architecture specific PCI resources for the device. This
    2136             :  * is the default implementation. Architecture implementations can
    2137             :  * override this.
    2138             :  */
    2139           0 : void __weak pcibios_disable_device(struct pci_dev *dev) {}
    2140             : 
    2141             : /**
    2142             :  * pcibios_penalize_isa_irq - penalize an ISA IRQ
    2143             :  * @irq: ISA IRQ to penalize
    2144             :  * @active: IRQ active or not
    2145             :  *
    2146             :  * Permits the platform to provide architecture-specific functionality when
    2147             :  * penalizing ISA IRQs. This is the default implementation. Architecture
    2148             :  * implementations can override this.
    2149             :  */
    2150           0 : void __weak pcibios_penalize_isa_irq(int irq, int active) {}
    2151             : 
    2152           0 : static void do_pci_disable_device(struct pci_dev *dev)
    2153             : {
    2154             :         u16 pci_command;
    2155             : 
    2156           0 :         pci_read_config_word(dev, PCI_COMMAND, &pci_command);
    2157           0 :         if (pci_command & PCI_COMMAND_MASTER) {
    2158           0 :                 pci_command &= ~PCI_COMMAND_MASTER;
    2159           0 :                 pci_write_config_word(dev, PCI_COMMAND, pci_command);
    2160             :         }
    2161             : 
    2162           0 :         pcibios_disable_device(dev);
    2163           0 : }
    2164             : 
    2165             : /**
    2166             :  * pci_disable_enabled_device - Disable device without updating enable_cnt
    2167             :  * @dev: PCI device to disable
    2168             :  *
    2169             :  * NOTE: This function is a backend of PCI power management routines and is
    2170             :  * not supposed to be called drivers.
    2171             :  */
    2172           0 : void pci_disable_enabled_device(struct pci_dev *dev)
    2173             : {
    2174           0 :         if (pci_is_enabled(dev))
    2175           0 :                 do_pci_disable_device(dev);
    2176           0 : }
    2177             : 
    2178             : /**
    2179             :  * pci_disable_device - Disable PCI device after use
    2180             :  * @dev: PCI device to be disabled
    2181             :  *
    2182             :  * Signal to the system that the PCI device is not in use by the system
    2183             :  * anymore.  This only involves disabling PCI bus-mastering, if active.
    2184             :  *
    2185             :  * Note we don't actually disable the device until all callers of
    2186             :  * pci_enable_device() have called pci_disable_device().
    2187             :  */
    2188           0 : void pci_disable_device(struct pci_dev *dev)
    2189             : {
    2190             :         struct pci_devres *dr;
    2191             : 
    2192           0 :         dr = find_pci_dr(dev);
    2193           0 :         if (dr)
    2194           0 :                 dr->enabled = 0;
    2195             : 
    2196           0 :         dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
    2197             :                       "disabling already-disabled device");
    2198             : 
    2199           0 :         if (atomic_dec_return(&dev->enable_cnt) != 0)
    2200             :                 return;
    2201             : 
    2202           0 :         do_pci_disable_device(dev);
    2203             : 
    2204           0 :         dev->is_busmaster = 0;
    2205             : }
    2206             : EXPORT_SYMBOL(pci_disable_device);
    2207             : 
    2208             : /**
    2209             :  * pcibios_set_pcie_reset_state - set reset state for device dev
    2210             :  * @dev: the PCIe device reset
    2211             :  * @state: Reset state to enter into
    2212             :  *
    2213             :  * Set the PCIe reset state for the device. This is the default
    2214             :  * implementation. Architecture implementations can override this.
    2215             :  */
    2216           0 : int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
    2217             :                                         enum pcie_reset_state state)
    2218             : {
    2219           0 :         return -EINVAL;
    2220             : }
    2221             : 
    2222             : /**
    2223             :  * pci_set_pcie_reset_state - set reset state for device dev
    2224             :  * @dev: the PCIe device reset
    2225             :  * @state: Reset state to enter into
    2226             :  *
    2227             :  * Sets the PCI reset state for the device.
    2228             :  */
    2229           0 : int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
    2230             : {
    2231           0 :         return pcibios_set_pcie_reset_state(dev, state);
    2232             : }
    2233             : EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
    2234             : 
#ifdef CONFIG_PCIEAER
/*
 * pcie_clear_device_status - clear the PCIe Device Status register
 * @dev: PCIe device
 *
 * DEVSTA status bits are write-1-to-clear, so writing back the value
 * just read clears every bit that was set.
 */
void pcie_clear_device_status(struct pci_dev *dev)
{
        u16 sta;

        pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta);
        pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta);
}
#endif
    2244             : 
    2245             : /**
    2246             :  * pcie_clear_root_pme_status - Clear root port PME interrupt status.
    2247             :  * @dev: PCIe root port or event collector.
    2248             :  */
    2249           0 : void pcie_clear_root_pme_status(struct pci_dev *dev)
    2250             : {
    2251           0 :         pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
    2252           0 : }
    2253             : 
    2254             : /**
    2255             :  * pci_check_pme_status - Check if given device has generated PME.
    2256             :  * @dev: Device to check.
    2257             :  *
    2258             :  * Check the PME status of the device and if set, clear it and clear PME enable
    2259             :  * (if set).  Return 'true' if PME status and PME enable were both set or
    2260             :  * 'false' otherwise.
    2261             :  */
    2262           0 : bool pci_check_pme_status(struct pci_dev *dev)
    2263             : {
    2264             :         int pmcsr_pos;
    2265             :         u16 pmcsr;
    2266           0 :         bool ret = false;
    2267             : 
    2268           0 :         if (!dev->pm_cap)
    2269             :                 return false;
    2270             : 
    2271           0 :         pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
    2272           0 :         pci_read_config_word(dev, pmcsr_pos, &pmcsr);
    2273           0 :         if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
    2274             :                 return false;
    2275             : 
    2276             :         /* Clear PME status. */
    2277           0 :         pmcsr |= PCI_PM_CTRL_PME_STATUS;
    2278           0 :         if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
    2279             :                 /* Disable PME to avoid interrupt flood. */
    2280           0 :                 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
    2281           0 :                 ret = true;
    2282             :         }
    2283             : 
    2284           0 :         pci_write_config_word(dev, pmcsr_pos, pmcsr);
    2285             : 
    2286           0 :         return ret;
    2287             : }
    2288             : 
    2289             : /**
    2290             :  * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
    2291             :  * @dev: Device to handle.
    2292             :  * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
    2293             :  *
    2294             :  * Check if @dev has generated PME and queue a resume request for it in that
    2295             :  * case.
    2296             :  */
    2297           0 : static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
    2298             : {
    2299           0 :         if (pme_poll_reset && dev->pme_poll)
    2300           0 :                 dev->pme_poll = false;
    2301             : 
    2302           0 :         if (pci_check_pme_status(dev)) {
    2303           0 :                 pci_wakeup_event(dev);
    2304           0 :                 pm_request_resume(&dev->dev);
    2305             :         }
    2306           0 :         return 0;
    2307             : }
    2308             : 
    2309             : /**
    2310             :  * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
    2311             :  * @bus: Top bus of the subtree to walk.
    2312             :  */
    2313           0 : void pci_pme_wakeup_bus(struct pci_bus *bus)
    2314             : {
    2315           0 :         if (bus)
    2316           0 :                 pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
    2317           0 : }
    2318             : 
    2319             : 
    2320             : /**
    2321             :  * pci_pme_capable - check the capability of PCI device to generate PME#
    2322             :  * @dev: PCI device to handle.
    2323             :  * @state: PCI state from which device will issue PME#.
    2324             :  */
    2325           0 : bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
    2326             : {
    2327           0 :         if (!dev->pm_cap)
    2328             :                 return false;
    2329             : 
    2330           0 :         return !!(dev->pme_support & (1 << state));
    2331             : }
    2332             : EXPORT_SYMBOL(pci_pme_capable);
    2333             : 
/*
 * pci_pme_list_scan - periodic worker polling devices on pci_pme_list
 * @work: work item (unused; the list and work struct are globals)
 *
 * Under pci_pme_list_mutex: polls each listed device that still wants
 * polling, prunes entries whose device no longer does, and re-queues
 * itself while the list is non-empty.
 */
static void pci_pme_list_scan(struct work_struct *work)
{
        struct pci_pme_device *pme_dev, *n;

        mutex_lock(&pci_pme_list_mutex);
        /* _safe variant: entries may be deleted while iterating */
        list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
                if (pme_dev->dev->pme_poll) {
                        struct pci_dev *bridge;

                        bridge = pme_dev->dev->bus->self;
                        /*
                         * If bridge is in low power state, the
                         * configuration space of subordinate devices
                         * may be not accessible
                         */
                        if (bridge && bridge->current_state != PCI_D0)
                                continue;
                        /*
                         * If the device is in D3cold it should not be
                         * polled either.
                         */
                        if (pme_dev->dev->current_state == PCI_D3cold)
                                continue;

                        pci_pme_wakeup(pme_dev->dev, NULL);
                } else {
                        /* Device no longer needs polling: drop its entry */
                        list_del(&pme_dev->list);
                        kfree(pme_dev);
                }
        }
        /* Keep polling (on the freezable wq) while anything remains listed */
        if (!list_empty(&pci_pme_list))
                queue_delayed_work(system_freezable_wq, &pci_pme_work,
                                   msecs_to_jiffies(PME_TIMEOUT));
        mutex_unlock(&pci_pme_list_mutex);
}
    2369             : 
    2370           0 : static void __pci_pme_active(struct pci_dev *dev, bool enable)
    2371             : {
    2372             :         u16 pmcsr;
    2373             : 
    2374           0 :         if (!dev->pme_support)
    2375           0 :                 return;
    2376             : 
    2377           0 :         pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
    2378             :         /* Clear PME_Status by writing 1 to it and enable PME# */
    2379           0 :         pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
    2380           0 :         if (!enable)
    2381           0 :                 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
    2382             : 
    2383           0 :         pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
    2384             : }
    2385             : 
/**
 * pci_pme_restore - Restore PME configuration after config space restore.
 * @dev: PCI device to update.
 *
 * Re-applies the PME enable bit according to dev->wakeup_prepared, which a
 * config-space restore may have clobbered.  No-op without PME support.
 */
void pci_pme_restore(struct pci_dev *dev)
{
	u16 pmcsr;

	if (!dev->pme_support)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	if (dev->wakeup_prepared) {
		pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		/* Write 0 so as not to clear a possibly pending PME status. */
		pmcsr &= ~PCI_PM_CTRL_PME_STATUS;
	} else {
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		/* PME_Status is write-one-to-clear: discard any stale event. */
		pmcsr |= PCI_PM_CTRL_PME_STATUS;
	}
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
}
    2407             : 
/**
 * pci_pme_active - enable or disable PCI device's PME# function
 * @dev: PCI device to handle.
 * @enable: 'true' to enable PME# generation; 'false' to disable it.
 *
 * The caller must verify that the device is capable of generating PME# before
 * calling this function with @enable equal to 'true'.
 */
void pci_pme_active(struct pci_dev *dev, bool enable)
{
	__pci_pme_active(dev, enable);

	/*
	 * PCI (as opposed to PCIe) PME requires that the device have
	 * its PME# line hooked up correctly. Not all hardware vendors
	 * do this, so the PME never gets delivered and the device
	 * remains asleep. The easiest way around this is to
	 * periodically walk the list of suspended devices and check
	 * whether any have their PME flag set. The assumption is that
	 * we'll wake up often enough anyway that this won't be a huge
	 * hit, and the power savings from the devices will still be a
	 * win.
	 *
	 * Although PCIe uses in-band PME message instead of PME# line
	 * to report PME, PME does not work for some PCIe devices in
	 * reality.  For example, there are devices that set their PME
	 * status bits, but don't really bother to send a PME message;
	 * there are PCI Express Root Ports that don't bother to
	 * trigger interrupts when they receive PME messages from the
	 * devices below.  So PME poll is used for PCIe devices too.
	 */

	if (dev->pme_poll) {
		struct pci_pme_device *pme_dev;
		if (enable) {
			/* Add the device to the poll list serviced by pci_pme_list_scan(). */
			pme_dev = kmalloc(sizeof(struct pci_pme_device),
					  GFP_KERNEL);
			if (!pme_dev) {
				pci_warn(dev, "can't enable PME#\n");
				return;
			}
			pme_dev->dev = dev;
			mutex_lock(&pci_pme_list_mutex);
			list_add(&pme_dev->list, &pci_pme_list);
			/* First entry on the list: kick off the polling work. */
			if (list_is_singular(&pci_pme_list))
				queue_delayed_work(system_freezable_wq,
						   &pci_pme_work,
						   msecs_to_jiffies(PME_TIMEOUT));
			mutex_unlock(&pci_pme_list_mutex);
		} else {
			/* Remove this device's entry from the poll list, if present. */
			mutex_lock(&pci_pme_list_mutex);
			list_for_each_entry(pme_dev, &pci_pme_list, list) {
				if (pme_dev->dev == dev) {
					list_del(&pme_dev->list);
					kfree(pme_dev);
					break;
				}
			}
			mutex_unlock(&pci_pme_list_mutex);
		}
	}

	pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled");
}
EXPORT_SYMBOL(pci_pme_active);
    2473             : 
/**
 * __pci_enable_wake - enable PCI device as wakeup event source
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wakeup events
 * @enable: True to enable event generation; false to disable
 *
 * This enables the device as a wakeup event source, or disables it.
 * When such events involves platform-specific hooks, those hooks are
 * called automatically by this routine.
 *
 * Devices with legacy power management (no standard PCI PM capabilities)
 * always require such platform hooks.
 *
 * RETURN VALUE:
 * 0 is returned on success
 * -EINVAL is returned if device is not supposed to wake up the system
 * Error code depending on the platform is returned if both the platform and
 * the native mechanism fail to enable the generation of wake-up events
 */
static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
{
	int ret = 0;

	/*
	 * Bridges that are not power-manageable directly only signal
	 * wakeup on behalf of subordinate devices which is set up
	 * elsewhere, so skip them. However, bridges that are
	 * power-manageable may signal wakeup for themselves (for example,
	 * on a hotplug event) and they need to be covered here.
	 */
	if (!pci_power_manageable(dev))
		return 0;

	/* Don't do the same thing twice in a row for one device. */
	if (!!enable == !!dev->wakeup_prepared)
		return 0;

	/*
	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
	 * Anderson we should be doing PME# wake enable followed by ACPI wake
	 * enable.  To disable wake-up we call the platform first, for symmetry.
	 */

	if (enable) {
		int error;

		/*
		 * Enable PME signaling if the device can signal PME from
		 * D3cold regardless of whether or not it can signal PME from
		 * the current target state, because that will allow it to
		 * signal PME when the hierarchy above it goes into D3cold and
		 * the device itself ends up in D3cold as a result of that.
		 */
		if (pci_pme_capable(dev, state) || pci_pme_capable(dev, PCI_D3cold))
			pci_pme_active(dev, true);
		else
			ret = 1;	/* no native PME; must rely on the platform */
		error = platform_pci_set_wakeup(dev, true);
		/*
		 * If native PME was enabled (ret == 0), a platform failure is
		 * ignored; otherwise the platform result decides the outcome.
		 */
		if (ret)
			ret = error;
		if (!ret)
			dev->wakeup_prepared = true;
	} else {
		platform_pci_set_wakeup(dev, false);
		pci_pme_active(dev, false);
		dev->wakeup_prepared = false;
	}

	return ret;
}
    2544             : 
    2545             : /**
    2546             :  * pci_enable_wake - change wakeup settings for a PCI device
    2547             :  * @pci_dev: Target device
    2548             :  * @state: PCI state from which device will issue wakeup events
    2549             :  * @enable: Whether or not to enable event generation
    2550             :  *
    2551             :  * If @enable is set, check device_may_wakeup() for the device before calling
    2552             :  * __pci_enable_wake() for it.
    2553             :  */
    2554           0 : int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
    2555             : {
    2556           0 :         if (enable && !device_may_wakeup(&pci_dev->dev))
    2557             :                 return -EINVAL;
    2558             : 
    2559           0 :         return __pci_enable_wake(pci_dev, state, enable);
    2560             : }
    2561             : EXPORT_SYMBOL(pci_enable_wake);
    2562             : 
    2563             : /**
    2564             :  * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
    2565             :  * @dev: PCI device to prepare
    2566             :  * @enable: True to enable wake-up event generation; false to disable
    2567             :  *
    2568             :  * Many drivers want the device to wake up the system from D3_hot or D3_cold
    2569             :  * and this function allows them to set that up cleanly - pci_enable_wake()
    2570             :  * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
    2571             :  * ordering constraints.
    2572             :  *
    2573             :  * This function only returns error code if the device is not allowed to wake
    2574             :  * up the system from sleep or it is not capable of generating PME# from both
    2575             :  * D3_hot and D3_cold and the platform is unable to enable wake-up power for it.
    2576             :  */
    2577           0 : int pci_wake_from_d3(struct pci_dev *dev, bool enable)
    2578             : {
    2579           0 :         return pci_pme_capable(dev, PCI_D3cold) ?
    2580           0 :                         pci_enable_wake(dev, PCI_D3cold, enable) :
    2581           0 :                         pci_enable_wake(dev, PCI_D3hot, enable);
    2582             : }
    2583             : EXPORT_SYMBOL(pci_wake_from_d3);
    2584             : 
/**
 * pci_target_state - find an appropriate low power state for a given PCI dev
 * @dev: PCI device
 * @wakeup: Whether or not wakeup functionality will be enabled for the device.
 *
 * Use underlying platform code to find a supported low power state for @dev.
 * If the platform can't manage @dev, return the deepest state from which it
 * can generate wake events, based on any available PME info.
 */
static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
{
	if (platform_pci_power_manageable(dev)) {
		/*
		 * Call the platform to find the target state for the device.
		 */
		pci_power_t state = platform_pci_choose_state(dev);

		switch (state) {
		case PCI_POWER_ERROR:
		case PCI_UNKNOWN:
			/* Platform gave no usable answer: pick a safe default. */
			return PCI_D3hot;

		case PCI_D1:
		case PCI_D2:
			if (pci_no_d1d2(dev))
				return PCI_D3hot;
		}
		/* D0/D3hot/D3cold (and allowed D1/D2) pass through unchanged. */

		return state;
	}

	/*
	 * If the device is in D3cold even though it's not power-manageable by
	 * the platform, it may have been powered down by non-standard means.
	 * Best to let it slumber.
	 */
	if (dev->current_state == PCI_D3cold)
		return PCI_D3cold;
	else if (!dev->pm_cap)
		return PCI_D0;

	if (wakeup && dev->pme_support) {
		pci_power_t state = PCI_D3hot;

		/*
		 * Find the deepest state from which the device can generate
		 * PME#.
		 */
		while (state && !(dev->pme_support & (1 << state)))
			state--;

		if (state)
			return state;
		else if (dev->pme_support & 1)
			/* PME# is only supported from D0 (bit 0 of the mask). */
			return PCI_D0;
	}

	return PCI_D3hot;
}
    2644             : 
/**
 * pci_prepare_to_sleep - prepare PCI device for system-wide transition
 *                        into a sleep state
 * @dev: Device to handle.
 *
 * Choose the power state appropriate for the device depending on whether
 * it can wake up the system and/or is power manageable by the platform
 * (PCI_D3hot is the default) and put the device into that state.
 */
int pci_prepare_to_sleep(struct pci_dev *dev)
{
	bool wakeup = device_may_wakeup(&dev->dev);
	pci_power_t target_state = pci_target_state(dev, wakeup);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	/*
	 * There are systems (for example, Intel mobile chips since Coffee
	 * Lake) where the power drawn while suspended can be significantly
	 * reduced by disabling PTM on PCIe root ports as this allows the
	 * port to enter a lower-power PM state and the SoC to reach a
	 * lower-power idle state as a whole.
	 */
	if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
		pci_disable_ptm(dev);

	/* Configure wakeup before the device leaves D0. */
	pci_enable_wake(dev, target_state, wakeup);

	error = pci_set_power_state(dev, target_state);

	if (error) {
		/* Roll back: drop wakeup and restore the PTM state disabled above. */
		pci_enable_wake(dev, target_state, false);
		pci_restore_ptm_state(dev);
	}

	return error;
}
EXPORT_SYMBOL(pci_prepare_to_sleep);
    2685             : 
    2686             : /**
    2687             :  * pci_back_from_sleep - turn PCI device on during system-wide transition
    2688             :  *                       into working state
    2689             :  * @dev: Device to handle.
    2690             :  *
    2691             :  * Disable device's system wake-up capability and put it into D0.
    2692             :  */
    2693           0 : int pci_back_from_sleep(struct pci_dev *dev)
    2694             : {
    2695           0 :         int ret = pci_set_power_state(dev, PCI_D0);
    2696             : 
    2697           0 :         if (ret)
    2698             :                 return ret;
    2699             : 
    2700           0 :         pci_enable_wake(dev, PCI_D0, false);
    2701           0 :         return 0;
    2702             : }
    2703             : EXPORT_SYMBOL(pci_back_from_sleep);
    2704             : 
/**
 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
 * @dev: PCI device being suspended.
 *
 * Prepare @dev to generate wake-up events at run time and put it into a low
 * power state.
 */
int pci_finish_runtime_suspend(struct pci_dev *dev)
{
	pci_power_t target_state;
	int error;

	target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	/* Record whether this runtime suspend goes all the way to D3cold. */
	dev->runtime_d3cold = target_state == PCI_D3cold;

	/*
	 * There are systems (for example, Intel mobile chips since Coffee
	 * Lake) where the power drawn while suspended can be significantly
	 * reduced by disabling PTM on PCIe root ports as this allows the
	 * port to enter a lower-power PM state and the SoC to reach a
	 * lower-power idle state as a whole.
	 */
	if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
		pci_disable_ptm(dev);

	/* Configure wakeup before the device leaves D0. */
	__pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));

	error = pci_set_power_state(dev, target_state);

	if (error) {
		/* Roll back wakeup, PTM, and the D3cold flag set above. */
		pci_enable_wake(dev, target_state, false);
		pci_restore_ptm_state(dev);
		dev->runtime_d3cold = false;
	}

	return error;
}
    2745             : 
    2746             : /**
    2747             :  * pci_dev_run_wake - Check if device can generate run-time wake-up events.
    2748             :  * @dev: Device to check.
    2749             :  *
    2750             :  * Return true if the device itself is capable of generating wake-up events
    2751             :  * (through the platform or using the native PCIe PME) or if the device supports
    2752             :  * PME and one of its upstream bridges can generate wake-up events.
    2753             :  */
    2754           0 : bool pci_dev_run_wake(struct pci_dev *dev)
    2755             : {
    2756           0 :         struct pci_bus *bus = dev->bus;
    2757             : 
    2758           0 :         if (!dev->pme_support)
    2759             :                 return false;
    2760             : 
    2761             :         /* PME-capable in principle, but not from the target power state */
    2762           0 :         if (!pci_pme_capable(dev, pci_target_state(dev, true)))
    2763             :                 return false;
    2764             : 
    2765           0 :         if (device_can_wakeup(&dev->dev))
    2766             :                 return true;
    2767             : 
    2768           0 :         while (bus->parent) {
    2769           0 :                 struct pci_dev *bridge = bus->self;
    2770             : 
    2771           0 :                 if (device_can_wakeup(&bridge->dev))
    2772             :                         return true;
    2773             : 
    2774             :                 bus = bus->parent;
    2775             :         }
    2776             : 
    2777             :         /* We have reached the root bus. */
    2778           0 :         if (bus->bridge)
    2779           0 :                 return device_can_wakeup(bus->bridge);
    2780             : 
    2781             :         return false;
    2782             : }
    2783             : EXPORT_SYMBOL_GPL(pci_dev_run_wake);
    2784             : 
/**
 * pci_dev_need_resume - Check if it is necessary to resume the device.
 * @pci_dev: Device to check.
 *
 * Return 'true' if the device is not runtime-suspended or it has to be
 * reconfigured due to wakeup settings difference between system and runtime
 * suspend, or the current power state of it is not suitable for the upcoming
 * (system-wide) transition.
 */
bool pci_dev_need_resume(struct pci_dev *pci_dev)
{
	struct device *dev = &pci_dev->dev;
	pci_power_t target_state;

	/* Not suspended, or the platform insists on a resume: must resume. */
	if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev))
		return true;

	target_state = pci_target_state(pci_dev, device_may_wakeup(dev));

	/*
	 * If the earlier platform check has not triggered, D3cold is just power
	 * removal on top of D3hot, so no need to resume the device in that
	 * case.
	 */
	return target_state != pci_dev->current_state &&
		target_state != PCI_D3cold &&
		pci_dev->current_state != PCI_D3hot;
}
    2813             : 
/**
 * pci_dev_adjust_pme - Adjust PME setting for a suspended device.
 * @pci_dev: Device to check.
 *
 * If the device is suspended and it is not configured for system wakeup,
 * disable PME for it to prevent it from waking up the system unnecessarily.
 *
 * Note that if the device's power state is D3cold and the platform check in
 * pci_dev_need_resume() has not triggered, the device's configuration need not
 * be changed.
 */
void pci_dev_adjust_pme(struct pci_dev *pci_dev)
{
	struct device *dev = &pci_dev->dev;

	/* NOTE(review): power.lock presumably guards the runtime-PM status
	 * read below against concurrent transitions — confirm. */
	spin_lock_irq(&dev->power.lock);

	if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) &&
	    pci_dev->current_state < PCI_D3cold)
		__pci_pme_active(pci_dev, false);

	spin_unlock_irq(&dev->power.lock);
}
    2837             : 
/**
 * pci_dev_complete_resume - Finalize resume from system sleep for a device.
 * @pci_dev: Device to handle.
 *
 * If the device is runtime suspended and wakeup-capable, enable PME for it as
 * it might have been disabled during the prepare phase of system suspend if
 * the device was not configured for system wakeup.
 */
void pci_dev_complete_resume(struct pci_dev *pci_dev)
{
	struct device *dev = &pci_dev->dev;

	if (!pci_dev_run_wake(pci_dev))
		return;

	spin_lock_irq(&dev->power.lock);

	/* Skip D3cold: config space is not accessible there. */
	if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
		__pci_pme_active(pci_dev, true);

	spin_unlock_irq(&dev->power.lock);
}
    2860             : 
    2861             : /**
    2862             :  * pci_choose_state - Choose the power state of a PCI device.
    2863             :  * @dev: Target PCI device.
    2864             :  * @state: Target state for the whole system.
    2865             :  *
    2866             :  * Returns PCI power state suitable for @dev and @state.
    2867             :  */
    2868           0 : pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
    2869             : {
    2870           0 :         if (state.event == PM_EVENT_ON)
    2871             :                 return PCI_D0;
    2872             : 
    2873           0 :         return pci_target_state(dev, false);
    2874             : }
    2875             : EXPORT_SYMBOL(pci_choose_state);
    2876             : 
/*
 * pci_config_pm_runtime_get - make @pdev's config space safe to access
 * @pdev: PCI device whose config space is about to be touched.
 *
 * Takes runtime-PM references on the device and its parent, and resumes
 * the device if it is in D3cold (where config space is unreachable).
 * Balanced by pci_config_pm_runtime_put().
 */
void pci_config_pm_runtime_get(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct device *parent = dev->parent;

	if (parent)
		pm_runtime_get_sync(parent);
	pm_runtime_get_noresume(dev);
	/*
	 * pdev->current_state is set to PCI_D3cold during suspending,
	 * so wait until suspending completes
	 */
	pm_runtime_barrier(dev);
	/*
	 * Only need to resume devices in D3cold, because config
	 * registers are still accessible for devices suspended but
	 * not in D3cold.
	 */
	if (pdev->current_state == PCI_D3cold)
		pm_runtime_resume(dev);
}
    2898             : 
    2899           0 : void pci_config_pm_runtime_put(struct pci_dev *pdev)
    2900             : {
    2901           0 :         struct device *dev = &pdev->dev;
    2902           0 :         struct device *parent = dev->parent;
    2903             : 
    2904           0 :         pm_runtime_put(dev);
    2905           0 :         if (parent)
    2906             :                 pm_runtime_put_sync(parent);
    2907           0 : }
    2908             : 
    2909             : static const struct dmi_system_id bridge_d3_blacklist[] = {
    2910             : #ifdef CONFIG_X86
    2911             :         {
    2912             :                 /*
    2913             :                  * Gigabyte X299 root port is not marked as hotplug capable
    2914             :                  * which allows Linux to power manage it.  However, this
    2915             :                  * confuses the BIOS SMI handler so don't power manage root
    2916             :                  * ports on that system.
    2917             :                  */
    2918             :                 .ident = "X299 DESIGNARE EX-CF",
    2919             :                 .matches = {
    2920             :                         DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
    2921             :                         DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
    2922             :                 },
    2923             :                 /*
    2924             :                  * Downstream device is not accessible after putting a root port
    2925             :                  * into D3cold and back into D0 on Elo i2.
    2926             :                  */
    2927             :                 .ident = "Elo i2",
    2928             :                 .matches = {
    2929             :                         DMI_MATCH(DMI_SYS_VENDOR, "Elo Touch Solutions"),
    2930             :                         DMI_MATCH(DMI_PRODUCT_NAME, "Elo i2"),
    2931             :                         DMI_MATCH(DMI_PRODUCT_VERSION, "RevB"),
    2932             :                 },
    2933             :         },
    2934             : #endif
    2935             :         { }
    2936             : };
    2937             : 
    2938             : /**
    2939             :  * pci_bridge_d3_possible - Is it possible to put the bridge into D3
    2940             :  * @bridge: Bridge to check
    2941             :  *
    2942             :  * This function checks if it is possible to move the bridge to D3.
    2943             :  * Currently we only allow D3 for recent enough PCIe ports and Thunderbolt.
    2944             :  */
    2945           0 : bool pci_bridge_d3_possible(struct pci_dev *bridge)
    2946             : {
    2947           0 :         if (!pci_is_pcie(bridge))
    2948             :                 return false;
    2949             : 
    2950           0 :         switch (pci_pcie_type(bridge)) {
    2951             :         case PCI_EXP_TYPE_ROOT_PORT:
    2952             :         case PCI_EXP_TYPE_UPSTREAM:
    2953             :         case PCI_EXP_TYPE_DOWNSTREAM:
    2954           0 :                 if (pci_bridge_d3_disable)
    2955             :                         return false;
    2956             : 
    2957             :                 /*
    2958             :                  * Hotplug ports handled by firmware in System Management Mode
    2959             :                  * may not be put into D3 by the OS (Thunderbolt on non-Macs).
    2960             :                  */
    2961           0 :                 if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
    2962             :                         return false;
    2963             : 
    2964           0 :                 if (pci_bridge_d3_force)
    2965             :                         return true;
    2966             : 
    2967             :                 /* Even the oldest 2010 Thunderbolt controller supports D3. */
    2968           0 :                 if (bridge->is_thunderbolt)
    2969             :                         return true;
    2970             : 
    2971             :                 /* Platform might know better if the bridge supports D3 */
    2972           0 :                 if (platform_pci_bridge_d3(bridge))
    2973             :                         return true;
    2974             : 
    2975             :                 /*
    2976             :                  * Hotplug ports handled natively by the OS were not validated
    2977             :                  * by vendors for runtime D3 at least until 2018 because there
    2978             :                  * was no OS support.
    2979             :                  */
    2980             :                 if (bridge->is_hotplug_bridge)
    2981             :                         return false;
    2982             : 
    2983             :                 if (dmi_check_system(bridge_d3_blacklist))
    2984             :                         return false;
    2985             : 
    2986             :                 /*
    2987             :                  * It should be safe to put PCIe ports from 2015 or newer
    2988             :                  * to D3.
    2989             :                  */
    2990             :                 if (dmi_get_bios_year() >= 2015)
    2991             :                         return true;
    2992             :                 break;
    2993             :         }
    2994             : 
    2995             :         return false;
    2996             : }
    2997             : 
    2998           0 : static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
    2999             : {
    3000           0 :         bool *d3cold_ok = data;
    3001             : 
    3002           0 :         if (/* The device needs to be allowed to go D3cold ... */
    3003           0 :             dev->no_d3cold || !dev->d3cold_allowed ||
    3004             : 
    3005             :             /* ... and if it is wakeup capable to do so from D3cold. */
    3006           0 :             (device_may_wakeup(&dev->dev) &&
    3007           0 :              !pci_pme_capable(dev, PCI_D3cold)) ||
    3008             : 
    3009             :             /* If it is a bridge it must be allowed to go to D3. */
    3010           0 :             !pci_power_manageable(dev))
    3011             : 
    3012           0 :                 *d3cold_ok = false;
    3013             : 
    3014           0 :         return !*d3cold_ok;
    3015             : }
    3016             : 
/**
 * pci_bridge_d3_update - Update bridge D3 capabilities
 * @dev: PCI device which is changed
 *
 * Update upstream bridge PM capabilities accordingly depending on if the
 * device PM configuration was changed or the device is being removed.  The
 * change is also propagated upstream.
 */
void pci_bridge_d3_update(struct pci_dev *dev)
{
	bool remove = !device_is_registered(&dev->dev);
	struct pci_dev *bridge;
	bool d3cold_ok = true;

	/* No upstream bridge, or one that can never do D3: nothing to update. */
	bridge = pci_upstream_bridge(dev);
	if (!bridge || !pci_bridge_d3_possible(bridge))
		return;

	/*
	 * If D3 is currently allowed for the bridge, removing one of its
	 * children won't change that.
	 */
	if (remove && bridge->bridge_d3)
		return;

	/*
	 * If D3 is currently allowed for the bridge and a child is added or
	 * changed, disallowance of D3 can only be caused by that child, so
	 * we only need to check that single device, not any of its siblings.
	 *
	 * If D3 is currently not allowed for the bridge, checking the device
	 * first may allow us to skip checking its siblings.
	 */
	if (!remove)
		pci_dev_check_d3cold(dev, &d3cold_ok);

	/*
	 * If D3 is currently not allowed for the bridge, this may be caused
	 * either by the device being changed/removed or any of its siblings,
	 * so we need to go through all children to find out if one of them
	 * continues to block D3.
	 */
	if (d3cold_ok && !bridge->bridge_d3)
		pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
			     &d3cold_ok);

	if (bridge->bridge_d3 != d3cold_ok) {
		bridge->bridge_d3 = d3cold_ok;
		/* Propagate change to upstream bridges (recurses upward) */
		pci_bridge_d3_update(bridge);
	}
}
    3069             : 
    3070             : /**
    3071             :  * pci_d3cold_enable - Enable D3cold for device
    3072             :  * @dev: PCI device to handle
    3073             :  *
    3074             :  * This function can be used in drivers to enable D3cold from the device
    3075             :  * they handle.  It also updates upstream PCI bridge PM capabilities
    3076             :  * accordingly.
    3077             :  */
    3078           0 : void pci_d3cold_enable(struct pci_dev *dev)
    3079             : {
    3080           0 :         if (dev->no_d3cold) {
    3081           0 :                 dev->no_d3cold = false;
    3082           0 :                 pci_bridge_d3_update(dev);
    3083             :         }
    3084           0 : }
    3085             : EXPORT_SYMBOL_GPL(pci_d3cold_enable);
    3086             : 
    3087             : /**
    3088             :  * pci_d3cold_disable - Disable D3cold for device
    3089             :  * @dev: PCI device to handle
    3090             :  *
    3091             :  * This function can be used in drivers to disable D3cold from the device
    3092             :  * they handle.  It also updates upstream PCI bridge PM capabilities
    3093             :  * accordingly.
    3094             :  */
    3095           0 : void pci_d3cold_disable(struct pci_dev *dev)
    3096             : {
    3097           0 :         if (!dev->no_d3cold) {
    3098           0 :                 dev->no_d3cold = true;
    3099           0 :                 pci_bridge_d3_update(dev);
    3100             :         }
    3101           0 : }
    3102             : EXPORT_SYMBOL_GPL(pci_d3cold_disable);
    3103             : 
/**
 * pci_pm_init - Initialize PM functions of given PCI device
 * @dev: PCI device to handle.
 *
 * Probes the PCI Power Management capability and caches what it finds
 * (D1/D2 support, PME# support, transition delays) in @dev.  Devices
 * without a PM capability are left with pm_cap == 0.
 */
void pci_pm_init(struct pci_dev *dev)
{
	int pm;
	u16 status;
	u16 pmc;

	/* Runtime PM starts forbidden-but-active; it may be allowed later. */
	pm_runtime_forbid(&dev->dev);
	pm_runtime_set_active(&dev->dev);
	pm_runtime_enable(&dev->dev);
	device_enable_async_suspend(&dev->dev);
	dev->wakeup_prepared = false;

	dev->pm_cap = 0;
	dev->pme_support = 0;

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
	if (!pm)
		return;
	/* Check device's ability to generate PME# */
	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);

	/* PM capability register versions above 3 are not supported. */
	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
		pci_err(dev, "unsupported PM cap regs version (%u)\n",
			pmc & PCI_PM_CAP_VER_MASK);
		return;
	}

	dev->pm_cap = pm;
	/* Default power-transition delays; quirks may adjust these elsewhere. */
	dev->d3hot_delay = PCI_PM_D3HOT_WAIT;
	dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
	dev->bridge_d3 = pci_bridge_d3_possible(dev);
	dev->d3cold_allowed = true;

	/* Record D1/D2 support unless a quirk forbids them for this device. */
	dev->d1_support = false;
	dev->d2_support = false;
	if (!pci_no_d1d2(dev)) {
		if (pmc & PCI_PM_CAP_D1)
			dev->d1_support = true;
		if (pmc & PCI_PM_CAP_D2)
			dev->d2_support = true;

		if (dev->d1_support || dev->d2_support)
			pci_info(dev, "supports%s%s\n",
				   dev->d1_support ? " D1" : "",
				   dev->d2_support ? " D2" : "");
	}

	/* Record the power states the device can assert PME# from. */
	pmc &= PCI_PM_CAP_PME_MASK;
	if (pmc) {
		pci_info(dev, "PME# supported from%s%s%s%s%s\n",
			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
			 (pmc & PCI_PM_CAP_PME_D3hot) ? " D3hot" : "",
			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
		dev->pme_poll = true;
		/*
		 * Make device's PM flags reflect the wake-up capability, but
		 * let the user space enable it to wake up the system as needed.
		 */
		device_set_wakeup_capable(&dev->dev, true);
		/* Disable the PME# generation functionality */
		pci_pme_active(dev, false);
	}

	/* Note whether the device advertises Immediate Readiness. */
	pci_read_config_word(dev, PCI_STATUS, &status);
	if (status & PCI_STATUS_IMM_READY)
		dev->imm_ready = 1;
}
    3179             : 
    3180             : static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
    3181             : {
    3182           0 :         unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;
    3183             : 
    3184             :         switch (prop) {
    3185             :         case PCI_EA_P_MEM:
    3186             :         case PCI_EA_P_VF_MEM:
    3187             :                 flags |= IORESOURCE_MEM;
    3188             :                 break;
    3189             :         case PCI_EA_P_MEM_PREFETCH:
    3190             :         case PCI_EA_P_VF_MEM_PREFETCH:
    3191             :                 flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
    3192             :                 break;
    3193             :         case PCI_EA_P_IO:
    3194             :                 flags |= IORESOURCE_IO;
    3195             :                 break;
    3196             :         default:
    3197             :                 return 0;
    3198             :         }
    3199             : 
    3200             :         return flags;
    3201             : }
    3202             : 
    3203             : static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
    3204             :                                             u8 prop)
    3205             : {
    3206           0 :         if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
    3207           0 :                 return &dev->resource[bei];
    3208             : #ifdef CONFIG_PCI_IOV
    3209             :         else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
    3210             :                  (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
    3211             :                 return &dev->resource[PCI_IOV_RESOURCES +
    3212             :                                       bei - PCI_EA_BEI_VF_BAR0];
    3213             : #endif
    3214           0 :         else if (bei == PCI_EA_BEI_ROM)
    3215           0 :                 return &dev->resource[PCI_ROM_RESOURCE];
    3216             :         else
    3217             :                 return NULL;
    3218             : }
    3219             : 
/*
 * Read an Enhanced Allocation (EA) entry at config offset @offset and, if
 * it describes a usable BAR/ROM/VF BAR, populate the matching resource of
 * @dev.  Returns the config space offset of the next EA entry; malformed
 * or unusable entries are skipped via that same return value.
 */
static int pci_ea_read(struct pci_dev *dev, int offset)
{
	struct resource *res;
	int ent_size, ent_offset = offset;
	resource_size_t start, end;
	unsigned long flags;
	u32 dw0, bei, base, max_offset;
	u8 prop;
	bool support_64 = (sizeof(resource_size_t) >= 8);

	pci_read_config_dword(dev, ent_offset, &dw0);
	ent_offset += 4;

	/* Entry size field indicates DWORDs after 1st */
	ent_size = ((dw0 & PCI_EA_ES) + 1) << 2;

	if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */
		goto out;

	/* BAR Equivalent Indicator and Primary Properties from DWORD 0 */
	bei = (dw0 & PCI_EA_BEI) >> 4;
	prop = (dw0 & PCI_EA_PP) >> 8;

	/*
	 * If the Property is in the reserved range, try the Secondary
	 * Property instead.
	 */
	if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
		prop = (dw0 & PCI_EA_SP) >> 16;
	if (prop > PCI_EA_P_BRIDGE_IO)
		goto out;

	res = pci_ea_get_resource(dev, bei, prop);
	if (!res) {
		pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
		goto out;
	}

	flags = pci_ea_flags(dev, prop);
	if (!flags) {
		pci_err(dev, "Unsupported EA properties: %#x\n", prop);
		goto out;
	}

	/* Read Base */
	pci_read_config_dword(dev, ent_offset, &base);
	start = (base & PCI_EA_FIELD_MASK);
	ent_offset += 4;

	/* Read MaxOffset */
	pci_read_config_dword(dev, ent_offset, &max_offset);
	ent_offset += 4;

	/* Read Base MSBs (if 64-bit entry) */
	if (base & PCI_EA_IS_64) {
		u32 base_upper;

		pci_read_config_dword(dev, ent_offset, &base_upper);
		ent_offset += 4;

		flags |= IORESOURCE_MEM_64;

		/* entry starts above 32-bit boundary, can't use */
		if (!support_64 && base_upper)
			goto out;

		if (support_64)
			start |= ((u64)base_upper << 32);
	}

	/* The two low bits of MaxOffset are always treated as set */
	end = start + (max_offset | 0x03);

	/* Read MaxOffset MSBs (if 64-bit entry) */
	if (max_offset & PCI_EA_IS_64) {
		u32 max_offset_upper;

		pci_read_config_dword(dev, ent_offset, &max_offset_upper);
		ent_offset += 4;

		flags |= IORESOURCE_MEM_64;

		/* entry too big, can't use */
		if (!support_64 && max_offset_upper)
			goto out;

		if (support_64)
			end += ((u64)max_offset_upper << 32);
	}

	/* end wrapped around: malformed entry */
	if (end < start) {
		pci_err(dev, "EA Entry crosses address boundary\n");
		goto out;
	}

	/* The DWORDs consumed must match the advertised entry size */
	if (ent_size != ent_offset - offset) {
		pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n",
			ent_size, ent_offset - offset);
		goto out;
	}

	res->name = pci_name(dev);
	res->start = start;
	res->end = end;
	res->flags = flags;

	if (bei <= PCI_EA_BEI_BAR5)
		pci_info(dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
			   bei, res, prop);
	else if (bei == PCI_EA_BEI_ROM)
		pci_info(dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
			   res, prop);
	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
		pci_info(dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
			   bei - PCI_EA_BEI_VF_BAR0, res, prop);
	else
		pci_info(dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
			   bei, res, prop);

out:
	return offset + ent_size;
}
    3341             : 
    3342             : /* Enhanced Allocation Initialization */
    3343           0 : void pci_ea_init(struct pci_dev *dev)
    3344             : {
    3345             :         int ea;
    3346             :         u8 num_ent;
    3347             :         int offset;
    3348             :         int i;
    3349             : 
    3350             :         /* find PCI EA capability in list */
    3351           0 :         ea = pci_find_capability(dev, PCI_CAP_ID_EA);
    3352           0 :         if (!ea)
    3353           0 :                 return;
    3354             : 
    3355             :         /* determine the number of entries */
    3356           0 :         pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
    3357             :                                         &num_ent);
    3358           0 :         num_ent &= PCI_EA_NUM_ENT_MASK;
    3359             : 
    3360           0 :         offset = ea + PCI_EA_FIRST_ENT;
    3361             : 
    3362             :         /* Skip DWORD 2 for type 1 functions */
    3363           0 :         if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
    3364           0 :                 offset += 4;
    3365             : 
    3366             :         /* parse each EA entry */
    3367           0 :         for (i = 0; i < num_ent; ++i)
    3368           0 :                 offset = pci_ea_read(dev, offset);
    3369             : }
    3370             : 
/* Link a newly allocated capability save buffer into the device's list. */
static void pci_add_saved_cap(struct pci_dev *pci_dev,
	struct pci_cap_saved_state *new_cap)
{
	hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
}
    3376             : 
    3377             : /**
    3378             :  * _pci_add_cap_save_buffer - allocate buffer for saving given
    3379             :  *                            capability registers
    3380             :  * @dev: the PCI device
    3381             :  * @cap: the capability to allocate the buffer for
    3382             :  * @extended: Standard or Extended capability ID
    3383             :  * @size: requested size of the buffer
    3384             :  */
    3385           0 : static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
    3386             :                                     bool extended, unsigned int size)
    3387             : {
    3388             :         int pos;
    3389             :         struct pci_cap_saved_state *save_state;
    3390             : 
    3391           0 :         if (extended)
    3392           0 :                 pos = pci_find_ext_capability(dev, cap);
    3393             :         else
    3394           0 :                 pos = pci_find_capability(dev, cap);
    3395             : 
    3396           0 :         if (!pos)
    3397             :                 return 0;
    3398             : 
    3399           0 :         save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
    3400           0 :         if (!save_state)
    3401             :                 return -ENOMEM;
    3402             : 
    3403           0 :         save_state->cap.cap_nr = cap;
    3404           0 :         save_state->cap.cap_extended = extended;
    3405           0 :         save_state->cap.size = size;
    3406           0 :         pci_add_saved_cap(dev, save_state);
    3407             : 
    3408           0 :         return 0;
    3409             : }
    3410             : 
/* Allocate a save buffer for a standard (non-extended) capability @cap. */
int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
{
	return _pci_add_cap_save_buffer(dev, cap, false, size);
}
    3415             : 
/* Allocate a save buffer for an extended capability @cap. */
int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
{
	return _pci_add_cap_save_buffer(dev, cap, true, size);
}
    3420             : 
    3421             : /**
    3422             :  * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
    3423             :  * @dev: the PCI device
    3424             :  */
    3425           0 : void pci_allocate_cap_save_buffers(struct pci_dev *dev)
    3426             : {
    3427             :         int error;
    3428             : 
    3429           0 :         error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
    3430             :                                         PCI_EXP_SAVE_REGS * sizeof(u16));
    3431           0 :         if (error)
    3432           0 :                 pci_err(dev, "unable to preallocate PCI Express save buffer\n");
    3433             : 
    3434           0 :         error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
    3435           0 :         if (error)
    3436           0 :                 pci_err(dev, "unable to preallocate PCI-X save buffer\n");
    3437             : 
    3438           0 :         error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR,
    3439             :                                             2 * sizeof(u16));
    3440           0 :         if (error)
    3441           0 :                 pci_err(dev, "unable to allocate suspend buffer for LTR\n");
    3442             : 
    3443           0 :         pci_allocate_vc_save_buffers(dev);
    3444           0 : }
    3445             : 
    3446           0 : void pci_free_cap_save_buffers(struct pci_dev *dev)
    3447             : {
    3448             :         struct pci_cap_saved_state *tmp;
    3449             :         struct hlist_node *n;
    3450             : 
    3451           0 :         hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
    3452           0 :                 kfree(tmp);
    3453           0 : }
    3454             : 
    3455             : /**
    3456             :  * pci_configure_ari - enable or disable ARI forwarding
    3457             :  * @dev: the PCI device
    3458             :  *
    3459             :  * If @dev and its upstream bridge both support ARI, enable ARI in the
    3460             :  * bridge.  Otherwise, disable ARI in the bridge.
    3461             :  */
    3462           0 : void pci_configure_ari(struct pci_dev *dev)
    3463             : {
    3464             :         u32 cap;
    3465             :         struct pci_dev *bridge;
    3466             : 
    3467           0 :         if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
    3468           0 :                 return;
    3469             : 
    3470           0 :         bridge = dev->bus->self;
    3471           0 :         if (!bridge)
    3472             :                 return;
    3473             : 
    3474           0 :         pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
    3475           0 :         if (!(cap & PCI_EXP_DEVCAP2_ARI))
    3476             :                 return;
    3477             : 
    3478           0 :         if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
    3479           0 :                 pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
    3480             :                                          PCI_EXP_DEVCTL2_ARI);
    3481           0 :                 bridge->ari_enabled = 1;
    3482             :         } else {
    3483           0 :                 pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
    3484             :                                            PCI_EXP_DEVCTL2_ARI);
    3485           0 :                 bridge->ari_enabled = 0;
    3486             :         }
    3487             : }
    3488             : 
    3489           0 : static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
    3490             : {
    3491             :         int pos;
    3492             :         u16 cap, ctrl;
    3493             : 
    3494           0 :         pos = pdev->acs_cap;
    3495           0 :         if (!pos)
    3496             :                 return false;
    3497             : 
    3498             :         /*
    3499             :          * Except for egress control, capabilities are either required
    3500             :          * or only required if controllable.  Features missing from the
    3501             :          * capability field can therefore be assumed as hard-wired enabled.
    3502             :          */
    3503           0 :         pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
    3504           0 :         acs_flags &= (cap | PCI_ACS_EC);
    3505             : 
    3506           0 :         pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
    3507           0 :         return (ctrl & acs_flags) == acs_flags;
    3508             : }
    3509             : 
/**
 * pci_acs_enabled - test ACS against required flags for a given device
 * @pdev: device to test
 * @acs_flags: required PCI ACS flags
 *
 * Return true if the device supports the provided flags.  Automatically
 * filters out flags that are not implemented on multifunction devices.
 *
 * Note that this interface checks the effective ACS capabilities of the
 * device rather than the actual capabilities.  For instance, most single
 * function endpoints are not required to support ACS because they have no
 * opportunity for peer-to-peer access.  We therefore return 'true'
 * regardless of whether the device exposes an ACS capability.  This makes
 * it much easier for callers of this function to ignore the actual type
 * or topology of the device when testing ACS support.
 */
bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
{
        int ret;

        /*
         * Let device-specific quirks answer first; a non-negative return
         * is authoritative, a negative return means no quirk applied and
         * we fall through to the spec-based checks below.
         */
        ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
        if (ret >= 0)
                return ret > 0;

        /*
         * Conventional PCI and PCI-X devices never support ACS, either
         * effectively or actually.  The shared bus topology implies that
         * any device on the bus can receive or snoop DMA.
         */
        if (!pci_is_pcie(pdev))
                return false;

        switch (pci_pcie_type(pdev)) {
        /*
         * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
         * but since their primary interface is PCI/X, we conservatively
         * handle them as we would a non-PCIe device.
         */
        case PCI_EXP_TYPE_PCIE_BRIDGE:
        /*
         * PCIe 3.0, 6.12.1 excludes ACS on these devices.  "ACS is never
         * applicable... must never implement an ACS Extended Capability...".
         * This seems arbitrary, but we take a conservative interpretation
         * of this statement.
         */
        case PCI_EXP_TYPE_PCI_BRIDGE:
        case PCI_EXP_TYPE_RC_EC:
                return false;
        /*
         * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
         * implement ACS in order to indicate their peer-to-peer capabilities,
         * regardless of whether they are single- or multi-function devices.
         */
        case PCI_EXP_TYPE_DOWNSTREAM:
        case PCI_EXP_TYPE_ROOT_PORT:
                return pci_acs_flags_enabled(pdev, acs_flags);
        /*
         * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
         * implemented by the remaining PCIe types to indicate peer-to-peer
         * capabilities, but only when they are part of a multifunction
         * device.  The footnote for section 6.12 indicates the specific
         * PCIe types included here.
         */
        case PCI_EXP_TYPE_ENDPOINT:
        case PCI_EXP_TYPE_UPSTREAM:
        case PCI_EXP_TYPE_LEG_END:
        case PCI_EXP_TYPE_RC_END:
                /* Single-function: fall out of the switch and return true */
                if (!pdev->multifunction)
                        break;

                return pci_acs_flags_enabled(pdev, acs_flags);
        }

        /*
         * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
         * to single function devices with the exception of downstream ports.
         */
        return true;
}
    3589             : 
    3590             : /**
    3591             :  * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
    3592             :  * @start: starting downstream device
    3593             :  * @end: ending upstream device or NULL to search to the root bus
    3594             :  * @acs_flags: required flags
    3595             :  *
    3596             :  * Walk up a device tree from start to end testing PCI ACS support.  If
    3597             :  * any step along the way does not support the required flags, return false.
    3598             :  */
    3599           0 : bool pci_acs_path_enabled(struct pci_dev *start,
    3600             :                           struct pci_dev *end, u16 acs_flags)
    3601             : {
    3602           0 :         struct pci_dev *pdev, *parent = start;
    3603             : 
    3604             :         do {
    3605           0 :                 pdev = parent;
    3606             : 
    3607           0 :                 if (!pci_acs_enabled(pdev, acs_flags))
    3608             :                         return false;
    3609             : 
    3610           0 :                 if (pci_is_root_bus(pdev->bus))
    3611           0 :                         return (end == NULL);
    3612             : 
    3613           0 :                 parent = pdev->bus->self;
    3614           0 :         } while (pdev != end);
    3615             : 
    3616             :         return true;
    3617             : }
    3618             : 
/**
 * pci_acs_init - Initialize ACS if hardware supports it
 * @dev: the PCI device
 *
 * Caches the ACS extended-capability offset in @dev->acs_cap (0 when the
 * capability is absent) and then attempts to enable ACS.
 */
void pci_acs_init(struct pci_dev *dev)
{
        dev->acs_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);

        /*
         * Attempt to enable ACS regardless of capability because some Root
         * Ports (e.g. those quirked with *_intel_pch_acs_*) do not have
         * the standard ACS capability but still support ACS via those
         * quirks.
         */
        pci_enable_acs(dev);
}
    3635             : 
    3636             : /**
    3637             :  * pci_rebar_find_pos - find position of resize ctrl reg for BAR
    3638             :  * @pdev: PCI device
    3639             :  * @bar: BAR to find
    3640             :  *
    3641             :  * Helper to find the position of the ctrl register for a BAR.
    3642             :  * Returns -ENOTSUPP if resizable BARs are not supported at all.
    3643             :  * Returns -ENOENT if no ctrl register for the BAR could be found.
    3644             :  */
    3645           0 : static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
    3646             : {
    3647             :         unsigned int pos, nbars, i;
    3648             :         u32 ctrl;
    3649             : 
    3650           0 :         pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
    3651           0 :         if (!pos)
    3652             :                 return -ENOTSUPP;
    3653             : 
    3654           0 :         pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
    3655           0 :         nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
    3656             :                     PCI_REBAR_CTRL_NBAR_SHIFT;
    3657             : 
    3658           0 :         for (i = 0; i < nbars; i++, pos += 8) {
    3659             :                 int bar_idx;
    3660             : 
    3661           0 :                 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
    3662           0 :                 bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
    3663           0 :                 if (bar_idx == bar)
    3664           0 :                         return pos;
    3665             :         }
    3666             : 
    3667             :         return -ENOENT;
    3668             : }
    3669             : 
    3670             : /**
    3671             :  * pci_rebar_get_possible_sizes - get possible sizes for BAR
    3672             :  * @pdev: PCI device
    3673             :  * @bar: BAR to query
    3674             :  *
    3675             :  * Get the possible sizes of a resizable BAR as bitmask defined in the spec
    3676             :  * (bit 0=1MB, bit 19=512GB). Returns 0 if BAR isn't resizable.
    3677             :  */
    3678           0 : u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
    3679             : {
    3680             :         int pos;
    3681             :         u32 cap;
    3682             : 
    3683           0 :         pos = pci_rebar_find_pos(pdev, bar);
    3684           0 :         if (pos < 0)
    3685             :                 return 0;
    3686             : 
    3687           0 :         pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
    3688           0 :         cap &= PCI_REBAR_CAP_SIZES;
    3689             : 
    3690             :         /* Sapphire RX 5600 XT Pulse has an invalid cap dword for BAR 0 */
    3691           0 :         if (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x731f &&
    3692           0 :             bar == 0 && cap == 0x7000)
    3693           0 :                 cap = 0x3f000;
    3694             : 
    3695           0 :         return cap >> 4;
    3696             : }
    3697             : EXPORT_SYMBOL(pci_rebar_get_possible_sizes);
    3698             : 
    3699             : /**
    3700             :  * pci_rebar_get_current_size - get the current size of a BAR
    3701             :  * @pdev: PCI device
    3702             :  * @bar: BAR to set size to
    3703             :  *
    3704             :  * Read the size of a BAR from the resizable BAR config.
    3705             :  * Returns size if found or negative error code.
    3706             :  */
    3707           0 : int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
    3708             : {
    3709             :         int pos;
    3710             :         u32 ctrl;
    3711             : 
    3712           0 :         pos = pci_rebar_find_pos(pdev, bar);
    3713           0 :         if (pos < 0)
    3714             :                 return pos;
    3715             : 
    3716           0 :         pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
    3717           0 :         return (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> PCI_REBAR_CTRL_BAR_SHIFT;
    3718             : }
    3719             : 
    3720             : /**
    3721             :  * pci_rebar_set_size - set a new size for a BAR
    3722             :  * @pdev: PCI device
    3723             :  * @bar: BAR to set size to
    3724             :  * @size: new size as defined in the spec (0=1MB, 19=512GB)
    3725             :  *
    3726             :  * Set the new size of a BAR as defined in the spec.
    3727             :  * Returns zero if resizing was successful, error code otherwise.
    3728             :  */
    3729           0 : int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
    3730             : {
    3731             :         int pos;
    3732             :         u32 ctrl;
    3733             : 
    3734           0 :         pos = pci_rebar_find_pos(pdev, bar);
    3735           0 :         if (pos < 0)
    3736             :                 return pos;
    3737             : 
    3738           0 :         pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
    3739           0 :         ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
    3740           0 :         ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
    3741           0 :         pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
    3742           0 :         return 0;
    3743             : }
    3744             : 
/**
 * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
 * @dev: the PCI device
 * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
 *      PCI_EXP_DEVCAP2_ATOMIC_COMP32
 *      PCI_EXP_DEVCAP2_ATOMIC_COMP64
 *      PCI_EXP_DEVCAP2_ATOMIC_COMP128
 *
 * Return 0 if all upstream bridges support AtomicOp routing, egress
 * blocking is disabled on all upstream ports, and the root port supports
 * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
 * AtomicOp completion), or negative otherwise.
 */
int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
{
        struct pci_bus *bus = dev->bus;
        struct pci_dev *bridge;
        u32 cap, ctl2;

        /*
         * Per PCIe r5.0, sec 9.3.5.10, the AtomicOp Requester Enable bit
         * in Device Control 2 is reserved in VFs and the PF value applies
         * to all associated VFs.
         */
        if (dev->is_virtfn)
                return -EINVAL;

        if (!pci_is_pcie(dev))
                return -EINVAL;

        /*
         * Per PCIe r4.0, sec 6.15, endpoints and root ports may be
         * AtomicOp requesters.  For now, we only support endpoints as
         * requesters and root ports as completers.  No endpoints as
         * completers, and no peer-to-peer.
         */

        switch (pci_pcie_type(dev)) {
        case PCI_EXP_TYPE_ENDPOINT:
        case PCI_EXP_TYPE_LEG_END:
        case PCI_EXP_TYPE_RC_END:
                break;
        default:
                return -EINVAL;
        }

        /* Walk every bridge between the device and the root port */
        while (bus->parent) {
                bridge = bus->self;

                pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);

                switch (pci_pcie_type(bridge)) {
                /* Ensure switch ports support AtomicOp routing */
                case PCI_EXP_TYPE_UPSTREAM:
                case PCI_EXP_TYPE_DOWNSTREAM:
                        if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
                                return -EINVAL;
                        break;

                /* Ensure root port supports all the sizes we care about */
                case PCI_EXP_TYPE_ROOT_PORT:
                        if ((cap & cap_mask) != cap_mask)
                                return -EINVAL;
                        break;
                }

                /* Ensure upstream ports don't block AtomicOps on egress */
                if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) {
                        pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
                                                   &ctl2);
                        if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
                                return -EINVAL;
                }

                bus = bus->parent;
        }

        /* Whole path validated; enable requester capability on the device */
        pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
                                 PCI_EXP_DEVCTL2_ATOMIC_REQ);
        return 0;
}
EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
    3827             : 
    3828             : /**
    3829             :  * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
    3830             :  * @dev: the PCI device
    3831             :  * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
    3832             :  *
    3833             :  * Perform INTx swizzling for a device behind one level of bridge.  This is
    3834             :  * required by section 9.1 of the PCI-to-PCI bridge specification for devices
    3835             :  * behind bridges on add-in cards.  For devices with ARI enabled, the slot
    3836             :  * number is always 0 (see the Implementation Note in section 2.2.8.1 of
    3837             :  * the PCI Express Base Specification, Revision 2.1)
    3838             :  */
    3839           0 : u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
    3840             : {
    3841             :         int slot;
    3842             : 
    3843           0 :         if (pci_ari_enabled(dev->bus))
    3844             :                 slot = 0;
    3845             :         else
    3846           0 :                 slot = PCI_SLOT(dev->devfn);
    3847             : 
    3848           0 :         return (((pin - 1) + slot) % 4) + 1;
    3849             : }
    3850             : 
    3851           0 : int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
    3852             : {
    3853             :         u8 pin;
    3854             : 
    3855           0 :         pin = dev->pin;
    3856           0 :         if (!pin)
    3857             :                 return -1;
    3858             : 
    3859           0 :         while (!pci_is_root_bus(dev->bus)) {
    3860           0 :                 pin = pci_swizzle_interrupt_pin(dev, pin);
    3861           0 :                 dev = dev->bus->self;
    3862             :         }
    3863           0 :         *bridge = dev;
    3864           0 :         return pin;
    3865             : }
    3866             : 
    3867             : /**
    3868             :  * pci_common_swizzle - swizzle INTx all the way to root bridge
    3869             :  * @dev: the PCI device
    3870             :  * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTD, 4=INTD)
    3871             :  *
    3872             :  * Perform INTx swizzling for a device.  This traverses through all PCI-to-PCI
    3873             :  * bridges all the way up to a PCI root bus.
    3874             :  */
    3875           0 : u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
    3876             : {
    3877           0 :         u8 pin = *pinp;
    3878             : 
    3879           0 :         while (!pci_is_root_bus(dev->bus)) {
    3880           0 :                 pin = pci_swizzle_interrupt_pin(dev, pin);
    3881           0 :                 dev = dev->bus->self;
    3882             :         }
    3883           0 :         *pinp = pin;
    3884           0 :         return PCI_SLOT(dev->devfn);
    3885             : }
    3886             : EXPORT_SYMBOL_GPL(pci_common_swizzle);
    3887             : 
    3888             : /**
    3889             :  * pci_release_region - Release a PCI bar
    3890             :  * @pdev: PCI device whose resources were previously reserved by
    3891             :  *        pci_request_region()
    3892             :  * @bar: BAR to release
    3893             :  *
    3894             :  * Releases the PCI I/O and memory resources previously reserved by a
    3895             :  * successful call to pci_request_region().  Call this function only
    3896             :  * after all use of the PCI regions has ceased.
    3897             :  */
    3898           0 : void pci_release_region(struct pci_dev *pdev, int bar)
    3899             : {
    3900             :         struct pci_devres *dr;
    3901             : 
    3902           0 :         if (pci_resource_len(pdev, bar) == 0)
    3903             :                 return;
    3904           0 :         if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
    3905           0 :                 release_region(pci_resource_start(pdev, bar),
    3906             :                                 pci_resource_len(pdev, bar));
    3907           0 :         else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
    3908           0 :                 release_mem_region(pci_resource_start(pdev, bar),
    3909             :                                 pci_resource_len(pdev, bar));
    3910             : 
    3911           0 :         dr = find_pci_dr(pdev);
    3912           0 :         if (dr)
    3913           0 :                 dr->region_mask &= ~(1 << bar);
    3914             : }
    3915             : EXPORT_SYMBOL(pci_release_region);
    3916             : 
    3917             : /**
    3918             :  * __pci_request_region - Reserved PCI I/O and memory resource
    3919             :  * @pdev: PCI device whose resources are to be reserved
    3920             :  * @bar: BAR to be reserved
    3921             :  * @res_name: Name to be associated with resource.
    3922             :  * @exclusive: whether the region access is exclusive or not
    3923             :  *
    3924             :  * Mark the PCI region associated with PCI device @pdev BAR @bar as
    3925             :  * being reserved by owner @res_name.  Do not access any
    3926             :  * address inside the PCI regions unless this call returns
    3927             :  * successfully.
    3928             :  *
    3929             :  * If @exclusive is set, then the region is marked so that userspace
    3930             :  * is explicitly not allowed to map the resource via /dev/mem or
    3931             :  * sysfs MMIO access.
    3932             :  *
    3933             :  * Returns 0 on success, or %EBUSY on error.  A warning
    3934             :  * message is also printed on failure.
    3935             :  */
    3936           0 : static int __pci_request_region(struct pci_dev *pdev, int bar,
    3937             :                                 const char *res_name, int exclusive)
    3938             : {
    3939             :         struct pci_devres *dr;
    3940             : 
    3941           0 :         if (pci_resource_len(pdev, bar) == 0)
    3942             :                 return 0;
    3943             : 
    3944           0 :         if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
    3945           0 :                 if (!request_region(pci_resource_start(pdev, bar),
    3946             :                             pci_resource_len(pdev, bar), res_name))
    3947             :                         goto err_out;
    3948           0 :         } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
    3949           0 :                 if (!__request_mem_region(pci_resource_start(pdev, bar),
    3950             :                                         pci_resource_len(pdev, bar), res_name,
    3951             :                                         exclusive))
    3952             :                         goto err_out;
    3953             :         }
    3954             : 
    3955           0 :         dr = find_pci_dr(pdev);
    3956           0 :         if (dr)
    3957           0 :                 dr->region_mask |= 1 << bar;
    3958             : 
    3959             :         return 0;
    3960             : 
    3961             : err_out:
    3962           0 :         pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar,
    3963             :                  &pdev->resource[bar]);
    3964           0 :         return -EBUSY;
    3965             : }
    3966             : 
/**
 * pci_request_region - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
        /* Non-exclusive variant: userspace MMIO mappings stay permitted */
        return __pci_request_region(pdev, bar, res_name, 0);
}
EXPORT_SYMBOL(pci_request_region);
    3986             : 
    3987             : /**
    3988             :  * pci_release_selected_regions - Release selected PCI I/O and memory resources
    3989             :  * @pdev: PCI device whose resources were previously reserved
    3990             :  * @bars: Bitmask of BARs to be released
    3991             :  *
    3992             :  * Release selected PCI I/O and memory resources previously reserved.
    3993             :  * Call this function only after all use of the PCI regions has ceased.
    3994             :  */
    3995           0 : void pci_release_selected_regions(struct pci_dev *pdev, int bars)
    3996             : {
    3997             :         int i;
    3998             : 
    3999           0 :         for (i = 0; i < PCI_STD_NUM_BARS; i++)
    4000           0 :                 if (bars & (1 << i))
    4001           0 :                         pci_release_region(pdev, i);
    4002           0 : }
    4003             : EXPORT_SYMBOL(pci_release_selected_regions);
    4004             : 
    4005           0 : static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
    4006             :                                           const char *res_name, int excl)
    4007             : {
    4008             :         int i;
    4009             : 
    4010           0 :         for (i = 0; i < PCI_STD_NUM_BARS; i++)
    4011           0 :                 if (bars & (1 << i))
    4012           0 :                         if (__pci_request_region(pdev, i, res_name, excl))
    4013             :                                 goto err_out;
    4014             :         return 0;
    4015             : 
    4016             : err_out:
    4017           0 :         while (--i >= 0)
    4018           0 :                 if (bars & (1 << i))
    4019           0 :                         pci_release_region(pdev, i);
    4020             : 
    4021             :         return -EBUSY;
    4022             : }
    4023             : 
    4024             : 
/**
 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @bars: Bitmask of BARs to be requested
 * @res_name: Name to be associated with resource
 *
 * Returns 0 on success, or %EBUSY if any requested BAR could not be
 * reserved (in which case none remain reserved).
 */
int pci_request_selected_regions(struct pci_dev *pdev, int bars,
                                 const char *res_name)
{
        /* Non-exclusive variant: userspace MMIO mappings stay permitted */
        return __pci_request_selected_regions(pdev, bars, res_name, 0);
}
EXPORT_SYMBOL(pci_request_selected_regions);
    4037             : 
/**
 * pci_request_selected_regions_exclusive - Reserve regions, excluding userspace
 * @pdev: PCI device whose resources are to be reserved
 * @bars: Bitmask of BARs to be requested
 * @res_name: Name to be associated with resource
 *
 * Like pci_request_selected_regions(), but the regions are additionally
 * marked %IORESOURCE_EXCLUSIVE so userspace cannot map them via /dev/mem
 * or sysfs MMIO access.
 */
int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
                                           const char *res_name)
{
        return __pci_request_selected_regions(pdev, bars, res_name,
                        IORESOURCE_EXCLUSIVE);
}
EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
    4045             : 
/**
 * pci_release_regions - Release reserved PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved by
 *        pci_request_regions()
 *
 * Releases all PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_regions().  Call this function only
 * after all use of the PCI regions has ceased.
 */

void pci_release_regions(struct pci_dev *pdev)
{
        /* Release every standard BAR: mask covers BARs 0..PCI_STD_NUM_BARS-1 */
        pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1);
}
EXPORT_SYMBOL(pci_release_regions);
    4061             : 
    4062             : /**
    4063             :  * pci_request_regions - Reserve PCI I/O and memory resources
    4064             :  * @pdev: PCI device whose resources are to be reserved
    4065             :  * @res_name: Name to be associated with resource.
    4066             :  *
    4067             :  * Mark all PCI regions associated with PCI device @pdev as
    4068             :  * being reserved by owner @res_name.  Do not access any
    4069             :  * address inside the PCI regions unless this call returns
    4070             :  * successfully.
    4071             :  *
    4072             :  * Returns 0 on success, or %EBUSY on error.  A warning
    4073             :  * message is also printed on failure.
    4074             :  */
    4075           0 : int pci_request_regions(struct pci_dev *pdev, const char *res_name)
    4076             : {
    4077           0 :         return pci_request_selected_regions(pdev,
    4078             :                         ((1 << PCI_STD_NUM_BARS) - 1), res_name);
    4079             : }
    4080             : EXPORT_SYMBOL(pci_request_regions);
    4081             : 
    4082             : /**
    4083             :  * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
    4084             :  * @pdev: PCI device whose resources are to be reserved
    4085             :  * @res_name: Name to be associated with resource.
    4086             :  *
    4087             :  * Mark all PCI regions associated with PCI device @pdev as being reserved
    4088             :  * by owner @res_name.  Do not access any address inside the PCI regions
    4089             :  * unless this call returns successfully.
    4090             :  *
    4091             :  * pci_request_regions_exclusive() will mark the region so that /dev/mem
    4092             :  * and the sysfs MMIO access will not be allowed.
    4093             :  *
    4094             :  * Returns 0 on success, or %EBUSY on error.  A warning message is also
    4095             :  * printed on failure.
    4096             :  */
    4097           0 : int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
    4098             : {
    4099           0 :         return pci_request_selected_regions_exclusive(pdev,
    4100             :                                 ((1 << PCI_STD_NUM_BARS) - 1), res_name);
    4101             : }
    4102             : EXPORT_SYMBOL(pci_request_regions_exclusive);
    4103             : 
    4104             : /*
    4105             :  * Record the PCI IO range (expressed as CPU physical address + size).
    4106             :  * Return a negative value if an error has occurred, zero otherwise
    4107             :  */
    4108           0 : int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
    4109             :                         resource_size_t size)
    4110             : {
    4111           0 :         int ret = 0;
    4112             : #ifdef PCI_IOBASE
    4113             :         struct logic_pio_hwaddr *range;
    4114             : 
    4115           0 :         if (!size || addr + size < addr)
    4116             :                 return -EINVAL;
    4117             : 
    4118           0 :         range = kzalloc(sizeof(*range), GFP_ATOMIC);
    4119           0 :         if (!range)
    4120             :                 return -ENOMEM;
    4121             : 
    4122           0 :         range->fwnode = fwnode;
    4123           0 :         range->size = size;
    4124           0 :         range->hw_start = addr;
    4125           0 :         range->flags = LOGIC_PIO_CPU_MMIO;
    4126             : 
    4127           0 :         ret = logic_pio_register_range(range);
    4128           0 :         if (ret)
    4129           0 :                 kfree(range);
    4130             : 
    4131             :         /* Ignore duplicates due to deferred probing */
    4132           0 :         if (ret == -EEXIST)
    4133           0 :                 ret = 0;
    4134             : #endif
    4135             : 
    4136             :         return ret;
    4137             : }
    4138             : 
    4139           0 : phys_addr_t pci_pio_to_address(unsigned long pio)
    4140             : {
    4141           0 :         phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
    4142             : 
    4143             : #ifdef PCI_IOBASE
    4144           0 :         if (pio >= MMIO_UPPER_LIMIT)
    4145             :                 return address;
    4146             : 
    4147           0 :         address = logic_pio_to_hwaddr(pio);
    4148             : #endif
    4149             : 
    4150           0 :         return address;
    4151             : }
    4152             : EXPORT_SYMBOL_GPL(pci_pio_to_address);
    4153             : 
    4154           0 : unsigned long __weak pci_address_to_pio(phys_addr_t address)
    4155             : {
    4156             : #ifdef PCI_IOBASE
    4157           0 :         return logic_pio_trans_cpuaddr(address);
    4158             : #else
    4159             :         if (address > IO_SPACE_LIMIT)
    4160             :                 return (unsigned long)-1;
    4161             : 
    4162             :         return (unsigned long) address;
    4163             : #endif
    4164             : }
    4165             : 
    4166             : /**
    4167             :  * pci_remap_iospace - Remap the memory mapped I/O space
    4168             :  * @res: Resource describing the I/O space
    4169             :  * @phys_addr: physical address of range to be mapped
    4170             :  *
    4171             :  * Remap the memory mapped I/O space described by the @res and the CPU
    4172             :  * physical address @phys_addr into virtual address space.  Only
    4173             :  * architectures that have memory mapped IO functions defined (and the
    4174             :  * PCI_IOBASE value defined) should call this function.
    4175             :  */
    4176             : #ifndef pci_remap_iospace
    4177           0 : int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
    4178             : {
    4179             : #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
    4180           0 :         unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
    4181             : 
    4182           0 :         if (!(res->flags & IORESOURCE_IO))
    4183             :                 return -EINVAL;
    4184             : 
    4185           0 :         if (res->end > IO_SPACE_LIMIT)
    4186             :                 return -EINVAL;
    4187             : 
    4188           0 :         return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
    4189           0 :                                   pgprot_device(PAGE_KERNEL));
    4190             : #else
    4191             :         /*
    4192             :          * This architecture does not have memory mapped I/O space,
    4193             :          * so this function should never be called
    4194             :          */
    4195             :         WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
    4196             :         return -ENODEV;
    4197             : #endif
    4198             : }
    4199             : EXPORT_SYMBOL(pci_remap_iospace);
    4200             : #endif
    4201             : 
    4202             : /**
    4203             :  * pci_unmap_iospace - Unmap the memory mapped I/O space
    4204             :  * @res: resource to be unmapped
    4205             :  *
    4206             :  * Unmap the CPU virtual address @res from virtual address space.  Only
    4207             :  * architectures that have memory mapped IO functions defined (and the
    4208             :  * PCI_IOBASE value defined) should call this function.
    4209             :  */
    4210           0 : void pci_unmap_iospace(struct resource *res)
    4211             : {
    4212             : #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
    4213           0 :         unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
    4214             : 
    4215           0 :         vunmap_range(vaddr, vaddr + resource_size(res));
    4216             : #endif
    4217           0 : }
    4218             : EXPORT_SYMBOL(pci_unmap_iospace);
    4219             : 
    4220           0 : static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
    4221             : {
    4222           0 :         struct resource **res = ptr;
    4223             : 
    4224           0 :         pci_unmap_iospace(*res);
    4225           0 : }
    4226             : 
    4227             : /**
    4228             :  * devm_pci_remap_iospace - Managed pci_remap_iospace()
    4229             :  * @dev: Generic device to remap IO address for
    4230             :  * @res: Resource describing the I/O space
    4231             :  * @phys_addr: physical address of range to be mapped
    4232             :  *
    4233             :  * Managed pci_remap_iospace().  Map is automatically unmapped on driver
    4234             :  * detach.
    4235             :  */
    4236           0 : int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
    4237             :                            phys_addr_t phys_addr)
    4238             : {
    4239             :         const struct resource **ptr;
    4240             :         int error;
    4241             : 
    4242           0 :         ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
    4243           0 :         if (!ptr)
    4244             :                 return -ENOMEM;
    4245             : 
    4246           0 :         error = pci_remap_iospace(res, phys_addr);
    4247           0 :         if (error) {
    4248           0 :                 devres_free(ptr);
    4249             :         } else  {
    4250           0 :                 *ptr = res;
    4251           0 :                 devres_add(dev, ptr);
    4252             :         }
    4253             : 
    4254             :         return error;
    4255             : }
    4256             : EXPORT_SYMBOL(devm_pci_remap_iospace);
    4257             : 
    4258             : /**
    4259             :  * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
    4260             :  * @dev: Generic device to remap IO address for
    4261             :  * @offset: Resource address to map
    4262             :  * @size: Size of map
    4263             :  *
    4264             :  * Managed pci_remap_cfgspace().  Map is automatically unmapped on driver
    4265             :  * detach.
    4266             :  */
    4267           0 : void __iomem *devm_pci_remap_cfgspace(struct device *dev,
    4268             :                                       resource_size_t offset,
    4269             :                                       resource_size_t size)
    4270             : {
    4271             :         void __iomem **ptr, *addr;
    4272             : 
    4273           0 :         ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
    4274           0 :         if (!ptr)
    4275             :                 return NULL;
    4276             : 
    4277           0 :         addr = pci_remap_cfgspace(offset, size);
    4278           0 :         if (addr) {
    4279           0 :                 *ptr = addr;
    4280           0 :                 devres_add(dev, ptr);
    4281             :         } else
    4282           0 :                 devres_free(ptr);
    4283             : 
    4284             :         return addr;
    4285             : }
    4286             : EXPORT_SYMBOL(devm_pci_remap_cfgspace);
    4287             : 
    4288             : /**
    4289             :  * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
    4290             :  * @dev: generic device to handle the resource for
    4291             :  * @res: configuration space resource to be handled
    4292             :  *
    4293             :  * Checks that a resource is a valid memory region, requests the memory
    4294             :  * region and ioremaps with pci_remap_cfgspace() API that ensures the
    4295             :  * proper PCI configuration space memory attributes are guaranteed.
    4296             :  *
    4297             :  * All operations are managed and will be undone on driver detach.
    4298             :  *
    4299             :  * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
    4300             :  * on failure. Usage example::
    4301             :  *
    4302             :  *      res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    4303             :  *      base = devm_pci_remap_cfg_resource(&pdev->dev, res);
    4304             :  *      if (IS_ERR(base))
    4305             :  *              return PTR_ERR(base);
    4306             :  */
    4307           0 : void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
    4308             :                                           struct resource *res)
    4309             : {
    4310             :         resource_size_t size;
    4311             :         const char *name;
    4312             :         void __iomem *dest_ptr;
    4313             : 
    4314           0 :         BUG_ON(!dev);
    4315             : 
    4316           0 :         if (!res || resource_type(res) != IORESOURCE_MEM) {
    4317           0 :                 dev_err(dev, "invalid resource\n");
    4318           0 :                 return IOMEM_ERR_PTR(-EINVAL);
    4319             :         }
    4320             : 
    4321           0 :         size = resource_size(res);
    4322             : 
    4323           0 :         if (res->name)
    4324           0 :                 name = devm_kasprintf(dev, GFP_KERNEL, "%s %s", dev_name(dev),
    4325             :                                       res->name);
    4326             :         else
    4327           0 :                 name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
    4328           0 :         if (!name)
    4329             :                 return IOMEM_ERR_PTR(-ENOMEM);
    4330             : 
    4331           0 :         if (!devm_request_mem_region(dev, res->start, size, name)) {
    4332           0 :                 dev_err(dev, "can't request region for resource %pR\n", res);
    4333           0 :                 return IOMEM_ERR_PTR(-EBUSY);
    4334             :         }
    4335             : 
    4336           0 :         dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
    4337           0 :         if (!dest_ptr) {
    4338           0 :                 dev_err(dev, "ioremap failed for resource %pR\n", res);
    4339           0 :                 devm_release_mem_region(dev, res->start, size);
    4340           0 :                 dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
    4341             :         }
    4342             : 
    4343             :         return dest_ptr;
    4344             : }
    4345             : EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
    4346             : 
    4347           0 : static void __pci_set_master(struct pci_dev *dev, bool enable)
    4348             : {
    4349             :         u16 old_cmd, cmd;
    4350             : 
    4351           0 :         pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
    4352           0 :         if (enable)
    4353           0 :                 cmd = old_cmd | PCI_COMMAND_MASTER;
    4354             :         else
    4355           0 :                 cmd = old_cmd & ~PCI_COMMAND_MASTER;
    4356           0 :         if (cmd != old_cmd) {
    4357             :                 pci_dbg(dev, "%s bus mastering\n",
    4358             :                         enable ? "enabling" : "disabling");
    4359           0 :                 pci_write_config_word(dev, PCI_COMMAND, cmd);
    4360             :         }
    4361           0 :         dev->is_busmaster = enable;
    4362           0 : }
    4363             : 
    4364             : /**
    4365             :  * pcibios_setup - process "pci=" kernel boot arguments
    4366             :  * @str: string used to pass in "pci=" kernel boot arguments
    4367             :  *
    4368             :  * Process kernel boot arguments.  This is the default implementation.
    4369             :  * Architecture specific implementations can override this as necessary.
    4370             :  */
    4371           0 : char * __weak __init pcibios_setup(char *str)
    4372             : {
    4373           0 :         return str;
    4374             : }
    4375             : 
    4376             : /**
    4377             :  * pcibios_set_master - enable PCI bus-mastering for device dev
    4378             :  * @dev: the PCI device to enable
    4379             :  *
    4380             :  * Enables PCI bus-mastering for the device.  This is the default
    4381             :  * implementation.  Architecture specific implementations can override
    4382             :  * this if necessary.
    4383             :  */
    4384           0 : void __weak pcibios_set_master(struct pci_dev *dev)
    4385             : {
    4386             :         u8 lat;
    4387             : 
    4388             :         /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
    4389           0 :         if (pci_is_pcie(dev))
    4390           0 :                 return;
    4391             : 
    4392           0 :         pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
    4393           0 :         if (lat < 16)
    4394           0 :                 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
    4395           0 :         else if (lat > pcibios_max_latency)
    4396           0 :                 lat = pcibios_max_latency;
    4397             :         else
    4398             :                 return;
    4399             : 
    4400           0 :         pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
    4401             : }
    4402             : 
    4403             : /**
    4404             :  * pci_set_master - enables bus-mastering for device dev
    4405             :  * @dev: the PCI device to enable
    4406             :  *
    4407             :  * Enables bus-mastering on the device and calls pcibios_set_master()
    4408             :  * to do the needed arch specific settings.
    4409             :  */
    4410           0 : void pci_set_master(struct pci_dev *dev)
    4411             : {
    4412           0 :         __pci_set_master(dev, true);
    4413           0 :         pcibios_set_master(dev);
    4414           0 : }
    4415             : EXPORT_SYMBOL(pci_set_master);
    4416             : 
    4417             : /**
    4418             :  * pci_clear_master - disables bus-mastering for device dev
    4419             :  * @dev: the PCI device to disable
    4420             :  */
    4421           0 : void pci_clear_master(struct pci_dev *dev)
    4422             : {
    4423           0 :         __pci_set_master(dev, false);
    4424           0 : }
    4425             : EXPORT_SYMBOL(pci_clear_master);
    4426             : 
    4427             : /**
    4428             :  * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
    4429             :  * @dev: the PCI device for which MWI is to be enabled
    4430             :  *
    4431             :  * Helper function for pci_set_mwi.
    4432             :  * Originally copied from drivers/net/acenic.c.
    4433             :  * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
    4434             :  *
    4435             :  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
    4436             :  */
    4437           0 : int pci_set_cacheline_size(struct pci_dev *dev)
    4438             : {
    4439             :         u8 cacheline_size;
    4440             : 
    4441           0 :         if (!pci_cache_line_size)
    4442             :                 return -EINVAL;
    4443             : 
    4444             :         /* Validate current setting: the PCI_CACHE_LINE_SIZE must be
    4445             :            equal to or multiple of the right value. */
    4446           0 :         pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
    4447           0 :         if (cacheline_size >= pci_cache_line_size &&
    4448           0 :             (cacheline_size % pci_cache_line_size) == 0)
    4449             :                 return 0;
    4450             : 
    4451             :         /* Write the correct value. */
    4452           0 :         pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
    4453             :         /* Read it back. */
    4454           0 :         pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
    4455           0 :         if (cacheline_size == pci_cache_line_size)
    4456             :                 return 0;
    4457             : 
    4458             :         pci_dbg(dev, "cache line size of %d is not supported\n",
    4459             :                    pci_cache_line_size << 2);
    4460             : 
    4461           0 :         return -EINVAL;
    4462             : }
    4463             : EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
    4464             : 
    4465             : /**
    4466             :  * pci_set_mwi - enables memory-write-invalidate PCI transaction
    4467             :  * @dev: the PCI device for which MWI is enabled
    4468             :  *
    4469             :  * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
    4470             :  *
    4471             :  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
    4472             :  */
    4473           0 : int pci_set_mwi(struct pci_dev *dev)
    4474             : {
    4475             : #ifdef PCI_DISABLE_MWI
    4476             :         return 0;
    4477             : #else
    4478             :         int rc;
    4479             :         u16 cmd;
    4480             : 
    4481           0 :         rc = pci_set_cacheline_size(dev);
    4482           0 :         if (rc)
    4483             :                 return rc;
    4484             : 
    4485           0 :         pci_read_config_word(dev, PCI_COMMAND, &cmd);
    4486           0 :         if (!(cmd & PCI_COMMAND_INVALIDATE)) {
    4487             :                 pci_dbg(dev, "enabling Mem-Wr-Inval\n");
    4488           0 :                 cmd |= PCI_COMMAND_INVALIDATE;
    4489           0 :                 pci_write_config_word(dev, PCI_COMMAND, cmd);
    4490             :         }
    4491             :         return 0;
    4492             : #endif
    4493             : }
    4494             : EXPORT_SYMBOL(pci_set_mwi);
    4495             : 
    4496             : /**
    4497             :  * pcim_set_mwi - a device-managed pci_set_mwi()
    4498             :  * @dev: the PCI device for which MWI is enabled
    4499             :  *
    4500             :  * Managed pci_set_mwi().
    4501             :  *
    4502             :  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
    4503             :  */
    4504           0 : int pcim_set_mwi(struct pci_dev *dev)
    4505             : {
    4506             :         struct pci_devres *dr;
    4507             : 
    4508           0 :         dr = find_pci_dr(dev);
    4509           0 :         if (!dr)
    4510             :                 return -ENOMEM;
    4511             : 
    4512           0 :         dr->mwi = 1;
    4513           0 :         return pci_set_mwi(dev);
    4514             : }
    4515             : EXPORT_SYMBOL(pcim_set_mwi);
    4516             : 
    4517             : /**
    4518             :  * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
    4519             :  * @dev: the PCI device for which MWI is enabled
    4520             :  *
    4521             :  * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
    4522             :  * Callers are not required to check the return value.
    4523             :  *
    4524             :  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
    4525             :  */
    4526           0 : int pci_try_set_mwi(struct pci_dev *dev)
    4527             : {
    4528             : #ifdef PCI_DISABLE_MWI
    4529             :         return 0;
    4530             : #else
    4531           0 :         return pci_set_mwi(dev);
    4532             : #endif
    4533             : }
    4534             : EXPORT_SYMBOL(pci_try_set_mwi);
    4535             : 
    4536             : /**
    4537             :  * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
    4538             :  * @dev: the PCI device to disable
    4539             :  *
    4540             :  * Disables PCI Memory-Write-Invalidate transaction on the device
    4541             :  */
    4542           0 : void pci_clear_mwi(struct pci_dev *dev)
    4543             : {
    4544             : #ifndef PCI_DISABLE_MWI
    4545             :         u16 cmd;
    4546             : 
    4547           0 :         pci_read_config_word(dev, PCI_COMMAND, &cmd);
    4548           0 :         if (cmd & PCI_COMMAND_INVALIDATE) {
    4549           0 :                 cmd &= ~PCI_COMMAND_INVALIDATE;
    4550           0 :                 pci_write_config_word(dev, PCI_COMMAND, cmd);
    4551             :         }
    4552             : #endif
    4553           0 : }
    4554             : EXPORT_SYMBOL(pci_clear_mwi);
    4555             : 
    4556             : /**
    4557             :  * pci_disable_parity - disable parity checking for device
    4558             :  * @dev: the PCI device to operate on
    4559             :  *
    4560             :  * Disable parity checking for device @dev
    4561             :  */
    4562           0 : void pci_disable_parity(struct pci_dev *dev)
    4563             : {
    4564             :         u16 cmd;
    4565             : 
    4566           0 :         pci_read_config_word(dev, PCI_COMMAND, &cmd);
    4567           0 :         if (cmd & PCI_COMMAND_PARITY) {
    4568           0 :                 cmd &= ~PCI_COMMAND_PARITY;
    4569           0 :                 pci_write_config_word(dev, PCI_COMMAND, cmd);
    4570             :         }
    4571           0 : }
    4572             : 
    4573             : /**
    4574             :  * pci_intx - enables/disables PCI INTx for device dev
    4575             :  * @pdev: the PCI device to operate on
    4576             :  * @enable: boolean: whether to enable or disable PCI INTx
    4577             :  *
    4578             :  * Enables/disables PCI INTx for device @pdev
    4579             :  */
    4580           0 : void pci_intx(struct pci_dev *pdev, int enable)
    4581             : {
    4582             :         u16 pci_command, new;
    4583             : 
    4584           0 :         pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
    4585             : 
    4586           0 :         if (enable)
    4587           0 :                 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
    4588             :         else
    4589           0 :                 new = pci_command | PCI_COMMAND_INTX_DISABLE;
    4590             : 
    4591           0 :         if (new != pci_command) {
    4592             :                 struct pci_devres *dr;
    4593             : 
    4594           0 :                 pci_write_config_word(pdev, PCI_COMMAND, new);
    4595             : 
    4596           0 :                 dr = find_pci_dr(pdev);
    4597           0 :                 if (dr && !dr->restore_intx) {
    4598           0 :                         dr->restore_intx = 1;
    4599           0 :                         dr->orig_intx = !enable;
    4600             :                 }
    4601             :         }
    4602           0 : }
    4603             : EXPORT_SYMBOL_GPL(pci_intx);
    4604             : 
    4605           0 : static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
    4606             : {
    4607           0 :         struct pci_bus *bus = dev->bus;
    4608           0 :         bool mask_updated = true;
    4609             :         u32 cmd_status_dword;
    4610             :         u16 origcmd, newcmd;
    4611             :         unsigned long flags;
    4612             :         bool irq_pending;
    4613             : 
    4614             :         /*
    4615             :          * We do a single dword read to retrieve both command and status.
    4616             :          * Document assumptions that make this possible.
    4617             :          */
    4618             :         BUILD_BUG_ON(PCI_COMMAND % 4);
    4619             :         BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
    4620             : 
    4621           0 :         raw_spin_lock_irqsave(&pci_lock, flags);
    4622             : 
    4623           0 :         bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
    4624             : 
    4625           0 :         irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
    4626             : 
    4627             :         /*
    4628             :          * Check interrupt status register to see whether our device
    4629             :          * triggered the interrupt (when masking) or the next IRQ is
    4630             :          * already pending (when unmasking).
    4631             :          */
    4632           0 :         if (mask != irq_pending) {
    4633             :                 mask_updated = false;
    4634             :                 goto done;
    4635             :         }
    4636             : 
    4637           0 :         origcmd = cmd_status_dword;
    4638           0 :         newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
    4639           0 :         if (mask)
    4640           0 :                 newcmd |= PCI_COMMAND_INTX_DISABLE;
    4641           0 :         if (newcmd != origcmd)
    4642           0 :                 bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
    4643             : 
    4644             : done:
    4645           0 :         raw_spin_unlock_irqrestore(&pci_lock, flags);
    4646             : 
    4647           0 :         return mask_updated;
    4648             : }
    4649             : 
    4650             : /**
    4651             :  * pci_check_and_mask_intx - mask INTx on pending interrupt
    4652             :  * @dev: the PCI device to operate on
    4653             :  *
    4654             :  * Check if the device dev has its INTx line asserted, mask it and return
    4655             :  * true in that case. False is returned if no interrupt was pending.
    4656             :  */
    4657           0 : bool pci_check_and_mask_intx(struct pci_dev *dev)
    4658             : {
    4659           0 :         return pci_check_and_set_intx_mask(dev, true);
    4660             : }
    4661             : EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
    4662             : 
    4663             : /**
    4664             :  * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
    4665             :  * @dev: the PCI device to operate on
    4666             :  *
    4667             :  * Check if the device dev has its INTx line asserted, unmask it if not and
    4668             :  * return true. False is returned and the mask remains active if there was
    4669             :  * still an interrupt pending.
    4670             :  */
    4671           0 : bool pci_check_and_unmask_intx(struct pci_dev *dev)
    4672             : {
    4673           0 :         return pci_check_and_set_intx_mask(dev, false);
    4674             : }
    4675             : EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
    4676             : 
    4677             : /**
    4678             :  * pci_wait_for_pending_transaction - wait for pending transaction
    4679             :  * @dev: the PCI device to operate on
    4680             :  *
    4681             :  * Return 0 if transaction is pending 1 otherwise.
    4682             :  */
    4683           0 : int pci_wait_for_pending_transaction(struct pci_dev *dev)
    4684             : {
    4685           0 :         if (!pci_is_pcie(dev))
    4686             :                 return 1;
    4687             : 
    4688           0 :         return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
    4689             :                                     PCI_EXP_DEVSTA_TRPND);
    4690             : }
    4691             : EXPORT_SYMBOL(pci_wait_for_pending_transaction);
    4692             : 
/**
 * pcie_flr - initiate a PCIe function level reset
 * @dev: device to reset
 *
 * Initiate a function level reset unconditionally on @dev without
 * checking any flags and DEVCAP
 *
 * Returns 0 on success, or the error from waiting for the device to
 * become responsive again after the reset.
 */
int pcie_flr(struct pci_dev *dev)
{
        /* Best effort: warn, but reset anyway if transactions never settle. */
        if (!pci_wait_for_pending_transaction(dev))
                pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n");

        /* Trigger the FLR by setting the Initiate FLR bit in Device Control. */
        pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);

        /* Devices flagged as immediately ready need no settle delay. */
        if (dev->imm_ready)
                return 0;

        /*
         * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within
         * 100ms, but may silently discard requests while the FLR is in
         * progress.  Wait 100ms before trying to access the device.
         */
        msleep(100);

        return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS);
}
EXPORT_SYMBOL_GPL(pcie_flr);
    4720             : 
    4721             : /**
    4722             :  * pcie_reset_flr - initiate a PCIe function level reset
    4723             :  * @dev: device to reset
    4724             :  * @probe: if true, return 0 if device can be reset this way
    4725             :  *
    4726             :  * Initiate a function level reset on @dev.
    4727             :  */
    4728           0 : int pcie_reset_flr(struct pci_dev *dev, bool probe)
    4729             : {
    4730           0 :         if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
    4731             :                 return -ENOTTY;
    4732             : 
    4733           0 :         if (!(dev->devcap & PCI_EXP_DEVCAP_FLR))
    4734             :                 return -ENOTTY;
    4735             : 
    4736           0 :         if (probe)
    4737             :                 return 0;
    4738             : 
    4739           0 :         return pcie_flr(dev);
    4740             : }
    4741             : EXPORT_SYMBOL_GPL(pcie_reset_flr);
    4742             : 
/*
 * pci_af_flr - reset the function via the conventional-PCI Advanced
 * Features capability.
 * @dev: device to reset
 * @probe: if true, only report whether AF FLR is usable (0) or not (-ENOTTY)
 *
 * Returns -ENOTTY when the AF capability is absent, the device is quirked
 * against FLR, or the capability does not implement TP and FLR.
 */
static int pci_af_flr(struct pci_dev *dev, bool probe)
{
        int pos;
        u8 cap;

        pos = pci_find_capability(dev, PCI_CAP_ID_AF);
        if (!pos)
                return -ENOTTY;

        if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
                return -ENOTTY;

        /* Both Transactions Pending and FLR must be implemented. */
        pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
        if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
                return -ENOTTY;

        if (probe)
                return 0;

        /*
         * Wait for Transaction Pending bit to clear.  A word-aligned test
         * is used, so we use the control offset rather than status and shift
         * the test bit to match.
         */
        if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
                                 PCI_AF_STATUS_TP << 8))
                pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");

        pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);

        /* Devices flagged as immediately ready need no settle delay. */
        if (dev->imm_ready)
                return 0;

        /*
         * Per Advanced Capabilities for Conventional PCI ECN, 13 April 2006,
         * updated 27 July 2006; a device must complete an FLR within
         * 100ms, but may silently discard requests while the FLR is in
         * progress.  Wait 100ms before trying to access the device.
         */
        msleep(100);

        return pci_dev_wait(dev, "AF_FLR", PCIE_RESET_READY_POLL_MS);
}
    4786             : 
/**
 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
 * @dev: Device to reset.
 * @probe: if true, return 0 if the device can be reset this way.
 *
 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
 * unset, it will be reinitialized internally when going from PCI_D3hot to
 * PCI_D0.  If that's the case and the device is not in a low-power state
 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
 *
 * NOTE: This causes the caller to sleep for twice the device power transition
 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
 * by default (i.e. unless the @dev's d3hot_delay field has a different value).
 * Moreover, only devices in D0 can be reset by this function.
 */
static int pci_pm_reset(struct pci_dev *dev, bool probe)
{
        u16 csr;

        /* No PM capability, or quirked against PM reset: unusable. */
        if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
                return -ENOTTY;

        /* No_Soft_Reset set means D3hot->D0 does not reset the function. */
        pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
        if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
                return -ENOTTY;

        if (probe)
                return 0;

        if (dev->current_state != PCI_D0)
                return -EINVAL;

        /* Enter D3hot and honor the transition delay. */
        csr &= ~PCI_PM_CTRL_STATE_MASK;
        csr |= PCI_D3hot;
        pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
        pci_dev_d3_sleep(dev);

        /* Return to D0, which performs the internal soft reset. */
        csr &= ~PCI_PM_CTRL_STATE_MASK;
        csr |= PCI_D0;
        pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
        pci_dev_d3_sleep(dev);

        return pci_dev_wait(dev, "PM D3hot->D0", PCIE_RESET_READY_POLL_MS);
}
    4831             : 
/**
 * pcie_wait_for_link_delay - Wait until link is active or inactive
 * @pdev: Bridge device
 * @active: waiting for active or inactive?
 * @delay: Delay to wait after link has become active (in ms)
 *
 * Use this to wait till link becomes active or inactive.
 *
 * Returns true if the link reached the requested state, false if it did
 * not within the polling timeout.
 */
static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
                                     int delay)
{
        int timeout = 1000;
        bool ret;
        u16 lnk_status;

        /*
         * Some controllers might not implement link active reporting. In this
         * case, we wait for 1000 ms + any delay requested by the caller.
         */
        if (!pdev->link_active_reporting) {
                msleep(timeout + delay);
                return true;
        }

        /*
         * PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within 20ms,
         * after which we should expect an link active if the reset was
         * successful. If so, software must wait a minimum 100ms before sending
         * configuration requests to devices downstream this port.
         *
         * If the link fails to activate, either the device was physically
         * removed or the link is permanently failed.
         */
        if (active)
                msleep(20);
        /* Poll DLLLA in Link Status every 10 ms until it matches @active. */
        for (;;) {
                pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
                ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
                if (ret == active)
                        break;
                if (timeout <= 0)
                        break;
                msleep(10);
                timeout -= 10;
        }
        /* Link came up: give downstream devices @delay ms before access. */
        if (active && ret)
                msleep(delay);

        return ret == active;
}
    4882             : 
    4883             : /**
    4884             :  * pcie_wait_for_link - Wait until link is active or inactive
    4885             :  * @pdev: Bridge device
    4886             :  * @active: waiting for active or inactive?
    4887             :  *
    4888             :  * Use this to wait till link becomes active or inactive.
    4889             :  */
    4890           0 : bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
    4891             : {
    4892           0 :         return pcie_wait_for_link_delay(pdev, active, 100);
    4893             : }
    4894             : 
    4895             : /*
    4896             :  * Find maximum D3cold delay required by all the devices on the bus.  The
    4897             :  * spec says 100 ms, but firmware can lower it and we allow drivers to
    4898             :  * increase it as well.
    4899             :  *
    4900             :  * Called with @pci_bus_sem locked for reading.
    4901             :  */
    4902             : static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
    4903             : {
    4904             :         const struct pci_dev *pdev;
    4905           0 :         int min_delay = 100;
    4906           0 :         int max_delay = 0;
    4907             : 
    4908           0 :         list_for_each_entry(pdev, &bus->devices, bus_list) {
    4909           0 :                 if (pdev->d3cold_delay < min_delay)
    4910           0 :                         min_delay = pdev->d3cold_delay;
    4911           0 :                 if (pdev->d3cold_delay > max_delay)
    4912           0 :                         max_delay = pdev->d3cold_delay;
    4913             :         }
    4914             : 
    4915           0 :         return max(min_delay, max_delay);
    4916             : }
    4917             : 
/**
 * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible
 * @dev: PCI bridge
 *
 * Handle necessary delays before access to the devices on the secondary
 * side of the bridge are permitted after D3cold to D0 transition.
 *
 * For PCIe this means the delays in PCIe 5.0 section 6.6.1. For
 * conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section
 * 4.3.2.
 */
void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
{
        struct pci_dev *child;
        int delay;

        /* Nothing reachable behind a disconnected device. */
        if (pci_dev_is_disconnected(dev))
                return;

        /* Only bridges that can enter D3 need the post-D3cold delays. */
        if (!pci_is_bridge(dev) || !dev->bridge_d3)
                return;

        down_read(&pci_bus_sem);

        /*
         * We only deal with devices that are present currently on the bus.
         * For any hot-added devices the access delay is handled in pciehp
         * board_added(). In case of ACPI hotplug the firmware is expected
         * to configure the devices before OS is notified.
         */
        if (!dev->subordinate || list_empty(&dev->subordinate->devices)) {
                up_read(&pci_bus_sem);
                return;
        }

        /* Take d3cold_delay requirements into account */
        delay = pci_bus_max_d3cold_delay(dev->subordinate);
        if (!delay) {
                up_read(&pci_bus_sem);
                return;
        }

        /* Remember the first child for the presence check further below. */
        child = list_first_entry(&dev->subordinate->devices, struct pci_dev,
                                 bus_list);
        up_read(&pci_bus_sem);

        /*
         * Conventional PCI and PCI-X we need to wait Tpvrh + Trhfa before
         * accessing the device after reset (that is 1000 ms + 100 ms). In
         * practice this should not be needed because we don't do power
         * management for them (see pci_bridge_d3_possible()).
         */
        if (!pci_is_pcie(dev)) {
                pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay);
                msleep(1000 + delay);
                return;
        }

        /*
         * For PCIe downstream and root ports that do not support speeds
         * greater than 5 GT/s need to wait minimum 100 ms. For higher
         * speeds (gen3) we need to wait first for the data link layer to
         * become active.
         *
         * However, 100 ms is the minimum and the PCIe spec says the
         * software must allow at least 1s before it can determine that the
         * device that did not respond is a broken device. There is
         * evidence that 100 ms is not always enough, for example certain
         * Titan Ridge xHCI controller does not always respond to
         * configuration requests if we only wait for 100 ms (see
         * https://bugzilla.kernel.org/show_bug.cgi?id=203885).
         *
         * Therefore we wait for 100 ms and check for the device presence.
         * If it is still not present give it an additional 100 ms.
         */
        if (!pcie_downstream_port(dev))
                return;

        if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
                pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
                msleep(delay);
        } else {
                pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
                        delay);
                if (!pcie_wait_for_link_delay(dev, true, delay)) {
                        /* Did not train, no need to wait any further */
                        pci_info(dev, "Data Link Layer Link Active not set in 1000 msec\n");
                        return;
                }
        }

        if (!pci_device_is_present(child)) {
                pci_dbg(child, "waiting additional %d ms to become accessible\n", delay);
                msleep(delay);
        }
}
    5014             : 
/*
 * pci_reset_secondary_bus - pulse the Secondary Bus Reset bit of bridge @dev
 * @dev: bridge device whose secondary bus is to be reset
 *
 * Asserts and deasserts PCI_BRIDGE_CTL_BUS_RESET with the spec-mandated
 * delays, then sleeps long enough for subordinate devices to become
 * usable again.
 */
void pci_reset_secondary_bus(struct pci_dev *dev)
{
        u16 ctrl;

        /* Assert the reset on the secondary side. */
        pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
        ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
        pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);

        /*
         * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms.  Double
         * this to 2ms to ensure that we meet the minimum requirement.
         */
        msleep(2);

        /* Deassert the reset again. */
        ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
        pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);

        /*
         * Trhfa for conventional PCI is 2^25 clock cycles.
         * Assuming a minimum 33MHz clock this results in a 1s
         * delay before we can consider subordinate devices to
         * be re-initialized.  PCIe has some ways to shorten this,
         * but we don't make use of them yet.
         */
        ssleep(1);
}
    5041             : 
    5042           0 : void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
    5043             : {
    5044           0 :         pci_reset_secondary_bus(dev);
    5045           0 : }
    5046             : 
    5047             : /**
    5048             :  * pci_bridge_secondary_bus_reset - Reset the secondary bus on a PCI bridge.
    5049             :  * @dev: Bridge device
    5050             :  *
    5051             :  * Use the bridge control register to assert reset on the secondary bus.
    5052             :  * Devices on the secondary bus are left in power-on state.
    5053             :  */
    5054           0 : int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
    5055             : {
    5056           0 :         pcibios_reset_secondary_bus(dev);
    5057             : 
    5058           0 :         return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS);
    5059             : }
    5060             : EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
    5061             : 
    5062           0 : static int pci_parent_bus_reset(struct pci_dev *dev, bool probe)
    5063             : {
    5064             :         struct pci_dev *pdev;
    5065             : 
    5066           0 :         if (pci_is_root_bus(dev->bus) || dev->subordinate ||
    5067           0 :             !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
    5068             :                 return -ENOTTY;
    5069             : 
    5070           0 :         list_for_each_entry(pdev, &dev->bus->devices, bus_list)
    5071           0 :                 if (pdev != dev)
    5072             :                         return -ENOTTY;
    5073             : 
    5074           0 :         if (probe)
    5075             :                 return 0;
    5076             : 
    5077           0 :         return pci_bridge_secondary_bus_reset(dev->bus->self);
    5078             : }
    5079             : 
    5080             : static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, bool probe)
    5081             : {
    5082           0 :         int rc = -ENOTTY;
    5083             : 
    5084           0 :         if (!hotplug || !try_module_get(hotplug->owner))
    5085             :                 return rc;
    5086             : 
    5087           0 :         if (hotplug->ops->reset_slot)
    5088           0 :                 rc = hotplug->ops->reset_slot(hotplug, probe);
    5089             : 
    5090             :         module_put(hotplug->owner);
    5091             : 
    5092             :         return rc;
    5093             : }
    5094             : 
    5095           0 : static int pci_dev_reset_slot_function(struct pci_dev *dev, bool probe)
    5096             : {
    5097           0 :         if (dev->multifunction || dev->subordinate || !dev->slot ||
    5098           0 :             dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
    5099             :                 return -ENOTTY;
    5100             : 
    5101           0 :         return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
    5102             : }
    5103             : 
    5104           0 : static int pci_reset_bus_function(struct pci_dev *dev, bool probe)
    5105             : {
    5106             :         int rc;
    5107             : 
    5108           0 :         rc = pci_dev_reset_slot_function(dev, probe);
    5109           0 :         if (rc != -ENOTTY)
    5110             :                 return rc;
    5111           0 :         return pci_parent_bus_reset(dev, probe);
    5112             : }
    5113             : 
    5114           0 : void pci_dev_lock(struct pci_dev *dev)
    5115             : {
    5116           0 :         pci_cfg_access_lock(dev);
    5117             :         /* block PM suspend, driver probe, etc. */
    5118           0 :         device_lock(&dev->dev);
    5119           0 : }
    5120             : EXPORT_SYMBOL_GPL(pci_dev_lock);
    5121             : 
    5122             : /* Return 1 on successful lock, 0 on contention */
    5123           0 : int pci_dev_trylock(struct pci_dev *dev)
    5124             : {
    5125           0 :         if (pci_cfg_access_trylock(dev)) {
    5126           0 :                 if (device_trylock(&dev->dev))
    5127             :                         return 1;
    5128           0 :                 pci_cfg_access_unlock(dev);
    5129             :         }
    5130             : 
    5131             :         return 0;
    5132             : }
    5133             : EXPORT_SYMBOL_GPL(pci_dev_trylock);
    5134             : 
    5135           0 : void pci_dev_unlock(struct pci_dev *dev)
    5136             : {
    5137           0 :         device_unlock(&dev->dev);
    5138           0 :         pci_cfg_access_unlock(dev);
    5139           0 : }
    5140             : EXPORT_SYMBOL_GPL(pci_dev_unlock);
    5141             : 
/*
 * pci_dev_save_and_disable - quiesce @dev and save its state before reset
 * @dev: device about to be reset
 *
 * Notifies the driver via ->reset_prepare(), forces the device into D0,
 * saves its config space, and then clears the Command register (leaving
 * only INTx-disable set) so the device cannot DMA or raise interrupts
 * while the reset is in progress.  Caller must hold the device lock.
 */
static void pci_dev_save_and_disable(struct pci_dev *dev)
{
        const struct pci_error_handlers *err_handler =
                        dev->driver ? dev->driver->err_handler : NULL;

        /*
         * dev->driver->err_handler->reset_prepare() is protected against
         * races with ->remove() by the device lock, which must be held by
         * the caller.
         */
        if (err_handler && err_handler->reset_prepare)
                err_handler->reset_prepare(dev);

        /*
         * Wake-up device prior to save.  PM registers default to D0 after
         * reset and a simple register restore doesn't reliably return
         * to a non-D0 state anyway.
         */
        pci_set_power_state(dev, PCI_D0);

        pci_save_state(dev);
        /*
         * Disable the device by clearing the Command register, except for
         * INTx-disable which is set.  This not only disables MMIO and I/O port
         * BARs, but also prevents the device from being Bus Master, preventing
         * DMA from the device including MSI/MSI-X interrupts.  For PCI 2.3
         * compliant devices, INTx-disable prevents legacy interrupts.
         */
        pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
}
    5172             : 
/*
 * pci_dev_restore - restore @dev's saved state after reset
 * @dev: device that was reset
 *
 * Restores the config space saved by pci_dev_save_and_disable() and then
 * notifies the driver via ->reset_done().  Caller must hold the device
 * lock.
 */
static void pci_dev_restore(struct pci_dev *dev)
{
        const struct pci_error_handlers *err_handler =
                        dev->driver ? dev->driver->err_handler : NULL;

        pci_restore_state(dev);

        /*
         * dev->driver->err_handler->reset_done() is protected against
         * races with ->remove() by the device lock, which must be held by
         * the caller.
         */
        if (err_handler && err_handler->reset_done)
                err_handler->reset_done(dev);
}
    5188             : 
    5189             : /* dev->reset_methods[] is a 0-terminated list of indices into this array */
    5190             : static const struct pci_reset_fn_method pci_reset_fn_methods[] = {
    5191             :         { },
    5192             :         { pci_dev_specific_reset, .name = "device_specific" },
    5193             :         { pci_dev_acpi_reset, .name = "acpi" },
    5194             :         { pcie_reset_flr, .name = "flr" },
    5195             :         { pci_af_flr, .name = "af_flr" },
    5196             :         { pci_pm_reset, .name = "pm" },
    5197             :         { pci_reset_bus_function, .name = "bus" },
    5198             : };
    5199             : 
    5200           0 : static ssize_t reset_method_show(struct device *dev,
    5201             :                                  struct device_attribute *attr, char *buf)
    5202             : {
    5203           0 :         struct pci_dev *pdev = to_pci_dev(dev);
    5204           0 :         ssize_t len = 0;
    5205             :         int i, m;
    5206             : 
    5207           0 :         for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
    5208           0 :                 m = pdev->reset_methods[i];
    5209           0 :                 if (!m)
    5210             :                         break;
    5211             : 
    5212           0 :                 len += sysfs_emit_at(buf, len, "%s%s", len ? " " : "",
    5213             :                                      pci_reset_fn_methods[m].name);
    5214             :         }
    5215             : 
    5216           0 :         if (len)
    5217           0 :                 len += sysfs_emit_at(buf, len, "\n");
    5218             : 
    5219           0 :         return len;
    5220             : }
    5221             : 
    5222             : static int reset_method_lookup(const char *name)
    5223             : {
    5224             :         int m;
    5225             : 
    5226           0 :         for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
    5227           0 :                 if (sysfs_streq(name, pci_reset_fn_methods[m].name))
    5228             :                         return m;
    5229             :         }
    5230             : 
    5231             :         return 0;       /* not found */
    5232             : }
    5233             : 
    5234           0 : static ssize_t reset_method_store(struct device *dev,
    5235             :                                   struct device_attribute *attr,
    5236             :                                   const char *buf, size_t count)
    5237             : {
    5238           0 :         struct pci_dev *pdev = to_pci_dev(dev);
    5239             :         char *options, *name;
    5240             :         int m, n;
    5241           0 :         u8 reset_methods[PCI_NUM_RESET_METHODS] = { 0 };
    5242             : 
    5243           0 :         if (sysfs_streq(buf, "")) {
    5244           0 :                 pdev->reset_methods[0] = 0;
    5245           0 :                 pci_warn(pdev, "All device reset methods disabled by user");
    5246           0 :                 return count;
    5247             :         }
    5248             : 
    5249           0 :         if (sysfs_streq(buf, "default")) {
    5250           0 :                 pci_init_reset_methods(pdev);
    5251           0 :                 return count;
    5252             :         }
    5253             : 
    5254           0 :         options = kstrndup(buf, count, GFP_KERNEL);
    5255           0 :         if (!options)
    5256             :                 return -ENOMEM;
    5257             : 
    5258             :         n = 0;
    5259           0 :         while ((name = strsep(&options, " ")) != NULL) {
    5260           0 :                 if (sysfs_streq(name, ""))
    5261           0 :                         continue;
    5262             : 
    5263           0 :                 name = strim(name);
    5264             : 
    5265           0 :                 m = reset_method_lookup(name);
    5266           0 :                 if (!m) {
    5267           0 :                         pci_err(pdev, "Invalid reset method '%s'", name);
    5268           0 :                         goto error;
    5269             :                 }
    5270             : 
    5271           0 :                 if (pci_reset_fn_methods[m].reset_fn(pdev, PCI_RESET_PROBE)) {
    5272           0 :                         pci_err(pdev, "Unsupported reset method '%s'", name);
    5273           0 :                         goto error;
    5274             :                 }
    5275             : 
    5276           0 :                 if (n == PCI_NUM_RESET_METHODS - 1) {
    5277           0 :                         pci_err(pdev, "Too many reset methods\n");
    5278           0 :                         goto error;
    5279             :                 }
    5280             : 
    5281           0 :                 reset_methods[n++] = m;
    5282             :         }
    5283             : 
    5284           0 :         reset_methods[n] = 0;
    5285             : 
    5286             :         /* Warn if dev-specific supported but not highest priority */
    5287           0 :         if (pci_reset_fn_methods[1].reset_fn(pdev, PCI_RESET_PROBE) == 0 &&
    5288           0 :             reset_methods[0] != 1)
    5289           0 :                 pci_warn(pdev, "Device-specific reset disabled/de-prioritized by user");
    5290           0 :         memcpy(pdev->reset_methods, reset_methods, sizeof(pdev->reset_methods));
    5291           0 :         kfree(options);
    5292           0 :         return count;
    5293             : 
    5294             : error:
    5295             :         /* Leave previous methods unchanged */
    5296           0 :         kfree(options);
    5297           0 :         return -EINVAL;
    5298             : }
/* sysfs "reset_method" attribute, backed by the show/store handlers above */
static DEVICE_ATTR_RW(reset_method);

/* NULL-terminated attribute list for the reset_method sysfs group */
static struct attribute *pci_dev_reset_method_attrs[] = {
	&dev_attr_reset_method.attr,
	NULL,
};
    5305             : 
    5306           0 : static umode_t pci_dev_reset_method_attr_is_visible(struct kobject *kobj,
    5307             :                                                     struct attribute *a, int n)
    5308             : {
    5309           0 :         struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
    5310             : 
    5311           0 :         if (!pci_reset_supported(pdev))
    5312             :                 return 0;
    5313             : 
    5314           0 :         return a->mode;
    5315             : }
    5316             : 
/*
 * sysfs group exposing "reset_method"; suppressed at registration time by
 * pci_dev_reset_method_attr_is_visible() for devices with no reset support.
 */
const struct attribute_group pci_dev_reset_method_attr_group = {
	.attrs = pci_dev_reset_method_attrs,
	.is_visible = pci_dev_reset_method_attr_is_visible,
};
    5321             : 
    5322             : /**
    5323             :  * __pci_reset_function_locked - reset a PCI device function while holding
    5324             :  * the @dev mutex lock.
    5325             :  * @dev: PCI device to reset
    5326             :  *
    5327             :  * Some devices allow an individual function to be reset without affecting
    5328             :  * other functions in the same device.  The PCI device must be responsive
    5329             :  * to PCI config space in order to use this function.
    5330             :  *
    5331             :  * The device function is presumed to be unused and the caller is holding
    5332             :  * the device mutex lock when this function is called.
    5333             :  *
    5334             :  * Resetting the device will make the contents of PCI configuration space
    5335             :  * random, so any caller of this must be prepared to reinitialise the
    5336             :  * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
    5337             :  * etc.
    5338             :  *
    5339             :  * Returns 0 if the device function was successfully reset or negative if the
    5340             :  * device doesn't support resetting a single function.
    5341             :  */
    5342           0 : int __pci_reset_function_locked(struct pci_dev *dev)
    5343             : {
    5344             :         int i, m, rc;
    5345             : 
    5346             :         might_sleep();
    5347             : 
    5348             :         /*
    5349             :          * A reset method returns -ENOTTY if it doesn't support this device and
    5350             :          * we should try the next method.
    5351             :          *
    5352             :          * If it returns 0 (success), we're finished.  If it returns any other
    5353             :          * error, we're also finished: this indicates that further reset
    5354             :          * mechanisms might be broken on the device.
    5355             :          */
    5356           0 :         for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
    5357           0 :                 m = dev->reset_methods[i];
    5358           0 :                 if (!m)
    5359             :                         return -ENOTTY;
    5360             : 
    5361           0 :                 rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_DO_RESET);
    5362           0 :                 if (!rc)
    5363             :                         return 0;
    5364           0 :                 if (rc != -ENOTTY)
    5365             :                         return rc;
    5366             :         }
    5367             : 
    5368             :         return -ENOTTY;
    5369             : }
    5370             : EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
    5371             : 
    5372             : /**
    5373             :  * pci_init_reset_methods - check whether device can be safely reset
    5374             :  * and store supported reset mechanisms.
    5375             :  * @dev: PCI device to check for reset mechanisms
    5376             :  *
    5377             :  * Some devices allow an individual function to be reset without affecting
    5378             :  * other functions in the same device.  The PCI device must be in D0-D3hot
    5379             :  * state.
    5380             :  *
    5381             :  * Stores reset mechanisms supported by device in reset_methods byte array
    5382             :  * which is a member of struct pci_dev.
    5383             :  */
    5384           0 : void pci_init_reset_methods(struct pci_dev *dev)
    5385             : {
    5386             :         int m, i, rc;
    5387             : 
    5388             :         BUILD_BUG_ON(ARRAY_SIZE(pci_reset_fn_methods) != PCI_NUM_RESET_METHODS);
    5389             : 
    5390             :         might_sleep();
    5391             : 
    5392           0 :         i = 0;
    5393           0 :         for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
    5394           0 :                 rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_PROBE);
    5395           0 :                 if (!rc)
    5396           0 :                         dev->reset_methods[i++] = m;
    5397           0 :                 else if (rc != -ENOTTY)
    5398             :                         break;
    5399             :         }
    5400             : 
    5401           0 :         dev->reset_methods[i] = 0;
    5402           0 : }
    5403             : 
    5404             : /**
    5405             :  * pci_reset_function - quiesce and reset a PCI device function
    5406             :  * @dev: PCI device to reset
    5407             :  *
    5408             :  * Some devices allow an individual function to be reset without affecting
    5409             :  * other functions in the same device.  The PCI device must be responsive
    5410             :  * to PCI config space in order to use this function.
    5411             :  *
    5412             :  * This function does not just reset the PCI portion of a device, but
    5413             :  * clears all the state associated with the device.  This function differs
    5414             :  * from __pci_reset_function_locked() in that it saves and restores device state
    5415             :  * over the reset and takes the PCI device lock.
    5416             :  *
    5417             :  * Returns 0 if the device function was successfully reset or negative if the
    5418             :  * device doesn't support resetting a single function.
    5419             :  */
    5420           0 : int pci_reset_function(struct pci_dev *dev)
    5421             : {
    5422             :         int rc;
    5423             : 
    5424           0 :         if (!pci_reset_supported(dev))
    5425             :                 return -ENOTTY;
    5426             : 
    5427           0 :         pci_dev_lock(dev);
    5428           0 :         pci_dev_save_and_disable(dev);
    5429             : 
    5430           0 :         rc = __pci_reset_function_locked(dev);
    5431             : 
    5432           0 :         pci_dev_restore(dev);
    5433           0 :         pci_dev_unlock(dev);
    5434             : 
    5435           0 :         return rc;
    5436             : }
    5437             : EXPORT_SYMBOL_GPL(pci_reset_function);
    5438             : 
    5439             : /**
    5440             :  * pci_reset_function_locked - quiesce and reset a PCI device function
    5441             :  * @dev: PCI device to reset
    5442             :  *
    5443             :  * Some devices allow an individual function to be reset without affecting
    5444             :  * other functions in the same device.  The PCI device must be responsive
    5445             :  * to PCI config space in order to use this function.
    5446             :  *
    5447             :  * This function does not just reset the PCI portion of a device, but
    5448             :  * clears all the state associated with the device.  This function differs
    5449             :  * from __pci_reset_function_locked() in that it saves and restores device state
    5450             :  * over the reset.  It also differs from pci_reset_function() in that it
    5451             :  * requires the PCI device lock to be held.
    5452             :  *
    5453             :  * Returns 0 if the device function was successfully reset or negative if the
    5454             :  * device doesn't support resetting a single function.
    5455             :  */
    5456           0 : int pci_reset_function_locked(struct pci_dev *dev)
    5457             : {
    5458             :         int rc;
    5459             : 
    5460           0 :         if (!pci_reset_supported(dev))
    5461             :                 return -ENOTTY;
    5462             : 
    5463           0 :         pci_dev_save_and_disable(dev);
    5464             : 
    5465           0 :         rc = __pci_reset_function_locked(dev);
    5466             : 
    5467           0 :         pci_dev_restore(dev);
    5468             : 
    5469           0 :         return rc;
    5470             : }
    5471             : EXPORT_SYMBOL_GPL(pci_reset_function_locked);
    5472             : 
    5473             : /**
    5474             :  * pci_try_reset_function - quiesce and reset a PCI device function
    5475             :  * @dev: PCI device to reset
    5476             :  *
    5477             :  * Same as above, except return -EAGAIN if unable to lock device.
    5478             :  */
    5479           0 : int pci_try_reset_function(struct pci_dev *dev)
    5480             : {
    5481             :         int rc;
    5482             : 
    5483           0 :         if (!pci_reset_supported(dev))
    5484             :                 return -ENOTTY;
    5485             : 
    5486           0 :         if (!pci_dev_trylock(dev))
    5487             :                 return -EAGAIN;
    5488             : 
    5489           0 :         pci_dev_save_and_disable(dev);
    5490           0 :         rc = __pci_reset_function_locked(dev);
    5491           0 :         pci_dev_restore(dev);
    5492           0 :         pci_dev_unlock(dev);
    5493             : 
    5494           0 :         return rc;
    5495             : }
    5496             : EXPORT_SYMBOL_GPL(pci_try_reset_function);
    5497             : 
    5498             : /* Do any devices on or below this bus prevent a bus reset? */
    5499           0 : static bool pci_bus_resetable(struct pci_bus *bus)
    5500             : {
    5501             :         struct pci_dev *dev;
    5502             : 
    5503             : 
    5504           0 :         if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
    5505             :                 return false;
    5506             : 
    5507           0 :         list_for_each_entry(dev, &bus->devices, bus_list) {
    5508           0 :                 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
    5509           0 :                     (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
    5510             :                         return false;
    5511             :         }
    5512             : 
    5513             :         return true;
    5514             : }
    5515             : 
    5516             : /* Lock devices from the top of the tree down */
    5517           0 : static void pci_bus_lock(struct pci_bus *bus)
    5518             : {
    5519             :         struct pci_dev *dev;
    5520             : 
    5521           0 :         list_for_each_entry(dev, &bus->devices, bus_list) {
    5522           0 :                 pci_dev_lock(dev);
    5523           0 :                 if (dev->subordinate)
    5524           0 :                         pci_bus_lock(dev->subordinate);
    5525             :         }
    5526           0 : }
    5527             : 
    5528             : /* Unlock devices from the bottom of the tree up */
    5529           0 : static void pci_bus_unlock(struct pci_bus *bus)
    5530             : {
    5531             :         struct pci_dev *dev;
    5532             : 
    5533           0 :         list_for_each_entry(dev, &bus->devices, bus_list) {
    5534           0 :                 if (dev->subordinate)
    5535           0 :                         pci_bus_unlock(dev->subordinate);
    5536           0 :                 pci_dev_unlock(dev);
    5537             :         }
    5538           0 : }
    5539             : 
    5540             : /* Return 1 on successful lock, 0 on contention */
    5541           0 : static int pci_bus_trylock(struct pci_bus *bus)
    5542             : {
    5543             :         struct pci_dev *dev;
    5544             : 
    5545           0 :         list_for_each_entry(dev, &bus->devices, bus_list) {
    5546           0 :                 if (!pci_dev_trylock(dev))
    5547             :                         goto unlock;
    5548           0 :                 if (dev->subordinate) {
    5549           0 :                         if (!pci_bus_trylock(dev->subordinate)) {
    5550             :                                 pci_dev_unlock(dev);
    5551             :                                 goto unlock;
    5552             :                         }
    5553             :                 }
    5554             :         }
    5555             :         return 1;
    5556             : 
    5557             : unlock:
    5558           0 :         list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
    5559           0 :                 if (dev->subordinate)
    5560           0 :                         pci_bus_unlock(dev->subordinate);
    5561           0 :                 pci_dev_unlock(dev);
    5562             :         }
    5563             :         return 0;
    5564             : }
    5565             : 
    5566             : /* Do any devices on or below this slot prevent a bus reset? */
    5567           0 : static bool pci_slot_resetable(struct pci_slot *slot)
    5568             : {
    5569             :         struct pci_dev *dev;
    5570             : 
    5571           0 :         if (slot->bus->self &&
    5572           0 :             (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
    5573             :                 return false;
    5574             : 
    5575           0 :         list_for_each_entry(dev, &slot->bus->devices, bus_list) {
    5576           0 :                 if (!dev->slot || dev->slot != slot)
    5577           0 :                         continue;
    5578           0 :                 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
    5579           0 :                     (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
    5580             :                         return false;
    5581             :         }
    5582             : 
    5583             :         return true;
    5584             : }
    5585             : 
    5586             : /* Lock devices from the top of the tree down */
    5587           0 : static void pci_slot_lock(struct pci_slot *slot)
    5588             : {
    5589             :         struct pci_dev *dev;
    5590             : 
    5591           0 :         list_for_each_entry(dev, &slot->bus->devices, bus_list) {
    5592           0 :                 if (!dev->slot || dev->slot != slot)
    5593           0 :                         continue;
    5594           0 :                 pci_dev_lock(dev);
    5595           0 :                 if (dev->subordinate)
    5596           0 :                         pci_bus_lock(dev->subordinate);
    5597             :         }
    5598           0 : }
    5599             : 
    5600             : /* Unlock devices from the bottom of the tree up */
    5601           0 : static void pci_slot_unlock(struct pci_slot *slot)
    5602             : {
    5603             :         struct pci_dev *dev;
    5604             : 
    5605           0 :         list_for_each_entry(dev, &slot->bus->devices, bus_list) {
    5606           0 :                 if (!dev->slot || dev->slot != slot)
    5607           0 :                         continue;
    5608           0 :                 if (dev->subordinate)
    5609           0 :                         pci_bus_unlock(dev->subordinate);
    5610             :                 pci_dev_unlock(dev);
    5611             :         }
    5612           0 : }
    5613             : 
    5614             : /* Return 1 on successful lock, 0 on contention */
    5615           0 : static int pci_slot_trylock(struct pci_slot *slot)
    5616             : {
    5617             :         struct pci_dev *dev;
    5618             : 
    5619           0 :         list_for_each_entry(dev, &slot->bus->devices, bus_list) {
    5620           0 :                 if (!dev->slot || dev->slot != slot)
    5621           0 :                         continue;
    5622           0 :                 if (!pci_dev_trylock(dev))
    5623             :                         goto unlock;
    5624           0 :                 if (dev->subordinate) {
    5625           0 :                         if (!pci_bus_trylock(dev->subordinate)) {
    5626             :                                 pci_dev_unlock(dev);
    5627             :                                 goto unlock;
    5628             :                         }
    5629             :                 }
    5630             :         }
    5631             :         return 1;
    5632             : 
    5633             : unlock:
    5634           0 :         list_for_each_entry_continue_reverse(dev,
    5635             :                                              &slot->bus->devices, bus_list) {
    5636           0 :                 if (!dev->slot || dev->slot != slot)
    5637           0 :                         continue;
    5638           0 :                 if (dev->subordinate)
    5639           0 :                         pci_bus_unlock(dev->subordinate);
    5640             :                 pci_dev_unlock(dev);
    5641             :         }
    5642             :         return 0;
    5643             : }
    5644             : 
    5645             : /*
    5646             :  * Save and disable devices from the top of the tree down while holding
    5647             :  * the @dev mutex lock for the entire tree.
    5648             :  */
    5649           0 : static void pci_bus_save_and_disable_locked(struct pci_bus *bus)
    5650             : {
    5651             :         struct pci_dev *dev;
    5652             : 
    5653           0 :         list_for_each_entry(dev, &bus->devices, bus_list) {
    5654           0 :                 pci_dev_save_and_disable(dev);
    5655           0 :                 if (dev->subordinate)
    5656           0 :                         pci_bus_save_and_disable_locked(dev->subordinate);
    5657             :         }
    5658           0 : }
    5659             : 
    5660             : /*
    5661             :  * Restore devices from top of the tree down while holding @dev mutex lock
    5662             :  * for the entire tree.  Parent bridges need to be restored before we can
    5663             :  * get to subordinate devices.
    5664             :  */
    5665           0 : static void pci_bus_restore_locked(struct pci_bus *bus)
    5666             : {
    5667             :         struct pci_dev *dev;
    5668             : 
    5669           0 :         list_for_each_entry(dev, &bus->devices, bus_list) {
    5670           0 :                 pci_dev_restore(dev);
    5671           0 :                 if (dev->subordinate)
    5672           0 :                         pci_bus_restore_locked(dev->subordinate);
    5673             :         }
    5674           0 : }
    5675             : 
    5676             : /*
    5677             :  * Save and disable devices from the top of the tree down while holding
    5678             :  * the @dev mutex lock for the entire tree.
    5679             :  */
    5680           0 : static void pci_slot_save_and_disable_locked(struct pci_slot *slot)
    5681             : {
    5682             :         struct pci_dev *dev;
    5683             : 
    5684           0 :         list_for_each_entry(dev, &slot->bus->devices, bus_list) {
    5685           0 :                 if (!dev->slot || dev->slot != slot)
    5686           0 :                         continue;
    5687           0 :                 pci_dev_save_and_disable(dev);
    5688           0 :                 if (dev->subordinate)
    5689           0 :                         pci_bus_save_and_disable_locked(dev->subordinate);
    5690             :         }
    5691           0 : }
    5692             : 
    5693             : /*
    5694             :  * Restore devices from top of the tree down while holding @dev mutex lock
    5695             :  * for the entire tree.  Parent bridges need to be restored before we can
    5696             :  * get to subordinate devices.
    5697             :  */
    5698           0 : static void pci_slot_restore_locked(struct pci_slot *slot)
    5699             : {
    5700             :         struct pci_dev *dev;
    5701             : 
    5702           0 :         list_for_each_entry(dev, &slot->bus->devices, bus_list) {
    5703           0 :                 if (!dev->slot || dev->slot != slot)
    5704           0 :                         continue;
    5705           0 :                 pci_dev_restore(dev);
    5706           0 :                 if (dev->subordinate)
    5707           0 :                         pci_bus_restore_locked(dev->subordinate);
    5708             :         }
    5709           0 : }
    5710             : 
    5711           0 : static int pci_slot_reset(struct pci_slot *slot, bool probe)
    5712             : {
    5713             :         int rc;
    5714             : 
    5715           0 :         if (!slot || !pci_slot_resetable(slot))
    5716             :                 return -ENOTTY;
    5717             : 
    5718           0 :         if (!probe)
    5719           0 :                 pci_slot_lock(slot);
    5720             : 
    5721             :         might_sleep();
    5722             : 
    5723           0 :         rc = pci_reset_hotplug_slot(slot->hotplug, probe);
    5724             : 
    5725           0 :         if (!probe)
    5726           0 :                 pci_slot_unlock(slot);
    5727             : 
    5728             :         return rc;
    5729             : }
    5730             : 
/**
 * pci_probe_reset_slot - probe whether a PCI slot can be reset
 * @slot: PCI slot to probe
 *
 * Probe only; no reset is actually performed.
 *
 * Return 0 if slot can be reset, negative if a slot reset is not supported.
 */
int pci_probe_reset_slot(struct pci_slot *slot)
{
	return pci_slot_reset(slot, PCI_RESET_PROBE);
}
EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
    5742             : 
    5743             : /**
    5744             :  * __pci_reset_slot - Try to reset a PCI slot
    5745             :  * @slot: PCI slot to reset
    5746             :  *
    5747             :  * A PCI bus may host multiple slots, each slot may support a reset mechanism
    5748             :  * independent of other slots.  For instance, some slots may support slot power
    5749             :  * control.  In the case of a 1:1 bus to slot architecture, this function may
    5750             :  * wrap the bus reset to avoid spurious slot related events such as hotplug.
    5751             :  * Generally a slot reset should be attempted before a bus reset.  All of the
    5752             :  * function of the slot and any subordinate buses behind the slot are reset
    5753             :  * through this function.  PCI config space of all devices in the slot and
    5754             :  * behind the slot is saved before and restored after reset.
    5755             :  *
    5756             :  * Same as above except return -EAGAIN if the slot cannot be locked
    5757             :  */
    5758           0 : static int __pci_reset_slot(struct pci_slot *slot)
    5759             : {
    5760             :         int rc;
    5761             : 
    5762           0 :         rc = pci_slot_reset(slot, PCI_RESET_PROBE);
    5763           0 :         if (rc)
    5764             :                 return rc;
    5765             : 
    5766           0 :         if (pci_slot_trylock(slot)) {
    5767           0 :                 pci_slot_save_and_disable_locked(slot);
    5768             :                 might_sleep();
    5769           0 :                 rc = pci_reset_hotplug_slot(slot->hotplug, PCI_RESET_DO_RESET);
    5770           0 :                 pci_slot_restore_locked(slot);
    5771           0 :                 pci_slot_unlock(slot);
    5772             :         } else
    5773             :                 rc = -EAGAIN;
    5774             : 
    5775             :         return rc;
    5776             : }
    5777             : 
    5778           0 : static int pci_bus_reset(struct pci_bus *bus, bool probe)
    5779             : {
    5780             :         int ret;
    5781             : 
    5782           0 :         if (!bus->self || !pci_bus_resetable(bus))
    5783             :                 return -ENOTTY;
    5784             : 
    5785           0 :         if (probe)
    5786             :                 return 0;
    5787             : 
    5788           0 :         pci_bus_lock(bus);
    5789             : 
    5790             :         might_sleep();
    5791             : 
    5792           0 :         ret = pci_bridge_secondary_bus_reset(bus->self);
    5793             : 
    5794           0 :         pci_bus_unlock(bus);
    5795             : 
    5796           0 :         return ret;
    5797             : }
    5798             : 
    5799             : /**
    5800             :  * pci_bus_error_reset - reset the bridge's subordinate bus
    5801             :  * @bridge: The parent device that connects to the bus to reset
    5802             :  *
    5803             :  * This function will first try to reset the slots on this bus if the method is
    5804             :  * available. If slot reset fails or is not available, this will fall back to a
    5805             :  * secondary bus reset.
    5806             :  */
    5807           0 : int pci_bus_error_reset(struct pci_dev *bridge)
    5808             : {
    5809           0 :         struct pci_bus *bus = bridge->subordinate;
    5810             :         struct pci_slot *slot;
    5811             : 
    5812           0 :         if (!bus)
    5813             :                 return -ENOTTY;
    5814             : 
    5815           0 :         mutex_lock(&pci_slot_mutex);
    5816           0 :         if (list_empty(&bus->slots))
    5817             :                 goto bus_reset;
    5818             : 
    5819           0 :         list_for_each_entry(slot, &bus->slots, list)
    5820           0 :                 if (pci_probe_reset_slot(slot))
    5821             :                         goto bus_reset;
    5822             : 
    5823           0 :         list_for_each_entry(slot, &bus->slots, list)
    5824           0 :                 if (pci_slot_reset(slot, PCI_RESET_DO_RESET))
    5825             :                         goto bus_reset;
    5826             : 
    5827           0 :         mutex_unlock(&pci_slot_mutex);
    5828           0 :         return 0;
    5829             : bus_reset:
    5830           0 :         mutex_unlock(&pci_slot_mutex);
    5831           0 :         return pci_bus_reset(bridge->subordinate, PCI_RESET_DO_RESET);
    5832             : }
    5833             : 
    5834             : /**
    5835             :  * pci_probe_reset_bus - probe whether a PCI bus can be reset
    5836             :  * @bus: PCI bus to probe
    5837             :  *
    5838             :  * Return 0 if bus can be reset, negative if a bus reset is not supported.
    5839             :  */
    5840           0 : int pci_probe_reset_bus(struct pci_bus *bus)
    5841             : {
    5842           0 :         return pci_bus_reset(bus, PCI_RESET_PROBE);
    5843             : }
    5844             : EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
    5845             : 
    5846             : /**
    5847             :  * __pci_reset_bus - Try to reset a PCI bus
    5848             :  * @bus: top level PCI bus to reset
    5849             :  *
    5850             :  * Same as above except return -EAGAIN if the bus cannot be locked
    5851             :  */
static int __pci_reset_bus(struct pci_bus *bus)
{
	int rc;

	/* Bail out early if this bus cannot be reset at all */
	rc = pci_bus_reset(bus, PCI_RESET_PROBE);
	if (rc)
		return rc;

	/* Non-blocking: return -EAGAIN rather than wait for the bus lock */
	if (pci_bus_trylock(bus)) {
		pci_bus_save_and_disable_locked(bus);
		might_sleep();
		rc = pci_bridge_secondary_bus_reset(bus->self);
		/* Restore config state of all devices even if the reset failed */
		pci_bus_restore_locked(bus);
		pci_bus_unlock(bus);
	} else
		rc = -EAGAIN;

	return rc;
}
    5871             : 
    5872             : /**
    5873             :  * pci_reset_bus - Try to reset a PCI bus
    5874             :  * @pdev: top level PCI device to reset via slot/bus
    5875             :  *
    5876             :  * Same as above except return -EAGAIN if the bus cannot be locked
    5877             :  */
    5878           0 : int pci_reset_bus(struct pci_dev *pdev)
    5879             : {
    5880           0 :         return (!pci_probe_reset_slot(pdev->slot)) ?
    5881           0 :             __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
    5882             : }
    5883             : EXPORT_SYMBOL_GPL(pci_reset_bus);
    5884             : 
    5885             : /**
    5886             :  * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
    5887             :  * @dev: PCI device to query
    5888             :  *
    5889             :  * Returns mmrbc: maximum designed memory read count in bytes or
    5890             :  * appropriate error value.
    5891             :  */
int pcix_get_max_mmrbc(struct pci_dev *dev)
{
	int cap;
	u32 stat;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!cap)
		return -EINVAL;

	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
		return -EINVAL;

	/* Designed max read count is log2-encoded in status bits 22:21 */
	return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
}
EXPORT_SYMBOL(pcix_get_max_mmrbc);
    5907             : 
    5908             : /**
    5909             :  * pcix_get_mmrbc - get PCI-X maximum memory read byte count
    5910             :  * @dev: PCI device to query
    5911             :  *
    5912             :  * Returns mmrbc: maximum memory read count in bytes or appropriate error
    5913             :  * value.
    5914             :  */
int pcix_get_mmrbc(struct pci_dev *dev)
{
	int cap;
	u16 cmd;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!cap)
		return -EINVAL;

	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
		return -EINVAL;

	/* Current max read count is log2-encoded in command bits 3:2 */
	return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
}
EXPORT_SYMBOL(pcix_get_mmrbc);
    5930             : 
    5931             : /**
    5932             :  * pcix_set_mmrbc - set PCI-X maximum memory read byte count
    5933             :  * @dev: PCI device to query
    5934             :  * @mmrbc: maximum memory read count in bytes
    5935             :  *    valid values are 512, 1024, 2048, 4096
    5936             :  *
    5937             :  * If possible sets maximum memory read byte count, some bridges have errata
    5938             :  * that prevent this.
    5939             :  */
int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
{
	int cap;
	u32 stat, v, o;
	u16 cmd;

	/* Only 512/1024/2048/4096 are encodable in the MAX_READ field */
	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
		return -EINVAL;

	/* Encode byte count as field value: 512 -> 0, ..., 4096 -> 3 */
	v = ffs(mmrbc) - 10;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!cap)
		return -EINVAL;

	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
		return -EINVAL;

	/* Refuse anything above the device's designed maximum */
	if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
		return -E2BIG;

	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
		return -EINVAL;

	o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
	if (o != v) {
		/* Bridge errata may forbid raising MMRBC on this bus */
		if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
			return -EIO;

		cmd &= ~PCI_X_CMD_MAX_READ;
		cmd |= v << 2;
		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL(pcix_set_mmrbc);
    5977             : 
    5978             : /**
    5979             :  * pcie_get_readrq - get PCI Express read request size
    5980             :  * @dev: PCI device to query
    5981             :  *
    5982             :  * Returns maximum memory read request in bytes or appropriate error value.
    5983             :  */
    5984           0 : int pcie_get_readrq(struct pci_dev *dev)
    5985             : {
    5986             :         u16 ctl;
    5987             : 
    5988           0 :         pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
    5989             : 
    5990           0 :         return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
    5991             : }
    5992             : EXPORT_SYMBOL(pcie_get_readrq);
    5993             : 
    5994             : /**
    5995             :  * pcie_set_readrq - set PCI Express maximum memory read request
    5996             :  * @dev: PCI device to query
    5997             :  * @rq: maximum memory read count in bytes
    5998             :  *    valid values are 128, 256, 512, 1024, 2048, 4096
    5999             :  *
    6000             :  * If possible sets maximum memory read request in bytes
    6001             :  */
int pcie_set_readrq(struct pci_dev *dev, int rq)
{
	u16 v;
	int ret;

	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
		return -EINVAL;

	/*
	 * If using the "performance" PCIe config, we clamp the read rq
	 * size to the max packet size to keep the host bridge from
	 * generating requests larger than we can cope with.
	 */
	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
		int mps = pcie_get_mps(dev);

		if (mps < rq)
			rq = mps;
	}

	/* Encode byte count into DEVCTL bits 14:12: 128 -> 0, ..., 4096 -> 5 */
	v = (ffs(rq) - 8) << 12;

	ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
						  PCI_EXP_DEVCTL_READRQ, v);

	return pcibios_err_to_errno(ret);
}
EXPORT_SYMBOL(pcie_set_readrq);
    6030             : 
    6031             : /**
    6032             :  * pcie_get_mps - get PCI Express maximum payload size
    6033             :  * @dev: PCI device to query
    6034             :  *
    6035             :  * Returns maximum payload size in bytes
    6036             :  */
    6037           0 : int pcie_get_mps(struct pci_dev *dev)
    6038             : {
    6039             :         u16 ctl;
    6040             : 
    6041           0 :         pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
    6042             : 
    6043           0 :         return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
    6044             : }
    6045             : EXPORT_SYMBOL(pcie_get_mps);
    6046             : 
    6047             : /**
    6048             :  * pcie_set_mps - set PCI Express maximum payload size
    6049             :  * @dev: PCI device to query
    6050             :  * @mps: maximum payload size in bytes
    6051             :  *    valid values are 128, 256, 512, 1024, 2048, 4096
    6052             :  *
    6053             :  * If possible sets maximum payload size
    6054             :  */
int pcie_set_mps(struct pci_dev *dev, int mps)
{
	u16 v;
	int ret;

	if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
		return -EINVAL;

	/* Encode byte count as field value: 128 -> 0, ..., 4096 -> 5 */
	v = ffs(mps) - 8;
	/* Must not exceed the device's advertised MPS capability */
	if (v > dev->pcie_mpss)
		return -EINVAL;
	v <<= 5;

	ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
						  PCI_EXP_DEVCTL_PAYLOAD, v);

	return pcibios_err_to_errno(ret);
}
EXPORT_SYMBOL(pcie_set_mps);
    6074             : 
    6075             : /**
    6076             :  * pcie_bandwidth_available - determine minimum link settings of a PCIe
    6077             :  *                            device and its bandwidth limitation
    6078             :  * @dev: PCI device to query
    6079             :  * @limiting_dev: storage for device causing the bandwidth limitation
    6080             :  * @speed: storage for speed of limiting device
    6081             :  * @width: storage for width of limiting device
    6082             :  *
    6083             :  * Walk up the PCI device chain and find the point where the minimum
    6084             :  * bandwidth is available.  Return the bandwidth available there and (if
    6085             :  * limiting_dev, speed, and width pointers are supplied) information about
    6086             :  * that point.  The bandwidth returned is in Mb/s, i.e., megabits/second of
    6087             :  * raw bandwidth.
    6088             :  */
u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
			     enum pci_bus_speed *speed,
			     enum pcie_link_width *width)
{
	u16 lnksta;
	enum pci_bus_speed next_speed;
	enum pcie_link_width next_width;
	u32 bw, next_bw;

	if (speed)
		*speed = PCI_SPEED_UNKNOWN;
	if (width)
		*width = PCIE_LNK_WIDTH_UNKNOWN;

	bw = 0;

	/* Walk upstream toward the root, tracking the slowest link seen */
	while (dev) {
		pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);

		next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
		next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
			PCI_EXP_LNKSTA_NLW_SHIFT;

		next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);

		/* Check if current device limits the total bandwidth */
		if (!bw || next_bw <= bw) {
			bw = next_bw;

			if (limiting_dev)
				*limiting_dev = dev;
			if (speed)
				*speed = next_speed;
			if (width)
				*width = next_width;
		}

		dev = pci_upstream_bridge(dev);
	}

	return bw;
}
EXPORT_SYMBOL(pcie_bandwidth_available);
    6132             : 
    6133             : /**
    6134             :  * pcie_get_speed_cap - query for the PCI device's link speed capability
    6135             :  * @dev: PCI device to query
    6136             :  *
    6137             :  * Query the PCI device speed capability.  Return the maximum link speed
    6138             :  * supported by the device.
    6139             :  */
enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
{
	u32 lnkcap2, lnkcap;

	/*
	 * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18.  The
	 * implementation note there recommends using the Supported Link
	 * Speeds Vector in Link Capabilities 2 when supported.
	 *
	 * Without Link Capabilities 2, i.e., prior to PCIe r3.0, software
	 * should use the Supported Link Speeds field in Link Capabilities,
	 * where only 2.5 GT/s and 5.0 GT/s speeds were defined.
	 */
	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);

	/* PCIe r3.0-compliant */
	if (lnkcap2)
		return PCIE_LNKCAP2_SLS2SPEED(lnkcap2);

	/* Pre-r3.0 fallback: only 2.5 and 5.0 GT/s were ever defined here */
	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
	if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
		return PCIE_SPEED_5_0GT;
	else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
		return PCIE_SPEED_2_5GT;

	return PCI_SPEED_UNKNOWN;
}
EXPORT_SYMBOL(pcie_get_speed_cap);
    6168             : 
    6169             : /**
    6170             :  * pcie_get_width_cap - query for the PCI device's link width capability
    6171             :  * @dev: PCI device to query
    6172             :  *
    6173             :  * Query the PCI device width capability.  Return the maximum link width
    6174             :  * supported by the device.
    6175             :  */
    6176           0 : enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
    6177             : {
    6178             :         u32 lnkcap;
    6179             : 
    6180           0 :         pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
    6181           0 :         if (lnkcap)
    6182           0 :                 return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
    6183             : 
    6184             :         return PCIE_LNK_WIDTH_UNKNOWN;
    6185             : }
    6186             : EXPORT_SYMBOL(pcie_get_width_cap);
    6187             : 
    6188             : /**
    6189             :  * pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability
    6190             :  * @dev: PCI device
    6191             :  * @speed: storage for link speed
    6192             :  * @width: storage for link width
    6193             :  *
    6194             :  * Calculate a PCI device's link bandwidth by querying for its link speed
    6195             :  * and width, multiplying them, and applying encoding overhead.  The result
    6196             :  * is in Mb/s, i.e., megabits/second of raw bandwidth.
    6197             :  */
    6198           0 : u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
    6199             :                            enum pcie_link_width *width)
    6200             : {
    6201           0 :         *speed = pcie_get_speed_cap(dev);
    6202           0 :         *width = pcie_get_width_cap(dev);
    6203             : 
    6204           0 :         if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
    6205             :                 return 0;
    6206             : 
    6207           0 :         return *width * PCIE_SPEED2MBS_ENC(*speed);
    6208             : }
    6209             : 
    6210             : /**
    6211             :  * __pcie_print_link_status - Report the PCI device's link speed and width
    6212             :  * @dev: PCI device to query
    6213             :  * @verbose: Print info even when enough bandwidth is available
    6214             :  *
    6215             :  * If the available bandwidth at the device is less than the device is
    6216             :  * capable of, report the device's maximum possible bandwidth and the
    6217             :  * upstream link that limits its performance.  If @verbose, always print
    6218             :  * the available bandwidth, even if the device isn't constrained.
    6219             :  */
void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
{
	enum pcie_link_width width, width_cap;
	enum pci_bus_speed speed, speed_cap;
	struct pci_dev *limiting_dev = NULL;
	u32 bw_avail, bw_cap;

	bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
	bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);

	/* Not constrained: print only when the caller asked for verbosity */
	if (bw_avail >= bw_cap && verbose)
		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
			 bw_cap / 1000, bw_cap % 1000,
			 pci_speed_string(speed_cap), width_cap);
	/* Constrained: always report, naming the limiting upstream link */
	else if (bw_avail < bw_cap)
		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
			 bw_avail / 1000, bw_avail % 1000,
			 pci_speed_string(speed), width,
			 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
			 bw_cap / 1000, bw_cap % 1000,
			 pci_speed_string(speed_cap), width_cap);
}
    6242             : 
    6243             : /**
    6244             :  * pcie_print_link_status - Report the PCI device's link speed and width
    6245             :  * @dev: PCI device to query
    6246             :  *
    6247             :  * Report the available bandwidth at the device.
    6248             :  */
void pcie_print_link_status(struct pci_dev *dev)
{
	/* verbose=true: print even when bandwidth is not constrained */
	__pcie_print_link_status(dev, true);
}
EXPORT_SYMBOL(pcie_print_link_status);
    6254             : 
    6255             : /**
    6256             :  * pci_select_bars - Make BAR mask from the type of resource
    6257             :  * @dev: the PCI device for which BAR mask is made
    6258             :  * @flags: resource type mask to be selected
    6259             :  *
    6260             :  * This helper routine makes bar mask from the type of resource.
    6261             :  */
    6262           0 : int pci_select_bars(struct pci_dev *dev, unsigned long flags)
    6263             : {
    6264           0 :         int i, bars = 0;
    6265           0 :         for (i = 0; i < PCI_NUM_RESOURCES; i++)
    6266           0 :                 if (pci_resource_flags(dev, i) & flags)
    6267           0 :                         bars |= (1 << i);
    6268           0 :         return bars;
    6269             : }
    6270             : EXPORT_SYMBOL(pci_select_bars);
    6271             : 
    6272             : /* Some architectures require additional programming to enable VGA */
    6273             : static arch_set_vga_state_t arch_set_vga_state;
    6274             : 
void __init pci_register_set_vga_state(arch_set_vga_state_t func)
{
	/* Install the arch hook consumed by pci_set_vga_state_arch() */
	arch_set_vga_state = func;	/* NULL disables */
}
    6279             : 
    6280             : static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
    6281             :                                   unsigned int command_bits, u32 flags)
    6282             : {
    6283           0 :         if (arch_set_vga_state)
    6284           0 :                 return arch_set_vga_state(dev, decode, command_bits,
    6285             :                                                 flags);
    6286             :         return 0;
    6287             : }
    6288             : 
    6289             : /**
    6290             :  * pci_set_vga_state - set VGA decode state on device and parents if requested
    6291             :  * @dev: the PCI device
    6292             :  * @decode: true = enable decoding, false = disable decoding
    6293             :  * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
    6294             :  * @flags: traverse ancestors and change bridges
    6295             :  * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
    6296             :  */
int pci_set_vga_state(struct pci_dev *dev, bool decode,
		      unsigned int command_bits, u32 flags)
{
	struct pci_bus *bus;
	struct pci_dev *bridge;
	u16 cmd;
	int rc;

	/* When changing decodes, only I/O and memory enables are allowed */
	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));

	/* ARCH specific VGA enables */
	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
	if (rc)
		return rc;

	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
		/* Toggle the requested decode bits in the device's COMMAND */
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (decode)
			cmd |= command_bits;
		else
			cmd &= ~command_bits;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}

	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
		return 0;

	/* Walk up to the root, updating VGA forwarding on every bridge */
	bus = dev->bus;
	while (bus) {
		bridge = bus->self;
		if (bridge) {
			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
					     &cmd);
			if (decode)
				cmd |= PCI_BRIDGE_CTL_VGA;
			else
				cmd &= ~PCI_BRIDGE_CTL_VGA;
			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
					      cmd);
		}
		bus = bus->parent;
	}
	return 0;
}
    6341             : 
    6342             : #ifdef CONFIG_ACPI
    6343             : bool pci_pr3_present(struct pci_dev *pdev)
    6344             : {
    6345             :         struct acpi_device *adev;
    6346             : 
    6347             :         if (acpi_disabled)
    6348             :                 return false;
    6349             : 
    6350             :         adev = ACPI_COMPANION(&pdev->dev);
    6351             :         if (!adev)
    6352             :                 return false;
    6353             : 
    6354             :         return adev->power.flags.power_resources &&
    6355             :                 acpi_has_method(adev->handle, "_PR3");
    6356             : }
    6357             : EXPORT_SYMBOL_GPL(pci_pr3_present);
    6358             : #endif
    6359             : 
    6360             : /**
    6361             :  * pci_add_dma_alias - Add a DMA devfn alias for a device
    6362             :  * @dev: the PCI device for which alias is added
    6363             :  * @devfn_from: alias slot and function
    6364             :  * @nr_devfns: number of subsequent devfns to alias
    6365             :  *
    6366             :  * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
    6367             :  * which is used to program permissible bus-devfn source addresses for DMA
    6368             :  * requests in an IOMMU.  These aliases factor into IOMMU group creation
    6369             :  * and are useful for devices generating DMA requests beyond or different
    6370             :  * from their logical bus-devfn.  Examples include device quirks where the
    6371             :  * device simply uses the wrong devfn, as well as non-transparent bridges
    6372             :  * where the alias may be a proxy for devices in another domain.
    6373             :  *
    6374             :  * IOMMU group creation is performed during device discovery or addition,
    6375             :  * prior to any potential DMA mapping and therefore prior to driver probing
    6376             :  * (especially for userspace assigned devices where IOMMU group definition
    6377             :  * cannot be left as a userspace activity).  DMA aliases should therefore
    6378             :  * be configured via quirks, such as the PCI fixup header quirk.
    6379             :  */
void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from,
		       unsigned int nr_devfns)
{
	int devfn_to;

	/* Clamp so the range cannot run past the end of the bitmap */
	nr_devfns = min(nr_devfns, (unsigned int)MAX_NR_DEVFNS - devfn_from);
	devfn_to = devfn_from + nr_devfns - 1;

	/* Lazily allocate the alias bitmap on first use */
	if (!dev->dma_alias_mask)
		dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL);
	if (!dev->dma_alias_mask) {
		pci_warn(dev, "Unable to allocate DMA alias mask\n");
		return;
	}

	bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns);

	if (nr_devfns == 1)
		pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
				PCI_SLOT(devfn_from), PCI_FUNC(devfn_from));
	else if (nr_devfns > 1)
		pci_info(dev, "Enabling fixed DMA alias for devfn range from %02x.%d to %02x.%d\n",
				PCI_SLOT(devfn_from), PCI_FUNC(devfn_from),
				PCI_SLOT(devfn_to), PCI_FUNC(devfn_to));
}
    6405             : 
bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
{
	/*
	 * True when either device's alias bitmap covers the other's devfn,
	 * or when one is the platform-provided real DMA device of the other.
	 */
	return (dev1->dma_alias_mask &&
		test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
	       (dev2->dma_alias_mask &&
		test_bit(dev1->devfn, dev2->dma_alias_mask)) ||
	       pci_real_dma_dev(dev1) == dev2 ||
	       pci_real_dma_dev(dev2) == dev1;
}
    6415             : 
    6416           0 : bool pci_device_is_present(struct pci_dev *pdev)
    6417             : {
    6418             :         u32 v;
    6419             : 
    6420           0 :         if (pci_dev_is_disconnected(pdev))
    6421             :                 return false;
    6422           0 :         return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
    6423             : }
    6424             : EXPORT_SYMBOL_GPL(pci_device_is_present);
    6425             : 
    6426           0 : void pci_ignore_hotplug(struct pci_dev *dev)
    6427             : {
    6428           0 :         struct pci_dev *bridge = dev->bus->self;
    6429             : 
    6430           0 :         dev->ignore_hotplug = 1;
    6431             :         /* Propagate the "ignore hotplug" setting to the parent bridge. */
    6432           0 :         if (bridge)
    6433           0 :                 bridge->ignore_hotplug = 1;
    6434           0 : }
    6435             : EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
    6436             : 
    6437             : /**
    6438             :  * pci_real_dma_dev - Get PCI DMA device for PCI device
    6439             :  * @dev: the PCI device that may have a PCI DMA alias
    6440             :  *
    6441             :  * Permits the platform to provide architecture-specific functionality to
    6442             :  * devices needing to alias DMA to another PCI device on another PCI bus. If
    6443             :  * the PCI device is on the same bus, it is recommended to use
    6444             :  * pci_add_dma_alias(). This is the default implementation. Architecture
    6445             :  * implementations can override this.
    6446             :  */
    6447           0 : struct pci_dev __weak *pci_real_dma_dev(struct pci_dev *dev)
    6448             : {
    6449           0 :         return dev;
    6450             : }
    6451             : 
/*
 * Default resource alignment requested for all devices; 0 means "no
 * extra alignment".  Weak so architectures can override it.
 */
resource_size_t __weak pcibios_default_alignment(void)
{
	return 0;
}
    6456             : 
    6457             : /*
    6458             :  * Arches that don't want to expose struct resource to userland as-is in
    6459             :  * sysfs and /proc can implement their own pci_resource_to_user().
    6460             :  */
    6461           0 : void __weak pci_resource_to_user(const struct pci_dev *dev, int bar,
    6462             :                                  const struct resource *rsrc,
    6463             :                                  resource_size_t *start, resource_size_t *end)
    6464             : {
    6465           0 :         *start = rsrc->start;
    6466           0 :         *end = rsrc->end;
    6467           0 : }
    6468             : 
/*
 * User-supplied alignment spec (boot param "pci=resource_alignment=" or
 * the sysfs attribute below).  All reads/writes of the pointer are
 * serialized by resource_alignment_lock.
 */
static char *resource_alignment_param;
static DEFINE_SPINLOCK(resource_alignment_lock);
    6471             : 
    6472             : /**
    6473             :  * pci_specified_resource_alignment - get resource alignment specified by user.
    6474             :  * @dev: the PCI device to get
    6475             :  * @resize: whether or not to change resources' size when reassigning alignment
    6476             :  *
    6477             :  * RETURNS: Resource alignment if it is specified.
    6478             :  *          Zero if it is not specified.
    6479             :  */
    6480           0 : static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
    6481             :                                                         bool *resize)
    6482             : {
    6483             :         int align_order, count;
    6484           0 :         resource_size_t align = pcibios_default_alignment();
    6485             :         const char *p;
    6486             :         int ret;
    6487             : 
    6488           0 :         spin_lock(&resource_alignment_lock);
    6489           0 :         p = resource_alignment_param;
    6490           0 :         if (!p || !*p)
    6491             :                 goto out;
    6492           0 :         if (pci_has_flag(PCI_PROBE_ONLY)) {
    6493           0 :                 align = 0;
    6494           0 :                 pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
    6495             :                 goto out;
    6496             :         }
    6497             : 
    6498           0 :         while (*p) {
    6499           0 :                 count = 0;
    6500           0 :                 if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
    6501           0 :                     p[count] == '@') {
    6502           0 :                         p += count + 1;
    6503           0 :                         if (align_order > 63) {
    6504           0 :                                 pr_err("PCI: Invalid requested alignment (order %d)\n",
    6505             :                                        align_order);
    6506           0 :                                 align_order = PAGE_SHIFT;
    6507             :                         }
    6508             :                 } else {
    6509           0 :                         align_order = PAGE_SHIFT;
    6510             :                 }
    6511             : 
    6512           0 :                 ret = pci_dev_str_match(dev, p, &p);
    6513           0 :                 if (ret == 1) {
    6514           0 :                         *resize = true;
    6515           0 :                         align = 1ULL << align_order;
    6516           0 :                         break;
    6517           0 :                 } else if (ret < 0) {
    6518           0 :                         pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
    6519             :                                p);
    6520           0 :                         break;
    6521             :                 }
    6522             : 
    6523           0 :                 if (*p != ';' && *p != ',') {
    6524             :                         /* End of param or invalid format */
    6525             :                         break;
    6526             :                 }
    6527           0 :                 p++;
    6528             :         }
    6529             : out:
    6530           0 :         spin_unlock(&resource_alignment_lock);
    6531           0 :         return align;
    6532             : }
    6533             : 
/*
 * Adjust BAR @bar of @dev so it will be (re)assigned with at least @align
 * alignment.  Only memory BARs are touched; fixed BARs are skipped with a
 * message, and BARs already >= @align are left alone.  The resource is
 * marked IORESOURCE_UNSET so the core reassigns it later.
 */
static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
					   resource_size_t align, bool resize)
{
	struct resource *r = &dev->resource[bar];
	resource_size_t size;

	if (!(r->flags & IORESOURCE_MEM))
		return;

	if (r->flags & IORESOURCE_PCI_FIXED) {
		pci_info(dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
			 bar, r, (unsigned long long)align);
		return;
	}

	size = resource_size(r);
	if (size >= align)
		return;

	/*
	 * Increase the alignment of the resource.  There are two ways we
	 * can do this:
	 *
	 * 1) Increase the size of the resource.  BARs are aligned on their
	 *    size, so when we reallocate space for this resource, we'll
	 *    allocate it with the larger alignment.  This also prevents
	 *    assignment of any other BARs inside the alignment region, so
	 *    if we're requesting page alignment, this means no other BARs
	 *    will share the page.
	 *
	 *    The disadvantage is that this makes the resource larger than
	 *    the hardware BAR, which may break drivers that compute things
	 *    based on the resource size, e.g., to find registers at a
	 *    fixed offset before the end of the BAR.
	 *
	 * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
	 *    set r->start to the desired alignment.  By itself this
	 *    doesn't prevent other BARs being put inside the alignment
	 *    region, but if we realign *every* resource of every device in
	 *    the system, none of them will share an alignment region.
	 *
	 * When the user has requested alignment for only some devices via
	 * the "pci=resource_alignment" argument, "resize" is true and we
	 * use the first method.  Otherwise we assume we're aligning all
	 * devices and we use the second.
	 */

	pci_info(dev, "BAR%d %pR: requesting alignment to %#llx\n",
		 bar, r, (unsigned long long)align);

	if (resize) {
		/* Method 1: grow the resource to the alignment size. */
		r->start = 0;
		r->end = align - 1;
	} else {
		/* Method 2: keep the size, request an aligned start. */
		r->flags &= ~IORESOURCE_SIZEALIGN;
		r->flags |= IORESOURCE_STARTALIGN;
		r->start = align;
		r->end = r->start + size - 1;
	}
	r->flags |= IORESOURCE_UNSET;
}
    6595             : 
    6596             : /*
    6597             :  * This function disables memory decoding and releases memory resources
    6598             :  * of the device specified by kernel's boot parameter 'pci=resource_alignment='.
    6599             :  * It also rounds up size to specified alignment.
    6600             :  * Later on, the kernel will assign page-aligned memory resource back
    6601             :  * to the device.
    6602             :  */
    6603           0 : void pci_reassigndev_resource_alignment(struct pci_dev *dev)
    6604             : {
    6605             :         int i;
    6606             :         struct resource *r;
    6607             :         resource_size_t align;
    6608             :         u16 command;
    6609           0 :         bool resize = false;
    6610             : 
    6611             :         /*
    6612             :          * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
    6613             :          * 3.4.1.11.  Their resources are allocated from the space
    6614             :          * described by the VF BARx register in the PF's SR-IOV capability.
    6615             :          * We can't influence their alignment here.
    6616             :          */
    6617           0 :         if (dev->is_virtfn)
    6618           0 :                 return;
    6619             : 
    6620             :         /* check if specified PCI is target device to reassign */
    6621           0 :         align = pci_specified_resource_alignment(dev, &resize);
    6622           0 :         if (!align)
    6623             :                 return;
    6624             : 
    6625           0 :         if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
    6626           0 :             (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
    6627           0 :                 pci_warn(dev, "Can't reassign resources to host bridge\n");
    6628           0 :                 return;
    6629             :         }
    6630             : 
    6631           0 :         pci_read_config_word(dev, PCI_COMMAND, &command);
    6632           0 :         command &= ~PCI_COMMAND_MEMORY;
    6633           0 :         pci_write_config_word(dev, PCI_COMMAND, command);
    6634             : 
    6635           0 :         for (i = 0; i <= PCI_ROM_RESOURCE; i++)
    6636           0 :                 pci_request_resource_alignment(dev, i, align, resize);
    6637             : 
    6638             :         /*
    6639             :          * Need to disable bridge's resource window,
    6640             :          * to enable the kernel to reassign new resource
    6641             :          * window later on.
    6642             :          */
    6643           0 :         if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
    6644           0 :                 for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
    6645           0 :                         r = &dev->resource[i];
    6646           0 :                         if (!(r->flags & IORESOURCE_MEM))
    6647           0 :                                 continue;
    6648           0 :                         r->flags |= IORESOURCE_UNSET;
    6649           0 :                         r->end = resource_size(r) - 1;
    6650           0 :                         r->start = 0;
    6651             :                 }
    6652           0 :                 pci_disable_bridge_window(dev);
    6653             :         }
    6654             : }
    6655             : 
    6656           0 : static ssize_t resource_alignment_show(struct bus_type *bus, char *buf)
    6657             : {
    6658           0 :         size_t count = 0;
    6659             : 
    6660           0 :         spin_lock(&resource_alignment_lock);
    6661           0 :         if (resource_alignment_param)
    6662           0 :                 count = sysfs_emit(buf, "%s\n", resource_alignment_param);
    6663           0 :         spin_unlock(&resource_alignment_lock);
    6664             : 
    6665           0 :         return count;
    6666             : }
    6667             : 
/*
 * sysfs write: replace the resource_alignment parameter string.  An empty
 * (or whitespace-only-newline) value clears it.  The old string is freed
 * outside the spinlock.
 */
static ssize_t resource_alignment_store(struct bus_type *bus,
					const char *buf, size_t count)
{
	char *param, *old, *end;

	if (count >= (PAGE_SIZE - 1))
		return -EINVAL;

	param = kstrndup(buf, count, GFP_KERNEL);
	if (!param)
		return -ENOMEM;

	/* Strip a trailing newline, as typically supplied by echo(1). */
	end = strchr(param, '\n');
	if (end)
		*end = '\0';

	spin_lock(&resource_alignment_lock);
	old = resource_alignment_param;
	if (strlen(param)) {
		resource_alignment_param = param;
	} else {
		/* Empty input clears the parameter. */
		kfree(param);
		resource_alignment_param = NULL;
	}
	spin_unlock(&resource_alignment_lock);

	/* Free the previous value after dropping the lock. */
	kfree(old);

	return count;
}
    6698             : 
    6699             : static BUS_ATTR_RW(resource_alignment);
    6700             : 
    6701           1 : static int __init pci_resource_alignment_sysfs_init(void)
    6702             : {
    6703           1 :         return bus_create_file(&pci_bus_type,
    6704             :                                         &bus_attr_resource_alignment);
    6705             : }
    6706             : late_initcall(pci_resource_alignment_sysfs_init);
    6707             : 
/* Disable PCI domain support ("pci=nodomains" boot parameter). */
static void pci_no_domains(void)
{
#ifdef CONFIG_PCI_DOMAINS
	pci_domains_supported = 0;
#endif
}
    6714             : 
#ifdef CONFIG_PCI_DOMAINS_GENERIC
/* Next dynamically allocated domain number; starts at 0 after first inc. */
static atomic_t __domain_nr = ATOMIC_INIT(-1);

/* Allocate the next unused PCI domain number. */
static int pci_get_new_domain_nr(void)
{
	return atomic_inc_return(&__domain_nr);
}
    6722             : 
/*
 * Determine the PCI domain number for a host bridge, either from the
 * "linux,pci-domain" DT property or by dynamic allocation.  Mixing the
 * two schemes across bridges is rejected (returns -1).
 */
static int of_pci_bus_find_domain_nr(struct device *parent)
{
	/* -1: undecided, 0: dynamic allocation, 1: DT-assigned. */
	static int use_dt_domains = -1;
	int domain = -1;

	if (parent)
		domain = of_get_pci_domain_nr(parent->of_node);

	/*
	 * Check DT domain and use_dt_domains values.
	 *
	 * If DT domain property is valid (domain >= 0) and
	 * use_dt_domains != 0, the DT assignment is valid since this means
	 * we have not previously allocated a domain number by using
	 * pci_get_new_domain_nr(); we should also update use_dt_domains to
	 * 1, to indicate that we have just assigned a domain number from
	 * DT.
	 *
	 * If DT domain property value is not valid (ie domain < 0), and we
	 * have not previously assigned a domain number from DT
	 * (use_dt_domains != 1) we should assign a domain number by
	 * using the:
	 *
	 * pci_get_new_domain_nr()
	 *
	 * API and update the use_dt_domains value to keep track of method we
	 * are using to assign domain numbers (use_dt_domains = 0).
	 *
	 * All other combinations imply we have a platform that is trying
	 * to mix domain numbers obtained from DT and pci_get_new_domain_nr(),
	 * which is a recipe for domain mishandling and it is prevented by
	 * invalidating the domain value (domain = -1) and printing a
	 * corresponding error.
	 */
	if (domain >= 0 && use_dt_domains) {
		use_dt_domains = 1;
	} else if (domain < 0 && use_dt_domains != 1) {
		use_dt_domains = 0;
		domain = pci_get_new_domain_nr();
	} else {
		if (parent)
			pr_err("Node %pOF has ", parent->of_node);
		pr_err("Inconsistent \"linux,pci-domain\" property in DT\n");
		domain = -1;
	}

	return domain;
}
    6771             : 
/*
 * Find the domain number for @bus: via ACPI when it is enabled,
 * otherwise via the device tree.
 */
int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
{
	return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
			       acpi_pci_bus_find_domain_nr(bus);
}
    6777             : #endif
    6778             : 
    6779             : /**
    6780             :  * pci_ext_cfg_avail - can we access extended PCI config space?
    6781             :  *
    6782             :  * Returns 1 if we can access PCI extended config space (offsets
    6783             :  * greater than 0xff). This is the default implementation. Architecture
    6784             :  * implementations can override this.
    6785             :  */
    6786           0 : int __weak pci_ext_cfg_avail(void)
    6787             : {
    6788           0 :         return 1;
    6789             : }
    6790             : 
/* Default no-op CardBus fixup; architectures may override this weak stub. */
void __weak pci_fixup_cardbus(struct pci_bus *bus)
{
}
EXPORT_SYMBOL(pci_fixup_cardbus);
    6795             : 
/*
 * Parse the "pci=" early boot parameter: a comma-separated list of
 * options, each handed to pcibios_setup() first so architectures get a
 * chance to consume it before the generic options below are matched.
 */
static int __init pci_setup(char *str)
{
	while (str) {
		/* Split off the next comma-separated token in place. */
		char *k = strchr(str, ',');
		if (k)
			*k++ = 0;
		/* pcibios_setup() returns the string unconsumed options. */
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else if (!strncmp(str, "noats", 5)) {
				pr_info("PCIe: ATS is disabled\n");
				pcie_ats_disabled = true;
			} else if (!strcmp(str, "noaer")) {
				pci_no_aer();
			} else if (!strcmp(str, "earlydump")) {
				pci_early_dump = true;
			} else if (!strncmp(str, "realloc=", 8)) {
				pci_realloc_get_opt(str + 8);
			} else if (!strncmp(str, "realloc", 7)) {
				pci_realloc_get_opt("on");
			} else if (!strcmp(str, "nodomains")) {
				pci_no_domains();
			} else if (!strncmp(str, "noari", 5)) {
				pcie_ari_disabled = true;
			} else if (!strncmp(str, "cbiosize=", 9)) {
				pci_cardbus_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "cbmemsize=", 10)) {
				pci_cardbus_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "resource_alignment=", 19)) {
				/* Points into __initdata; duplicated later by
				 * pci_realloc_setup_params(). */
				resource_alignment_param = str + 19;
			} else if (!strncmp(str, "ecrc=", 5)) {
				pcie_ecrc_get_policy(str + 5);
			} else if (!strncmp(str, "hpiosize=", 9)) {
				pci_hotplug_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "hpmmiosize=", 11)) {
				pci_hotplug_mmio_size = memparse(str + 11, &str);
			} else if (!strncmp(str, "hpmmioprefsize=", 15)) {
				pci_hotplug_mmio_pref_size = memparse(str + 15, &str);
			} else if (!strncmp(str, "hpmemsize=", 10)) {
				/* Legacy option: sets both MMIO and MMIO_PREF. */
				pci_hotplug_mmio_size = memparse(str + 10, &str);
				pci_hotplug_mmio_pref_size = pci_hotplug_mmio_size;
			} else if (!strncmp(str, "hpbussize=", 10)) {
				pci_hotplug_bus_size =
					simple_strtoul(str + 10, &str, 0);
				if (pci_hotplug_bus_size > 0xff)
					pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
				pcie_bus_config = PCIE_BUS_TUNE_OFF;
			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
				pcie_bus_config = PCIE_BUS_SAFE;
			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
				pcie_bus_config = PCIE_BUS_PERFORMANCE;
			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
				pcie_bus_config = PCIE_BUS_PEER2PEER;
			} else if (!strncmp(str, "pcie_scan_all", 13)) {
				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
			} else if (!strncmp(str, "disable_acs_redir=", 18)) {
				/* Points into __initdata; duplicated later by
				 * pci_realloc_setup_params(). */
				disable_acs_redir_param = str + 18;
			} else {
				pr_err("PCI: Unknown option `%s'\n", str);
			}
		}
		str = k;
	}
	return 0;
}
early_param("pci", pci_setup);
    6863             : 
    6864             : /*
    6865             :  * 'resource_alignment_param' and 'disable_acs_redir_param' are initialized
    6866             :  * in pci_setup(), above, to point to data in the __initdata section which
    6867             :  * will be freed after the init sequence is complete. We can't allocate memory
    6868             :  * in pci_setup() because some architectures do not have any memory allocation
    6869             :  * service available during an early_param() call. So we allocate memory and
    6870             :  * copy the variable here before the init section is freed.
    6871             :  *
    6872             :  */
    6873           1 : static int __init pci_realloc_setup_params(void)
    6874             : {
    6875           1 :         resource_alignment_param = kstrdup(resource_alignment_param,
    6876             :                                            GFP_KERNEL);
    6877           1 :         disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);
    6878             : 
    6879           1 :         return 0;
    6880             : }
    6881             : pure_initcall(pci_realloc_setup_params);

Generated by: LCOV version 1.14