LCOV - code coverage report
Current view: top level - drivers/base/power - runtime.c (source / functions)
Test: coverage.info
Date: 2022-12-09 01:23:36
                  Hit    Total    Coverage
Lines:             55      659       8.3 %
Functions:          6       50      12.0 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * drivers/base/power/runtime.c - Helper functions for device runtime PM
       4             :  *
       5             :  * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
       6             :  * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
       7             :  */
       8             : #include <linux/sched/mm.h>
       9             : #include <linux/ktime.h>
      10             : #include <linux/hrtimer.h>
      11             : #include <linux/export.h>
      12             : #include <linux/pm_runtime.h>
      13             : #include <linux/pm_wakeirq.h>
      14             : #include <trace/events/rpm.h>
      15             : 
      16             : #include "../base.h"
      17             : #include "power.h"
      18             : 
      19             : typedef int (*pm_callback_t)(struct device *);
      20             : 
      21           0 : static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
      22             : {
      23             :         pm_callback_t cb;
      24             :         const struct dev_pm_ops *ops;
      25             : 
      26           0 :         if (dev->pm_domain)
      27           0 :                 ops = &dev->pm_domain->ops;
      28           0 :         else if (dev->type && dev->type->pm)
      29             :                 ops = dev->type->pm;
      30           0 :         else if (dev->class && dev->class->pm)
      31             :                 ops = dev->class->pm;
      32           0 :         else if (dev->bus && dev->bus->pm)
      33           0 :                 ops = dev->bus->pm;
      34             :         else
      35             :                 ops = NULL;
      36             : 
      37           0 :         if (ops)
      38           0 :                 cb = *(pm_callback_t *)((void *)ops + cb_offset);
      39             :         else
      40             :                 cb = NULL;
      41             : 
      42           0 :         if (!cb && dev->driver && dev->driver->pm)
      43           0 :                 cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
      44             : 
      45           0 :         return cb;
      46             : }
      47             : 
      48             : #define RPM_GET_CALLBACK(dev, callback) \
      49             :                 __rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
      50             : 
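For illustration, RPM_GET_CALLBACK(dev, runtime_suspend) resolves the
->runtime_suspend member of whichever dev_pm_ops table takes precedence
(PM domain, then type, then class, then bus, with the driver's own ops as
the fallback). A minimal sketch of how such a callback is invoked
(hypothetical fragment, not part of runtime.c):

    pm_callback_t cb = RPM_GET_CALLBACK(dev, runtime_suspend);

    if (cb)
            retval = cb(dev);   /* run the highest-priority callback */
    else
            retval = 0;         /* no callback is treated as success */
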
      51             : static int rpm_resume(struct device *dev, int rpmflags);
      52             : static int rpm_suspend(struct device *dev, int rpmflags);
      53             : 
      54             : /**
      55             :  * update_pm_runtime_accounting - Update the time accounting of power states
      56             :  * @dev: Device to update the accounting for
      57             :  *
      58             :  * In order to be able to have time accounting of the various power states
      59             :  * (as used by programs such as PowerTOP to show the effectiveness of runtime
      60             :  * PM), we need to track the time spent in each state.
      61             :  * update_pm_runtime_accounting must be called each time before the
      62             :  * runtime_status field is updated, to account the time in the old state
      63             :  * correctly.
      64             :  */
      65           0 : static void update_pm_runtime_accounting(struct device *dev)
      66             : {
      67             :         u64 now, last, delta;
      68             : 
      69           0 :         if (dev->power.disable_depth > 0)
      70             :                 return;
      71             : 
      72           0 :         last = dev->power.accounting_timestamp;
      73             : 
      74           0 :         now = ktime_get_mono_fast_ns();
      75           0 :         dev->power.accounting_timestamp = now;
      76             : 
      77             :         /*
      78             :          * Because ktime_get_mono_fast_ns() is not monotonic during
      79             :          * timekeeping updates, ensure that 'now' is after the last saved
       80             :          * timestamp.
      81             :          */
      82           0 :         if (now < last)
      83             :                 return;
      84             : 
      85           0 :         delta = now - last;
      86             : 
      87           0 :         if (dev->power.runtime_status == RPM_SUSPENDED)
      88           0 :                 dev->power.suspended_time += delta;
      89             :         else
      90           0 :                 dev->power.active_time += delta;
      91             : }
      92             : 
      93             : static void __update_runtime_status(struct device *dev, enum rpm_status status)
      94             : {
      95           0 :         update_pm_runtime_accounting(dev);
      96           0 :         dev->power.runtime_status = status;
      97             : }
      98             : 
      99           0 : static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
     100             : {
     101             :         u64 time;
     102             :         unsigned long flags;
     103             : 
     104           0 :         spin_lock_irqsave(&dev->power.lock, flags);
     105             : 
     106           0 :         update_pm_runtime_accounting(dev);
     107           0 :         time = suspended ? dev->power.suspended_time : dev->power.active_time;
     108             : 
     109           0 :         spin_unlock_irqrestore(&dev->power.lock, flags);
     110             : 
     111           0 :         return time;
     112             : }
     113             : 
     114           0 : u64 pm_runtime_active_time(struct device *dev)
     115             : {
     116           0 :         return rpm_get_accounted_time(dev, false);
     117             : }
     118             : 
     119           0 : u64 pm_runtime_suspended_time(struct device *dev)
     120             : {
     121           0 :         return rpm_get_accounted_time(dev, true);
     122             : }
     123             : EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
     124             : 
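pm_runtime_suspended_time() is the exported accessor for the accounting
kept above; a minimal sketch of a driver reading it (hypothetical helper,
assuming <linux/pm_runtime.h> and a valid dev pointer):

    static void report_suspended_time(struct device *dev)
    {
            u64 ns = pm_runtime_suspended_time(dev);

            dev_info(dev, "runtime-suspended for %llu ms so far\n",
                     ns / NSEC_PER_MSEC);
    }
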
     125             : /**
     126             :  * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
     127             :  * @dev: Device to handle.
     128             :  */
     129             : static void pm_runtime_deactivate_timer(struct device *dev)
     130             : {
     131         536 :         if (dev->power.timer_expires > 0) {
     132           0 :                 hrtimer_try_to_cancel(&dev->power.suspend_timer);
     133           0 :                 dev->power.timer_expires = 0;
     134             :         }
     135             : }
     136             : 
     137             : /**
     138             :  * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
     139             :  * @dev: Device to handle.
     140             :  */
     141             : static void pm_runtime_cancel_pending(struct device *dev)
     142             : {
     143           0 :         pm_runtime_deactivate_timer(dev);
     144             :         /*
     145             :          * In case there's a request pending, make sure its work function will
     146             :          * return without doing anything.
     147             :          */
     148           0 :         dev->power.request = RPM_REQ_NONE;
     149             : }
     150             : 
      151             : /**
     152             :  * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
     153             :  * @dev: Device to handle.
     154             :  *
     155             :  * Compute the autosuspend-delay expiration time based on the device's
     156             :  * power.last_busy time.  If the delay has already expired or is disabled
     157             :  * (negative) or the power.use_autosuspend flag isn't set, return 0.
     158             :  * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
     159             :  *
     160             :  * This function may be called either with or without dev->power.lock held.
     161             :  * Either way it can be racy, since power.last_busy may be updated at any time.
     162             :  */
     163           0 : u64 pm_runtime_autosuspend_expiration(struct device *dev)
     164             : {
     165             :         int autosuspend_delay;
     166             :         u64 expires;
     167             : 
     168           0 :         if (!dev->power.use_autosuspend)
     169             :                 return 0;
     170             : 
     171           0 :         autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
     172           0 :         if (autosuspend_delay < 0)
     173             :                 return 0;
     174             : 
     175           0 :         expires  = READ_ONCE(dev->power.last_busy);
     176           0 :         expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
     177           0 :         if (expires > ktime_get_mono_fast_ns())
     178             :                 return expires; /* Expires in the future */
     179             : 
     180           0 :         return 0;
     181             : }
     182             : EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
     183             : 
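The expiration time computed here is driven entirely by power.last_busy
and power.autosuspend_delay, which drivers manage through the public
helpers. A minimal sketch of the usual pattern (hypothetical fragments
from a driver's probe() and I/O-completion paths):

    /* probe(): opt in to autosuspend with a 2 s delay */
    pm_runtime_set_autosuspend_delay(dev, 2000);
    pm_runtime_use_autosuspend(dev);
    pm_runtime_enable(dev);

    /* I/O completion: refresh last_busy, then drop the usage count */
    pm_runtime_mark_last_busy(dev);
    pm_runtime_put_autosuspend(dev);
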
     184           0 : static int dev_memalloc_noio(struct device *dev, void *data)
     185             : {
     186           0 :         return dev->power.memalloc_noio;
     187             : }
     188             : 
      189             : /**
     190             :  * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
     191             :  * @dev: Device to handle.
     192             :  * @enable: True for setting the flag and False for clearing the flag.
     193             :  *
     194             :  * Set the flag for all devices in the path from the device to the
     195             :  * root device in the device tree if @enable is true, otherwise clear
     196             :  * the flag for devices in the path whose siblings don't set the flag.
     197             :  *
      198             :  * The function should only be called by block device or network
      199             :  * device drivers to solve the deadlock problem during runtime
      200             :  * resume/suspend:
      201             :  *
      202             :  *     If memory allocation with GFP_KERNEL is called inside the runtime
      203             :  *     resume/suspend callback of any of the device's ancestors (or the
      204             :  *     block device itself), a deadlock may be triggered inside the
      205             :  *     memory allocation, since it might not complete until the block
      206             :  *     device becomes active and the involved page I/O finishes. This
      207             :  *     situation was first pointed out by Alan Stern. Network devices
      208             :  *     are involved in iSCSI-like situations.
      209             :  *
      210             :  * dev_hotplug_mutex is held in the function to handle the hotplug
      211             :  * race, because pm_runtime_set_memalloc_noio() may be called from an
      212             :  * async probe().
      213             :  *
      214             :  * The function should be called between device_add() and device_del()
      215             :  * on the affected device (block/network device).
     216             :  */
     217           0 : void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
     218             : {
     219             :         static DEFINE_MUTEX(dev_hotplug_mutex);
     220             : 
     221           0 :         mutex_lock(&dev_hotplug_mutex);
     222             :         for (;;) {
     223             :                 bool enabled;
     224             : 
     225             :                 /* hold power lock since bitfield is not SMP-safe. */
     226           0 :                 spin_lock_irq(&dev->power.lock);
     227           0 :                 enabled = dev->power.memalloc_noio;
     228           0 :                 dev->power.memalloc_noio = enable;
     229           0 :                 spin_unlock_irq(&dev->power.lock);
     230             : 
     231             :                 /*
      232             :                  * No need to enable the ancestors any more if the
      233             :                  * device itself has already been enabled.
     234             :                  */
     235           0 :                 if (enabled && enable)
     236             :                         break;
     237             : 
     238           0 :                 dev = dev->parent;
     239             : 
     240             :                 /*
      241             :                  * Clear the parent device's flag only if none of its
      242             :                  * children set the flag, because an ancestor's flag
      243             :                  * may have been set by any one of its descendants.
     244             :                  */
     245           0 :                 if (!dev || (!enable &&
     246           0 :                              device_for_each_child(dev, NULL,
     247             :                                                    dev_memalloc_noio)))
     248             :                         break;
     249             :         }
     250           0 :         mutex_unlock(&dev_hotplug_mutex);
     251           0 : }
     252             : EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
     253             : 
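A minimal sketch of the intended call sites in a block or network device
driver (hypothetical fragments; as noted above, the flag must be managed
between device_add() and device_del()):

    err = device_add(dev);
    if (err)
            return err;
    /* Mark the whole path up to the root device as memalloc_noio. */
    pm_runtime_set_memalloc_noio(dev, true);

    /* On teardown, clear the flag before removing the device. */
    pm_runtime_set_memalloc_noio(dev, false);
    device_del(dev);
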
     254             : /**
     255             :  * rpm_check_suspend_allowed - Test whether a device may be suspended.
     256             :  * @dev: Device to test.
     257             :  */
     258           4 : static int rpm_check_suspend_allowed(struct device *dev)
     259             : {
     260           4 :         int retval = 0;
     261             : 
     262           4 :         if (dev->power.runtime_error)
     263             :                 retval = -EINVAL;
     264           4 :         else if (dev->power.disable_depth > 0)
     265             :                 retval = -EACCES;
     266           0 :         else if (atomic_read(&dev->power.usage_count) > 0)
     267             :                 retval = -EAGAIN;
     268           0 :         else if (!dev->power.ignore_children &&
     269           0 :                         atomic_read(&dev->power.child_count))
     270             :                 retval = -EBUSY;
     271             : 
     272             :         /* Pending resume requests take precedence over suspends. */
     273           0 :         else if ((dev->power.deferred_resume
     274           0 :                         && dev->power.runtime_status == RPM_SUSPENDING)
     275           0 :             || (dev->power.request_pending
     276           0 :                         && dev->power.request == RPM_REQ_RESUME))
     277             :                 retval = -EAGAIN;
     278           0 :         else if (__dev_pm_qos_resume_latency(dev) == 0)
     279             :                 retval = -EPERM;
     280           0 :         else if (dev->power.runtime_status == RPM_SUSPENDED)
     281           0 :                 retval = 1;
     282             : 
     283           4 :         return retval;
     284             : }
     285             : 
     286           0 : static int rpm_get_suppliers(struct device *dev)
     287             : {
     288             :         struct device_link *link;
     289             : 
     290           0 :         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
     291             :                                 device_links_read_lock_held()) {
     292             :                 int retval;
     293             : 
     294           0 :                 if (!(link->flags & DL_FLAG_PM_RUNTIME))
     295           0 :                         continue;
     296             : 
     297           0 :                 retval = pm_runtime_get_sync(link->supplier);
     298             :                 /* Ignore suppliers with disabled runtime PM. */
     299           0 :                 if (retval < 0 && retval != -EACCES) {
     300           0 :                         pm_runtime_put_noidle(link->supplier);
     301             :                         return retval;
     302             :                 }
     303           0 :                 refcount_inc(&link->rpm_active);
     304             :         }
     305             :         return 0;
     306             : }
     307             : 
     308             : /**
     309             :  * pm_runtime_release_supplier - Drop references to device link's supplier.
     310             :  * @link: Target device link.
     311             :  * @check_idle: Whether or not to check if the supplier device is idle.
     312             :  *
     313             :  * Drop all runtime PM references associated with @link to its supplier device
     314             :  * and if @check_idle is set, check if that device is idle (and so it can be
     315             :  * suspended).
     316             :  */
     317           0 : void pm_runtime_release_supplier(struct device_link *link, bool check_idle)
     318             : {
     319           0 :         struct device *supplier = link->supplier;
     320             : 
     321             :         /*
     322             :          * The additional power.usage_count check is a safety net in case
     323             :          * the rpm_active refcount becomes saturated, in which case
     324             :          * refcount_dec_not_one() would return true forever, but it is not
     325             :          * strictly necessary.
     326             :          */
     327           0 :         while (refcount_dec_not_one(&link->rpm_active) &&
     328           0 :                atomic_read(&supplier->power.usage_count) > 0)
     329             :                 pm_runtime_put_noidle(supplier);
     330             : 
     331           0 :         if (check_idle)
     332             :                 pm_request_idle(supplier);
     333           0 : }
     334             : 
     335           0 : static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
     336             : {
     337             :         struct device_link *link;
     338             : 
     339           0 :         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
     340             :                                 device_links_read_lock_held())
     341           0 :                 pm_runtime_release_supplier(link, try_to_suspend);
     342           0 : }
     343             : 
     344           0 : static void rpm_put_suppliers(struct device *dev)
     345             : {
     346           0 :         __rpm_put_suppliers(dev, true);
     347           0 : }
     348             : 
     349           0 : static void rpm_suspend_suppliers(struct device *dev)
     350             : {
     351             :         struct device_link *link;
     352           0 :         int idx = device_links_read_lock();
     353             : 
     354           0 :         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
     355             :                                 device_links_read_lock_held())
     356           0 :                 pm_request_idle(link->supplier);
     357             : 
     358           0 :         device_links_read_unlock(idx);
     359           0 : }
     360             : 
     361             : /**
     362             :  * __rpm_callback - Run a given runtime PM callback for a given device.
     363             :  * @cb: Runtime PM callback to run.
     364             :  * @dev: Device to run the callback for.
     365             :  */
     366           0 : static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
     367             :         __releases(&dev->power.lock) __acquires(&dev->power.lock)
     368             : {
     369           0 :         int retval = 0, idx;
     370           0 :         bool use_links = dev->power.links_count > 0;
     371             : 
     372           0 :         if (dev->power.irq_safe) {
     373           0 :                 spin_unlock(&dev->power.lock);
     374             :         } else {
     375           0 :                 spin_unlock_irq(&dev->power.lock);
     376             : 
     377             :                 /*
     378             :                  * Resume suppliers if necessary.
     379             :                  *
     380             :                  * The device's runtime PM status cannot change until this
     381             :                  * routine returns, so it is safe to read the status outside of
     382             :                  * the lock.
     383             :                  */
     384           0 :                 if (use_links && dev->power.runtime_status == RPM_RESUMING) {
     385           0 :                         idx = device_links_read_lock();
     386             : 
     387           0 :                         retval = rpm_get_suppliers(dev);
     388           0 :                         if (retval) {
     389           0 :                                 rpm_put_suppliers(dev);
     390           0 :                                 goto fail;
     391             :                         }
     392             : 
     393           0 :                         device_links_read_unlock(idx);
     394             :                 }
     395             :         }
     396             : 
     397           0 :         if (cb)
     398           0 :                 retval = cb(dev);
     399             : 
     400           0 :         if (dev->power.irq_safe) {
     401           0 :                 spin_lock(&dev->power.lock);
     402             :         } else {
     403             :                 /*
     404             :                  * If the device is suspending and the callback has returned
     405             :                  * success, drop the usage counters of the suppliers that have
     406             :                  * been reference counted on its resume.
     407             :                  *
     408             :                  * Do that if resume fails too.
     409             :                  */
     410           0 :                 if (use_links
     411           0 :                     && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
     412           0 :                     || (dev->power.runtime_status == RPM_RESUMING && retval))) {
     413           0 :                         idx = device_links_read_lock();
     414             : 
     415           0 :                         __rpm_put_suppliers(dev, false);
     416             : 
     417             : fail:
     418           0 :                         device_links_read_unlock(idx);
     419             :                 }
     420             : 
     421           0 :                 spin_lock_irq(&dev->power.lock);
     422             :         }
     423             : 
     424           0 :         return retval;
     425             : }
     426             : 
     427             : /**
     428             :  * rpm_idle - Notify device bus type if the device can be suspended.
     429             :  * @dev: Device to notify the bus type about.
     430             :  * @rpmflags: Flag bits.
     431             :  *
     432             :  * Check if the device's runtime PM status allows it to be suspended.  If
     433             :  * another idle notification has been started earlier, return immediately.  If
     434             :  * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
     435             :  * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
     436             :  * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
     437             :  *
     438             :  * This function must be called under dev->power.lock with interrupts disabled.
     439             :  */
     440           4 : static int rpm_idle(struct device *dev, int rpmflags)
     441             : {
     442             :         int (*callback)(struct device *);
     443             :         int retval;
     444             : 
     445           4 :         trace_rpm_idle_rcuidle(dev, rpmflags);
     446           4 :         retval = rpm_check_suspend_allowed(dev);
     447           4 :         if (retval < 0)
     448             :                 ;       /* Conditions are wrong. */
     449             : 
     450             :         /* Idle notifications are allowed only in the RPM_ACTIVE state. */
     451           0 :         else if (dev->power.runtime_status != RPM_ACTIVE)
     452             :                 retval = -EAGAIN;
     453             : 
     454             :         /*
     455             :          * Any pending request other than an idle notification takes
     456             :          * precedence over us, except that the timer may be running.
     457             :          */
     458           0 :         else if (dev->power.request_pending &&
     459           0 :             dev->power.request > RPM_REQ_IDLE)
     460             :                 retval = -EAGAIN;
     461             : 
     462             :         /* Act as though RPM_NOWAIT is always set. */
     463           0 :         else if (dev->power.idle_notification)
     464           0 :                 retval = -EINPROGRESS;
     465           4 :         if (retval)
     466             :                 goto out;
     467             : 
     468             :         /* Pending requests need to be canceled. */
     469           0 :         dev->power.request = RPM_REQ_NONE;
     470             : 
     471           0 :         callback = RPM_GET_CALLBACK(dev, runtime_idle);
     472             : 
     473             :         /* If no callback assume success. */
     474           0 :         if (!callback || dev->power.no_callbacks)
     475             :                 goto out;
     476             : 
     477             :         /* Carry out an asynchronous or a synchronous idle notification. */
     478           0 :         if (rpmflags & RPM_ASYNC) {
     479           0 :                 dev->power.request = RPM_REQ_IDLE;
     480           0 :                 if (!dev->power.request_pending) {
     481           0 :                         dev->power.request_pending = true;
     482           0 :                         queue_work(pm_wq, &dev->power.work);
     483             :                 }
     484           0 :                 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
     485           0 :                 return 0;
     486             :         }
     487             : 
     488           0 :         dev->power.idle_notification = true;
     489             : 
     490           0 :         retval = __rpm_callback(callback, dev);
     491             : 
     492           0 :         dev->power.idle_notification = false;
     493           0 :         wake_up_all(&dev->power.wait_queue);
     494             : 
     495             :  out:
     496           4 :         trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
     497           4 :         return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
     498             : }
     499             : 
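rpm_idle() is reached through the public wrappers declared in
include/linux/pm_runtime.h; a sketch of the two flavours drivers call:

    pm_runtime_idle(dev);   /* synchronous: may run ->runtime_idle() directly */
    pm_request_idle(dev);   /* asynchronous: queues the notification on pm_wq */
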
     500             : /**
     501             :  * rpm_callback - Run a given runtime PM callback for a given device.
     502             :  * @cb: Runtime PM callback to run.
     503             :  * @dev: Device to run the callback for.
     504             :  */
     505           0 : static int rpm_callback(int (*cb)(struct device *), struct device *dev)
     506             : {
     507             :         int retval;
     508             : 
     509           0 :         if (dev->power.memalloc_noio) {
     510             :                 unsigned int noio_flag;
     511             : 
     512             :                 /*
     513             :                  * Deadlock might be caused if memory allocation with
     514             :                  * GFP_KERNEL happens inside runtime_suspend and
     515             :                  * runtime_resume callbacks of one block device's
     516             :                  * ancestor or the block device itself. Network
     517             :                  * device might be thought as part of iSCSI block
     518             :                  * device, so network device and its ancestor should
     519             :                  * be marked as memalloc_noio too.
     520             :                  */
     521           0 :                 noio_flag = memalloc_noio_save();
     522           0 :                 retval = __rpm_callback(cb, dev);
     523             :                 memalloc_noio_restore(noio_flag);
     524             :         } else {
     525           0 :                 retval = __rpm_callback(cb, dev);
     526             :         }
     527             : 
     528           0 :         dev->power.runtime_error = retval;
     529           0 :         return retval != -EACCES ? retval : -EIO;
     530             : }
     531             : 
     532             : /**
     533             :  * rpm_suspend - Carry out runtime suspend of given device.
     534             :  * @dev: Device to suspend.
     535             :  * @rpmflags: Flag bits.
     536             :  *
     537             :  * Check if the device's runtime PM status allows it to be suspended.
     538             :  * Cancel a pending idle notification, autosuspend or suspend. If
     539             :  * another suspend has been started earlier, either return immediately
     540             :  * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
     541             :  * flags. If the RPM_ASYNC flag is set then queue a suspend request;
      542             :  * otherwise run the ->runtime_suspend() callback directly. If the
      543             :  * callback succeeds and a deferred resume was requested while it was
      544             :  * running, carry out that resume; otherwise send an idle notification
      545             :  * for the device's parent (provided the suspend succeeded and neither
      546             :  * ignore_children of parent->power nor irq_safe of dev->power is set).
     547             :  * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
     548             :  * flag is set and the next autosuspend-delay expiration time is in the
     549             :  * future, schedule another autosuspend attempt.
     550             :  *
     551             :  * This function must be called under dev->power.lock with interrupts disabled.
     552             :  */
     553           0 : static int rpm_suspend(struct device *dev, int rpmflags)
     554             :         __releases(&dev->power.lock) __acquires(&dev->power.lock)
     555             : {
     556             :         int (*callback)(struct device *);
     557           0 :         struct device *parent = NULL;
     558             :         int retval;
     559             : 
     560           0 :         trace_rpm_suspend_rcuidle(dev, rpmflags);
     561             : 
     562             :  repeat:
     563           0 :         retval = rpm_check_suspend_allowed(dev);
     564           0 :         if (retval < 0)
     565             :                 goto out;       /* Conditions are wrong. */
     566             : 
     567             :         /* Synchronous suspends are not allowed in the RPM_RESUMING state. */
     568           0 :         if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC))
     569           0 :                 retval = -EAGAIN;
     570           0 :         if (retval)
     571             :                 goto out;
     572             : 
     573             :         /* If the autosuspend_delay time hasn't expired yet, reschedule. */
     574           0 :         if ((rpmflags & RPM_AUTO)
     575           0 :             && dev->power.runtime_status != RPM_SUSPENDING) {
     576           0 :                 u64 expires = pm_runtime_autosuspend_expiration(dev);
     577             : 
     578           0 :                 if (expires != 0) {
     579             :                         /* Pending requests need to be canceled. */
     580           0 :                         dev->power.request = RPM_REQ_NONE;
     581             : 
     582             :                         /*
     583             :                          * Optimization: If the timer is already running and is
     584             :                          * set to expire at or before the autosuspend delay,
     585             :                          * avoid the overhead of resetting it.  Just let it
     586             :                          * expire; pm_suspend_timer_fn() will take care of the
     587             :                          * rest.
     588             :                          */
     589           0 :                         if (!(dev->power.timer_expires &&
     590             :                                         dev->power.timer_expires <= expires)) {
     591             :                                 /*
     592             :                                  * We add a slack of 25% to gather wakeups
     593             :                                  * without sacrificing the granularity.
     594             :                                  */
     595           0 :                                 u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
     596             :                                                     (NSEC_PER_MSEC >> 2);
     597             : 
     598           0 :                                 dev->power.timer_expires = expires;
     599           0 :                                 hrtimer_start_range_ns(&dev->power.suspend_timer,
     600             :                                                 ns_to_ktime(expires),
     601             :                                                 slack,
     602             :                                                 HRTIMER_MODE_ABS);
     603             :                         }
     604           0 :                         dev->power.timer_autosuspends = 1;
     605           0 :                         goto out;
     606             :                 }
     607             :         }
     608             : 
     609             :         /* Other scheduled or pending requests need to be canceled. */
     610           0 :         pm_runtime_cancel_pending(dev);
     611             : 
     612           0 :         if (dev->power.runtime_status == RPM_SUSPENDING) {
     613           0 :                 DEFINE_WAIT(wait);
     614             : 
     615           0 :                 if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
     616           0 :                         retval = -EINPROGRESS;
     617           0 :                         goto out;
     618             :                 }
     619             : 
     620           0 :                 if (dev->power.irq_safe) {
     621           0 :                         spin_unlock(&dev->power.lock);
     622             : 
     623             :                         cpu_relax();
     624             : 
     625           0 :                         spin_lock(&dev->power.lock);
     626           0 :                         goto repeat;
     627             :                 }
     628             : 
     629             :                 /* Wait for the other suspend running in parallel with us. */
     630             :                 for (;;) {
     631           0 :                         prepare_to_wait(&dev->power.wait_queue, &wait,
     632             :                                         TASK_UNINTERRUPTIBLE);
     633           0 :                         if (dev->power.runtime_status != RPM_SUSPENDING)
     634             :                                 break;
     635             : 
     636           0 :                         spin_unlock_irq(&dev->power.lock);
     637             : 
     638           0 :                         schedule();
     639             : 
     640           0 :                         spin_lock_irq(&dev->power.lock);
     641             :                 }
     642           0 :                 finish_wait(&dev->power.wait_queue, &wait);
     643           0 :                 goto repeat;
     644             :         }
     645             : 
     646           0 :         if (dev->power.no_callbacks)
     647             :                 goto no_callback;       /* Assume success. */
     648             : 
     649             :         /* Carry out an asynchronous or a synchronous suspend. */
     650           0 :         if (rpmflags & RPM_ASYNC) {
     651           0 :                 dev->power.request = (rpmflags & RPM_AUTO) ?
     652           0 :                     RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
     653           0 :                 if (!dev->power.request_pending) {
     654           0 :                         dev->power.request_pending = true;
     655           0 :                         queue_work(pm_wq, &dev->power.work);
     656             :                 }
     657             :                 goto out;
     658             :         }
     659             : 
     660           0 :         __update_runtime_status(dev, RPM_SUSPENDING);
     661             : 
     662           0 :         callback = RPM_GET_CALLBACK(dev, runtime_suspend);
     663             : 
     664           0 :         dev_pm_enable_wake_irq_check(dev, true);
     665           0 :         retval = rpm_callback(callback, dev);
     666           0 :         if (retval)
     667             :                 goto fail;
     668             : 
     669           0 :         dev_pm_enable_wake_irq_complete(dev);
     670             : 
     671             :  no_callback:
     672           0 :         __update_runtime_status(dev, RPM_SUSPENDED);
     673           0 :         pm_runtime_deactivate_timer(dev);
     674             : 
     675           0 :         if (dev->parent) {
     676           0 :                 parent = dev->parent;
     677           0 :                 atomic_add_unless(&parent->power.child_count, -1, 0);
     678             :         }
     679           0 :         wake_up_all(&dev->power.wait_queue);
     680             : 
     681           0 :         if (dev->power.deferred_resume) {
     682           0 :                 dev->power.deferred_resume = false;
     683           0 :                 rpm_resume(dev, 0);
     684           0 :                 retval = -EAGAIN;
     685           0 :                 goto out;
     686             :         }
     687             : 
     688           0 :         if (dev->power.irq_safe)
     689             :                 goto out;
     690             : 
     691             :         /* Maybe the parent is now able to suspend. */
     692           0 :         if (parent && !parent->power.ignore_children) {
     693           0 :                 spin_unlock(&dev->power.lock);
     694             : 
     695           0 :                 spin_lock(&parent->power.lock);
     696           0 :                 rpm_idle(parent, RPM_ASYNC);
     697           0 :                 spin_unlock(&parent->power.lock);
     698             : 
     699           0 :                 spin_lock(&dev->power.lock);
     700             :         }
     701             :         /* Maybe the suppliers are now able to suspend. */
     702           0 :         if (dev->power.links_count > 0) {
     703           0 :                 spin_unlock_irq(&dev->power.lock);
     704             : 
     705           0 :                 rpm_suspend_suppliers(dev);
     706             : 
     707           0 :                 spin_lock_irq(&dev->power.lock);
     708             :         }
     709             : 
     710             :  out:
     711           0 :         trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
     712             : 
     713           0 :         return retval;
     714             : 
     715             :  fail:
     716           0 :         dev_pm_disable_wake_irq_check(dev, true);
     717           0 :         __update_runtime_status(dev, RPM_ACTIVE);
     718           0 :         dev->power.deferred_resume = false;
     719           0 :         wake_up_all(&dev->power.wait_queue);
     720             : 
     721           0 :         if (retval == -EAGAIN || retval == -EBUSY) {
     722           0 :                 dev->power.runtime_error = 0;
     723             : 
     724             :                 /*
     725             :                  * If the callback routine failed an autosuspend, and
     726             :                  * if the last_busy time has been updated so that there
     727             :                  * is a new autosuspend expiration time, automatically
     728             :                  * reschedule another autosuspend.
     729             :                  */
     730           0 :                 if ((rpmflags & RPM_AUTO) &&
     731           0 :                     pm_runtime_autosuspend_expiration(dev) != 0)
     732             :                         goto repeat;
     733             :         } else {
     734             :                 pm_runtime_cancel_pending(dev);
     735             :         }
     736             :         goto out;
     737             : }
     738             : 
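The rpmflags combinations handled above correspond to the public wrappers
declared in include/linux/pm_runtime.h; a sketch of the mapping:

    pm_runtime_suspend(dev);      /* synchronous suspend, no flags          */
    pm_runtime_autosuspend(dev);  /* RPM_AUTO: honour the autosuspend delay */
    pm_request_autosuspend(dev);  /* RPM_ASYNC | RPM_AUTO: queued on pm_wq  */
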
     739             : /**
     740             :  * rpm_resume - Carry out runtime resume of given device.
     741             :  * @dev: Device to resume.
     742             :  * @rpmflags: Flag bits.
     743             :  *
     744             :  * Check if the device's runtime PM status allows it to be resumed.  Cancel
     745             :  * any scheduled or pending requests.  If another resume has been started
     746             :  * earlier, either return immediately or wait for it to finish, depending on the
     747             :  * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
     748             :  * parallel with this function, either tell the other process to resume after
     749             :  * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
     750             :  * flag is set then queue a resume request; otherwise run the
     751             :  * ->runtime_resume() callback directly.  Queue an idle notification for the
     752             :  * device if the resume succeeded.
     753             :  *
     754             :  * This function must be called under dev->power.lock with interrupts disabled.
     755             :  */
     756           0 : static int rpm_resume(struct device *dev, int rpmflags)
     757             :         __releases(&dev->power.lock) __acquires(&dev->power.lock)
     758             : {
     759             :         int (*callback)(struct device *);
     760           0 :         struct device *parent = NULL;
     761           0 :         int retval = 0;
     762             : 
     763           0 :         trace_rpm_resume_rcuidle(dev, rpmflags);
     764             : 
     765             :  repeat:
     766           0 :         if (dev->power.runtime_error) {
     767             :                 retval = -EINVAL;
     768           0 :         } else if (dev->power.disable_depth > 0) {
     769           0 :                 if (dev->power.runtime_status == RPM_ACTIVE &&
     770           0 :                     dev->power.last_status == RPM_ACTIVE)
     771             :                         retval = 1;
     772             :                 else
     773           0 :                         retval = -EACCES;
     774             :         }
     775           0 :         if (retval)
     776             :                 goto out;
     777             : 
     778             :         /*
     779             :          * Other scheduled or pending requests need to be canceled.  Small
     780             :          * optimization: If an autosuspend timer is running, leave it running
     781             :          * rather than cancelling it now only to restart it again in the near
     782             :          * future.
     783             :          */
     784           0 :         dev->power.request = RPM_REQ_NONE;
     785           0 :         if (!dev->power.timer_autosuspends)
     786             :                 pm_runtime_deactivate_timer(dev);
     787             : 
     788           0 :         if (dev->power.runtime_status == RPM_ACTIVE) {
     789             :                 retval = 1;
     790             :                 goto out;
     791             :         }
     792             : 
     793           0 :         if (dev->power.runtime_status == RPM_RESUMING
     794           0 :             || dev->power.runtime_status == RPM_SUSPENDING) {
     795           0 :                 DEFINE_WAIT(wait);
     796             : 
     797           0 :                 if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
     798           0 :                         if (dev->power.runtime_status == RPM_SUSPENDING)
     799           0 :                                 dev->power.deferred_resume = true;
     800             :                         else
     801             :                                 retval = -EINPROGRESS;
     802           0 :                         goto out;
     803             :                 }
     804             : 
     805           0 :                 if (dev->power.irq_safe) {
     806           0 :                         spin_unlock(&dev->power.lock);
     807             : 
     808             :                         cpu_relax();
     809             : 
     810           0 :                         spin_lock(&dev->power.lock);
     811           0 :                         goto repeat;
     812             :                 }
     813             : 
     814             :                 /* Wait for the operation carried out in parallel with us. */
     815             :                 for (;;) {
     816           0 :                         prepare_to_wait(&dev->power.wait_queue, &wait,
     817             :                                         TASK_UNINTERRUPTIBLE);
     818           0 :                         if (dev->power.runtime_status != RPM_RESUMING
     819           0 :                             && dev->power.runtime_status != RPM_SUSPENDING)
     820             :                                 break;
     821             : 
     822           0 :                         spin_unlock_irq(&dev->power.lock);
     823             : 
     824           0 :                         schedule();
     825             : 
     826           0 :                         spin_lock_irq(&dev->power.lock);
     827             :                 }
     828           0 :                 finish_wait(&dev->power.wait_queue, &wait);
     829           0 :                 goto repeat;
     830             :         }
     831             : 
     832             :         /*
     833             :          * See if we can skip waking up the parent.  This is safe only if
     834             :          * power.no_callbacks is set, because otherwise we don't know whether
     835             :          * the resume will actually succeed.
     836             :          */
     837           0 :         if (dev->power.no_callbacks && !parent && dev->parent) {
     838           0 :                 spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
     839           0 :                 if (dev->parent->power.disable_depth > 0
     840           0 :                     || dev->parent->power.ignore_children
     841           0 :                     || dev->parent->power.runtime_status == RPM_ACTIVE) {
     842           0 :                         atomic_inc(&dev->parent->power.child_count);
     843           0 :                         spin_unlock(&dev->parent->power.lock);
     844           0 :                         retval = 1;
     845           0 :                         goto no_callback;       /* Assume success. */
     846             :                 }
     847           0 :                 spin_unlock(&dev->parent->power.lock);
     848             :         }
     849             : 
     850             :         /* Carry out an asynchronous or a synchronous resume. */
     851           0 :         if (rpmflags & RPM_ASYNC) {
     852           0 :                 dev->power.request = RPM_REQ_RESUME;
     853           0 :                 if (!dev->power.request_pending) {
     854           0 :                         dev->power.request_pending = true;
     855           0 :                         queue_work(pm_wq, &dev->power.work);
     856             :                 }
     857             :                 retval = 0;
     858             :                 goto out;
     859             :         }
     860             : 
     861           0 :         if (!parent && dev->parent) {
     862             :                 /*
     863             :                  * Increment the parent's usage counter and resume it if
     864             :                  * necessary.  Not needed if dev is irq-safe; then the
     865             :                  * parent is permanently resumed.
     866             :                  */
     867           0 :                 parent = dev->parent;
     868           0 :                 if (dev->power.irq_safe)
     869             :                         goto skip_parent;
     870           0 :                 spin_unlock(&dev->power.lock);
     871             : 
     872           0 :                 pm_runtime_get_noresume(parent);
     873             : 
     874           0 :                 spin_lock(&parent->power.lock);
     875             :                 /*
     876             :                  * Resume the parent if it has runtime PM enabled and not been
     877             :                  * set to ignore its children.
     878             :                  */
     879           0 :                 if (!parent->power.disable_depth
     880           0 :                     && !parent->power.ignore_children) {
     881           0 :                         rpm_resume(parent, 0);
     882           0 :                         if (parent->power.runtime_status != RPM_ACTIVE)
     883           0 :                                 retval = -EBUSY;
     884             :                 }
     885           0 :                 spin_unlock(&parent->power.lock);
     886             : 
     887           0 :                 spin_lock(&dev->power.lock);
     888           0 :                 if (retval)
     889             :                         goto out;
     890             :                 goto repeat;
     891             :         }
     892             :  skip_parent:
     893             : 
     894           0 :         if (dev->power.no_callbacks)
     895             :                 goto no_callback;       /* Assume success. */
     896             : 
     897           0 :         __update_runtime_status(dev, RPM_RESUMING);
     898             : 
     899           0 :         callback = RPM_GET_CALLBACK(dev, runtime_resume);
     900             : 
     901           0 :         dev_pm_disable_wake_irq_check(dev, false);
     902           0 :         retval = rpm_callback(callback, dev);
     903           0 :         if (retval) {
     904           0 :                 __update_runtime_status(dev, RPM_SUSPENDED);
     905           0 :                 pm_runtime_cancel_pending(dev);
     906           0 :                 dev_pm_enable_wake_irq_check(dev, false);
     907             :         } else {
     908             :  no_callback:
     909           0 :                 __update_runtime_status(dev, RPM_ACTIVE);
     910           0 :                 pm_runtime_mark_last_busy(dev);
     911           0 :                 if (parent)
     912           0 :                         atomic_inc(&parent->power.child_count);
     913             :         }
     914           0 :         wake_up_all(&dev->power.wait_queue);
     915             : 
     916           0 :         if (retval >= 0)
     917           0 :                 rpm_idle(dev, RPM_ASYNC);
     918             : 
     919             :  out:
     920           0 :         if (parent && !dev->power.irq_safe) {
     921           0 :                 spin_unlock_irq(&dev->power.lock);
     922             : 
     923           0 :                 pm_runtime_put(parent);
     924             : 
     925           0 :                 spin_lock_irq(&dev->power.lock);
     926             :         }
     927             : 
     928           0 :         trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
     929             : 
     930           0 :         return retval;
     931             : }
     932             : 
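rpm_resume() sits behind pm_runtime_get_sync() and its relatives; a
minimal sketch of the canonical get/put pairing around hardware access
(hypothetical fragment):

    retval = pm_runtime_get_sync(dev);  /* resume dev (and parent) if needed */
    if (retval < 0) {
            pm_runtime_put_noidle(dev); /* balance the usage count on error */
            return retval;
    }
    /* ... access the hardware here ... */
    pm_runtime_put(dev);    /* async idle notification once the count drops */
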
     933             : /**
     934             :  * pm_runtime_work - Universal runtime PM work function.
     935             :  * @work: Work structure used for scheduling the execution of this function.
     936             :  *
     937             :  * Use @work to get the device object the work is to be done for, determine what
     938             :  * is to be done and execute the appropriate runtime PM function.
     939             :  */
     940           0 : static void pm_runtime_work(struct work_struct *work)
     941             : {
     942           0 :         struct device *dev = container_of(work, struct device, power.work);
     943             :         enum rpm_request req;
     944             : 
     945           0 :         spin_lock_irq(&dev->power.lock);
     946             : 
     947           0 :         if (!dev->power.request_pending)
     948             :                 goto out;
     949             : 
     950           0 :         req = dev->power.request;
     951           0 :         dev->power.request = RPM_REQ_NONE;
     952           0 :         dev->power.request_pending = false;
     953             : 
     954           0 :         switch (req) {
     955             :         case RPM_REQ_NONE:
     956             :                 break;
     957             :         case RPM_REQ_IDLE:
     958           0 :                 rpm_idle(dev, RPM_NOWAIT);
     959           0 :                 break;
     960             :         case RPM_REQ_SUSPEND:
     961           0 :                 rpm_suspend(dev, RPM_NOWAIT);
     962           0 :                 break;
     963             :         case RPM_REQ_AUTOSUSPEND:
     964           0 :                 rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
     965           0 :                 break;
     966             :         case RPM_REQ_RESUME:
     967           0 :                 rpm_resume(dev, RPM_NOWAIT);
     968           0 :                 break;
     969             :         }
     970             : 
     971             :  out:
     972           0 :         spin_unlock_irq(&dev->power.lock);
     973           0 : }
     974             : 
     975             : /**
     976             :  * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
     977             :  * @timer: hrtimer used by pm_schedule_suspend().
     978             :  *
     979             :  * Check if the time is right and queue a suspend request.
     980             :  */
      981           0 : static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
     982             : {
     983           0 :         struct device *dev = container_of(timer, struct device, power.suspend_timer);
     984             :         unsigned long flags;
     985             :         u64 expires;
     986             : 
     987           0 :         spin_lock_irqsave(&dev->power.lock, flags);
     988             : 
     989           0 :         expires = dev->power.timer_expires;
     990             :         /*
      991             :          * If 'expires' is after the current time, we've been called too
      992             :          * early (e.g. the timer has been rescheduled), so do nothing.
     993             :          */
     994           0 :         if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
     995           0 :                 dev->power.timer_expires = 0;
     996           0 :                 rpm_suspend(dev, dev->power.timer_autosuspends ?
     997             :                     (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
     998             :         }
     999             : 
    1000           0 :         spin_unlock_irqrestore(&dev->power.lock, flags);
    1001             : 
    1002           0 :         return HRTIMER_NORESTART;
    1003             : }
    1004             : 
    1005             : /**
     1006             :  * pm_schedule_suspend - Set up a timer to submit a suspend request in the future.
    1007             :  * @dev: Device to suspend.
    1008             :  * @delay: Time to wait before submitting a suspend request, in milliseconds.
    1009             :  */
    1010           0 : int pm_schedule_suspend(struct device *dev, unsigned int delay)
    1011             : {
    1012             :         unsigned long flags;
    1013             :         u64 expires;
    1014             :         int retval;
    1015             : 
    1016           0 :         spin_lock_irqsave(&dev->power.lock, flags);
    1017             : 
    1018           0 :         if (!delay) {
    1019           0 :                 retval = rpm_suspend(dev, RPM_ASYNC);
    1020           0 :                 goto out;
    1021             :         }
    1022             : 
    1023           0 :         retval = rpm_check_suspend_allowed(dev);
    1024           0 :         if (retval)
    1025             :                 goto out;
    1026             : 
    1027             :         /* Other scheduled or pending requests need to be canceled. */
    1028           0 :         pm_runtime_cancel_pending(dev);
    1029             : 
    1030           0 :         expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
    1031           0 :         dev->power.timer_expires = expires;
    1032           0 :         dev->power.timer_autosuspends = 0;
    1033           0 :         hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
    1034             : 
    1035             :  out:
    1036           0 :         spin_unlock_irqrestore(&dev->power.lock, flags);
    1037             : 
    1038           0 :         return retval;
    1039             : }
    1040             : EXPORT_SYMBOL_GPL(pm_schedule_suspend);
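/*
 * Editorial sketch (not part of the kernel source): a hypothetical driver
 * queues an asynchronous suspend request 100 ms after its last transfer
 * completes, instead of suspending synchronously.
 */
#include <linux/pm_runtime.h>

static void foo_xfer_done(struct device *dev)
{
	pm_schedule_suspend(dev, 100);	/* delay is in milliseconds */
}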
    1041             : 
    1042             : /**
    1043             :  * __pm_runtime_idle - Entry point for runtime idle operations.
    1044             :  * @dev: Device to send idle notification for.
    1045             :  * @rpmflags: Flag bits.
    1046             :  *
    1047             :  * If the RPM_GET_PUT flag is set, decrement the device's usage count and
     1048             :  * return immediately if it is still larger than zero.  Then carry out an idle
    1049             :  * notification, either synchronous or asynchronous.
    1050             :  *
    1051             :  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
    1052             :  * or if pm_runtime_irq_safe() has been called.
    1053             :  */
    1054           4 : int __pm_runtime_idle(struct device *dev, int rpmflags)
    1055             : {
    1056             :         unsigned long flags;
    1057             :         int retval;
    1058             : 
    1059           4 :         if (rpmflags & RPM_GET_PUT) {
    1060           0 :                 if (!atomic_dec_and_test(&dev->power.usage_count)) {
    1061             :                         trace_rpm_usage_rcuidle(dev, rpmflags);
    1062             :                         return 0;
    1063             :                 }
    1064             :         }
    1065             : 
    1066             :         might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
    1067             : 
    1068           4 :         spin_lock_irqsave(&dev->power.lock, flags);
    1069           4 :         retval = rpm_idle(dev, rpmflags);
    1070           8 :         spin_unlock_irqrestore(&dev->power.lock, flags);
    1071             : 
    1072           4 :         return retval;
    1073             : }
    1074             : EXPORT_SYMBOL_GPL(__pm_runtime_idle);
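/*
 * For orientation, an editorial sketch of how the helpers in
 * include/linux/pm_runtime.h funnel into this entry point (close to, but not
 * verbatim from, the header): pm_runtime_idle() is the synchronous form,
 * pm_request_idle() the asynchronous one, and pm_runtime_put() combines the
 * usage-count drop with an asynchronous idle notification.
 */
static inline int pm_runtime_idle(struct device *dev)
{
	return __pm_runtime_idle(dev, 0);
}

static inline int pm_request_idle(struct device *dev)
{
	return __pm_runtime_idle(dev, RPM_ASYNC);
}

static inline int pm_runtime_put(struct device *dev)
{
	return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC);
}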
    1075             : 
    1076             : /**
    1077             :  * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
    1078             :  * @dev: Device to suspend.
    1079             :  * @rpmflags: Flag bits.
    1080             :  *
    1081             :  * If the RPM_GET_PUT flag is set, decrement the device's usage count and
     1082             :  * return immediately if it is still larger than zero.  Then carry out a suspend,
    1083             :  * either synchronous or asynchronous.
    1084             :  *
    1085             :  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
    1086             :  * or if pm_runtime_irq_safe() has been called.
    1087             :  */
    1088           0 : int __pm_runtime_suspend(struct device *dev, int rpmflags)
    1089             : {
    1090             :         unsigned long flags;
    1091             :         int retval;
    1092             : 
    1093           0 :         if (rpmflags & RPM_GET_PUT) {
    1094           0 :                 if (!atomic_dec_and_test(&dev->power.usage_count)) {
    1095             :                         trace_rpm_usage_rcuidle(dev, rpmflags);
    1096             :                         return 0;
    1097             :                 }
    1098             :         }
    1099             : 
    1100             :         might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
    1101             : 
    1102           0 :         spin_lock_irqsave(&dev->power.lock, flags);
    1103           0 :         retval = rpm_suspend(dev, rpmflags);
    1104           0 :         spin_unlock_irqrestore(&dev->power.lock, flags);
    1105             : 
    1106           0 :         return retval;
    1107             : }
    1108             : EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
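/*
 * Likewise, an editorial sketch of the suspend-side helpers (close to the
 * definitions in include/linux/pm_runtime.h); the RPM_AUTO variants honor
 * the autosuspend delay:
 */
static inline int pm_runtime_suspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, 0);
}

static inline int pm_runtime_autosuspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, RPM_AUTO);
}

static inline int pm_runtime_put_autosuspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_ASYNC | RPM_AUTO);
}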
    1109             : 
    1110             : /**
    1111             :  * __pm_runtime_resume - Entry point for runtime resume operations.
    1112             :  * @dev: Device to resume.
    1113             :  * @rpmflags: Flag bits.
    1114             :  *
    1115             :  * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
    1116             :  * carry out a resume, either synchronous or asynchronous.
    1117             :  *
    1118             :  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
    1119             :  * or if pm_runtime_irq_safe() has been called.
    1120             :  */
    1121           0 : int __pm_runtime_resume(struct device *dev, int rpmflags)
    1122             : {
    1123             :         unsigned long flags;
    1124             :         int retval;
    1125             : 
    1126             :         might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
    1127             :                         dev->power.runtime_status != RPM_ACTIVE);
    1128             : 
    1129           0 :         if (rpmflags & RPM_GET_PUT)
    1130           0 :                 atomic_inc(&dev->power.usage_count);
    1131             : 
    1132           0 :         spin_lock_irqsave(&dev->power.lock, flags);
    1133           0 :         retval = rpm_resume(dev, rpmflags);
    1134           0 :         spin_unlock_irqrestore(&dev->power.lock, flags);
    1135             : 
    1136           0 :         return retval;
    1137             : }
    1138             : EXPORT_SYMBOL_GPL(__pm_runtime_resume);
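/*
 * Editorial sketch of the canonical driver-side pattern built on top of this
 * entry point (foo_do_io() is hypothetical): pm_runtime_get_sync() is
 * __pm_runtime_resume(dev, RPM_GET_PUT), and pm_runtime_resume_and_get()
 * additionally drops the usage count again if the resume fails.
 */
#include <linux/pm_runtime.h>

static int foo_transfer(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;		/* usage count already rebalanced */

	ret = foo_do_io(dev);		/* device is RPM_ACTIVE across this call */

	pm_runtime_put(dev);		/* async idle notification, may suspend */
	return ret;
}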
    1139             : 
    1140             : /**
    1141             :  * pm_runtime_get_if_active - Conditionally bump up device usage counter.
    1142             :  * @dev: Device to handle.
     1143             :  * @ign_usage_count: Whether to ignore the device's current usage counter value.
    1144             :  *
    1145             :  * Return -EINVAL if runtime PM is disabled for @dev.
    1146             :  *
    1147             :  * Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either
    1148             :  * @ign_usage_count is %true or the runtime PM usage counter of @dev is not
    1149             :  * zero, increment the usage counter of @dev and return 1. Otherwise, return 0
    1150             :  * without changing the usage counter.
    1151             :  *
    1152             :  * If @ign_usage_count is %true, this function can be used to prevent suspending
    1153             :  * the device when its runtime PM status is %RPM_ACTIVE.
    1154             :  *
    1155             :  * If @ign_usage_count is %false, this function can be used to prevent
    1156             :  * suspending the device when both its runtime PM status is %RPM_ACTIVE and its
    1157             :  * runtime PM usage counter is not zero.
    1158             :  *
    1159             :  * The caller is responsible for decrementing the runtime PM usage counter of
    1160             :  * @dev after this function has returned a positive value for it.
    1161             :  */
    1162           0 : int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
    1163             : {
    1164             :         unsigned long flags;
    1165             :         int retval;
    1166             : 
    1167           0 :         spin_lock_irqsave(&dev->power.lock, flags);
    1168           0 :         if (dev->power.disable_depth > 0) {
    1169             :                 retval = -EINVAL;
    1170           0 :         } else if (dev->power.runtime_status != RPM_ACTIVE) {
    1171             :                 retval = 0;
    1172           0 :         } else if (ign_usage_count) {
    1173           0 :                 retval = 1;
    1174           0 :                 atomic_inc(&dev->power.usage_count);
    1175             :         } else {
    1176           0 :                 retval = atomic_inc_not_zero(&dev->power.usage_count);
    1177             :         }
    1178           0 :         trace_rpm_usage_rcuidle(dev, 0);
    1179           0 :         spin_unlock_irqrestore(&dev->power.lock, flags);
    1180             : 
    1181           0 :         return retval;
    1182             : }
    1183             : EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
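/*
 * Editorial sketch: touch the hardware only if it is already powered, e.g.
 * from a statistics path that must never trigger a resume
 * (foo_read_counters() is hypothetical).  pm_runtime_get_if_in_use() is the
 * @ign_usage_count == false form of this function.
 */
static void foo_poll_counters(struct device *dev)
{
	if (pm_runtime_get_if_active(dev, true) <= 0)
		return;			/* runtime PM disabled or not RPM_ACTIVE */

	foo_read_counters(dev);

	pm_runtime_put(dev);		/* drop the reference taken above */
}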
    1184             : 
    1185             : /**
    1186             :  * __pm_runtime_set_status - Set runtime PM status of a device.
    1187             :  * @dev: Device to handle.
    1188             :  * @status: New runtime PM status of the device.
    1189             :  *
    1190             :  * If runtime PM of the device is disabled or its power.runtime_error field is
    1191             :  * different from zero, the status may be changed either to RPM_ACTIVE, or to
    1192             :  * RPM_SUSPENDED, as long as that reflects the actual state of the device.
    1193             :  * However, if the device has a parent and the parent is not active, and the
    1194             :  * parent's power.ignore_children flag is unset, the device's status cannot be
    1195             :  * set to RPM_ACTIVE, so -EBUSY is returned in that case.
    1196             :  *
    1197             :  * If successful, __pm_runtime_set_status() clears the power.runtime_error field
    1198             :  * and the device parent's counter of unsuspended children is modified to
    1199             :  * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
    1200             :  * notification request for the parent is submitted.
    1201             :  *
    1202             :  * If @dev has any suppliers (as reflected by device links to them), and @status
    1203             :  * is RPM_ACTIVE, they will be activated upfront and if the activation of one
    1204             :  * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
     1205             :  * of the @status value) and the suppliers will be deactivated on exit.  The
    1206             :  * error returned by the failing supplier activation will be returned in that
    1207             :  * case.
    1208             :  */
    1209           0 : int __pm_runtime_set_status(struct device *dev, unsigned int status)
    1210             : {
    1211           0 :         struct device *parent = dev->parent;
    1212           0 :         bool notify_parent = false;
    1213           0 :         int error = 0;
    1214             : 
    1215           0 :         if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
    1216             :                 return -EINVAL;
    1217             : 
    1218           0 :         spin_lock_irq(&dev->power.lock);
    1219             : 
    1220             :         /*
    1221             :          * Prevent PM-runtime from being enabled for the device or return an
    1222             :          * error if it is enabled already and working.
    1223             :          */
    1224           0 :         if (dev->power.runtime_error || dev->power.disable_depth)
    1225           0 :                 dev->power.disable_depth++;
    1226             :         else
    1227             :                 error = -EAGAIN;
    1228             : 
    1229           0 :         spin_unlock_irq(&dev->power.lock);
    1230             : 
    1231           0 :         if (error)
    1232             :                 return error;
    1233             : 
    1234             :         /*
    1235             :          * If the new status is RPM_ACTIVE, the suppliers can be activated
    1236             :          * upfront regardless of the current status, because next time
    1237             :          * rpm_put_suppliers() runs, the rpm_active refcounts of the links
    1238             :          * involved will be dropped down to one anyway.
    1239             :          */
    1240           0 :         if (status == RPM_ACTIVE) {
    1241           0 :                 int idx = device_links_read_lock();
    1242             : 
    1243           0 :                 error = rpm_get_suppliers(dev);
    1244           0 :                 if (error)
    1245           0 :                         status = RPM_SUSPENDED;
    1246             : 
    1247           0 :                 device_links_read_unlock(idx);
    1248             :         }
    1249             : 
    1250           0 :         spin_lock_irq(&dev->power.lock);
    1251             : 
    1252           0 :         if (dev->power.runtime_status == status || !parent)
    1253             :                 goto out_set;
    1254             : 
    1255           0 :         if (status == RPM_SUSPENDED) {
    1256           0 :                 atomic_add_unless(&parent->power.child_count, -1, 0);
    1257           0 :                 notify_parent = !parent->power.ignore_children;
    1258             :         } else {
    1259           0 :                 spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
    1260             : 
    1261             :                 /*
    1262             :                  * It is invalid to put an active child under a parent that is
    1263             :                  * not active, has runtime PM enabled and the
    1264             :                  * 'power.ignore_children' flag unset.
    1265             :                  */
    1266           0 :                 if (!parent->power.disable_depth
    1267           0 :                     && !parent->power.ignore_children
    1268           0 :                     && parent->power.runtime_status != RPM_ACTIVE) {
    1269           0 :                         dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
    1270             :                                 dev_name(dev),
    1271             :                                 dev_name(parent));
    1272           0 :                         error = -EBUSY;
    1273           0 :                 } else if (dev->power.runtime_status == RPM_SUSPENDED) {
    1274           0 :                         atomic_inc(&parent->power.child_count);
    1275             :                 }
    1276             : 
    1277           0 :                 spin_unlock(&parent->power.lock);
    1278             : 
    1279           0 :                 if (error) {
    1280             :                         status = RPM_SUSPENDED;
    1281             :                         goto out;
    1282             :                 }
    1283             :         }
    1284             : 
    1285             :  out_set:
    1286           0 :         __update_runtime_status(dev, status);
    1287           0 :         if (!error)
    1288           0 :                 dev->power.runtime_error = 0;
    1289             : 
    1290             :  out:
    1291           0 :         spin_unlock_irq(&dev->power.lock);
    1292             : 
    1293           0 :         if (notify_parent)
    1294             :                 pm_request_idle(parent);
    1295             : 
    1296           0 :         if (status == RPM_SUSPENDED) {
    1297           0 :                 int idx = device_links_read_lock();
    1298             : 
    1299           0 :                 rpm_put_suppliers(dev);
    1300             : 
    1301           0 :                 device_links_read_unlock(idx);
    1302             :         }
    1303             : 
    1304           0 :         pm_runtime_enable(dev);
    1305             : 
    1306           0 :         return error;
    1307             : }
    1308             : EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
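/*
 * Editorial sketch: pm_runtime_set_active() and pm_runtime_set_suspended()
 * are thin wrappers around this function.  A probe that powers the hardware
 * up itself (foo_power_on() is hypothetical) declares that state before
 * enabling runtime PM:
 */
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	foo_power_on(dev);

	pm_runtime_set_active(dev);	/* __pm_runtime_set_status(dev, RPM_ACTIVE) */
	pm_runtime_enable(dev);
	return 0;
}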
    1309             : 
    1310             : /**
    1311             :  * __pm_runtime_barrier - Cancel pending requests and wait for completions.
    1312             :  * @dev: Device to handle.
    1313             :  *
    1314             :  * Flush all pending requests for the device from pm_wq and wait for all
    1315             :  * runtime PM operations involving the device in progress to complete.
    1316             :  *
    1317             :  * Should be called under dev->power.lock with interrupts disabled.
    1318             :  */
    1319         536 : static void __pm_runtime_barrier(struct device *dev)
    1320             : {
    1321         536 :         pm_runtime_deactivate_timer(dev);
    1322             : 
    1323         536 :         if (dev->power.request_pending) {
    1324           0 :                 dev->power.request = RPM_REQ_NONE;
    1325           0 :                 spin_unlock_irq(&dev->power.lock);
    1326             : 
    1327           0 :                 cancel_work_sync(&dev->power.work);
    1328             : 
    1329           0 :                 spin_lock_irq(&dev->power.lock);
    1330           0 :                 dev->power.request_pending = false;
    1331             :         }
    1332             : 
    1333        1072 :         if (dev->power.runtime_status == RPM_SUSPENDING
    1334         536 :             || dev->power.runtime_status == RPM_RESUMING
    1335         536 :             || dev->power.idle_notification) {
    1336           0 :                 DEFINE_WAIT(wait);
    1337             : 
    1338             :                 /* Suspend, wake-up or idle notification in progress. */
    1339             :                 for (;;) {
    1340           0 :                         prepare_to_wait(&dev->power.wait_queue, &wait,
    1341             :                                         TASK_UNINTERRUPTIBLE);
    1342           0 :                         if (dev->power.runtime_status != RPM_SUSPENDING
    1343           0 :                             && dev->power.runtime_status != RPM_RESUMING
    1344           0 :                             && !dev->power.idle_notification)
    1345             :                                 break;
    1346           0 :                         spin_unlock_irq(&dev->power.lock);
    1347             : 
    1348           0 :                         schedule();
    1349             : 
    1350           0 :                         spin_lock_irq(&dev->power.lock);
    1351             :                 }
    1352           0 :                 finish_wait(&dev->power.wait_queue, &wait);
    1353             :         }
    1354         536 : }
    1355             : 
    1356             : /**
    1357             :  * pm_runtime_barrier - Flush pending requests and wait for completions.
    1358             :  * @dev: Device to handle.
    1359             :  *
     1360             :  * Prevent the device from being suspended by incrementing its usage counter and,
    1361             :  * if there's a pending resume request for the device, wake the device up.
    1362             :  * Next, make sure that all pending requests for the device have been flushed
    1363             :  * from pm_wq and wait for all runtime PM operations involving the device in
    1364             :  * progress to complete.
    1365             :  *
    1366             :  * Return value:
    1367             :  * 1, if there was a resume request pending and the device had to be woken up,
    1368             :  * 0, otherwise
    1369             :  */
    1370         536 : int pm_runtime_barrier(struct device *dev)
    1371             : {
    1372         536 :         int retval = 0;
    1373             : 
    1374         536 :         pm_runtime_get_noresume(dev);
    1375        1072 :         spin_lock_irq(&dev->power.lock);
    1376             : 
    1377         536 :         if (dev->power.request_pending
    1378           0 :             && dev->power.request == RPM_REQ_RESUME) {
    1379           0 :                 rpm_resume(dev, 0);
    1380           0 :                 retval = 1;
    1381             :         }
    1382             : 
    1383         536 :         __pm_runtime_barrier(dev);
    1384             : 
    1385        1072 :         spin_unlock_irq(&dev->power.lock);
    1386         536 :         pm_runtime_put_noidle(dev);
    1387             : 
    1388         536 :         return retval;
    1389             : }
    1390             : EXPORT_SYMBOL_GPL(pm_runtime_barrier);
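/*
 * Editorial sketch: a teardown path can use pm_runtime_barrier() to make sure
 * no asynchronous request or in-flight callback touches the device after its
 * resources go away, much as the driver core does around probe and release.
 */
static void foo_unbind(struct device *dev)
{
	if (pm_runtime_barrier(dev))
		dev_dbg(dev, "pending resume request was serviced\n");

	/* Safe to tear down per-device state now. */
}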
    1391             : 
    1392             : /**
    1393             :  * __pm_runtime_disable - Disable runtime PM of a device.
    1394             :  * @dev: Device to handle.
    1395             :  * @check_resume: If set, check if there's a resume request for the device.
    1396             :  *
    1397             :  * Increment power.disable_depth for the device and if it was zero previously,
    1398             :  * cancel all pending runtime PM requests for the device and wait for all
    1399             :  * operations in progress to complete.  The device can be either active or
    1400             :  * suspended after its runtime PM has been disabled.
    1401             :  *
    1402             :  * If @check_resume is set and there's a resume request pending when
    1403             :  * __pm_runtime_disable() is called and power.disable_depth is zero, the
    1404             :  * function will wake up the device before disabling its runtime PM.
    1405             :  */
    1406           0 : void __pm_runtime_disable(struct device *dev, bool check_resume)
    1407             : {
    1408           0 :         spin_lock_irq(&dev->power.lock);
    1409             : 
    1410           0 :         if (dev->power.disable_depth > 0) {
    1411           0 :                 dev->power.disable_depth++;
    1412           0 :                 goto out;
    1413             :         }
    1414             : 
    1415             :         /*
    1416             :          * Wake up the device if there's a resume request pending, because that
    1417             :          * means there probably is some I/O to process and disabling runtime PM
    1418             :          * shouldn't prevent the device from processing the I/O.
    1419             :          */
    1420           0 :         if (check_resume && dev->power.request_pending
    1421           0 :             && dev->power.request == RPM_REQ_RESUME) {
    1422             :                 /*
    1423             :                  * Prevent suspends and idle notifications from being carried
    1424             :                  * out after we have woken up the device.
    1425             :                  */
    1426           0 :                 pm_runtime_get_noresume(dev);
    1427             : 
    1428           0 :                 rpm_resume(dev, 0);
    1429             : 
    1430             :                 pm_runtime_put_noidle(dev);
    1431             :         }
    1432             : 
    1433             :         /* Update time accounting before disabling PM-runtime. */
    1434           0 :         update_pm_runtime_accounting(dev);
    1435             : 
    1436           0 :         if (!dev->power.disable_depth++) {
    1437           0 :                 __pm_runtime_barrier(dev);
    1438           0 :                 dev->power.last_status = dev->power.runtime_status;
    1439             :         }
    1440             : 
    1441             :  out:
    1442           0 :         spin_unlock_irq(&dev->power.lock);
    1443           0 : }
    1444             : EXPORT_SYMBOL_GPL(__pm_runtime_disable);
    1445             : 
    1446             : /**
    1447             :  * pm_runtime_enable - Enable runtime PM of a device.
    1448             :  * @dev: Device to handle.
    1449             :  */
    1450           0 : void pm_runtime_enable(struct device *dev)
    1451             : {
    1452             :         unsigned long flags;
    1453             : 
    1454           0 :         spin_lock_irqsave(&dev->power.lock, flags);
    1455             : 
    1456           0 :         if (!dev->power.disable_depth) {
    1457           0 :                 dev_warn(dev, "Unbalanced %s!\n", __func__);
    1458           0 :                 goto out;
    1459             :         }
    1460             : 
    1461           0 :         if (--dev->power.disable_depth > 0)
    1462             :                 goto out;
    1463             : 
    1464           0 :         dev->power.last_status = RPM_INVALID;
    1465           0 :         dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
    1466             : 
    1467           0 :         if (dev->power.runtime_status == RPM_SUSPENDED &&
    1468           0 :             !dev->power.ignore_children &&
    1469           0 :             atomic_read(&dev->power.child_count) > 0)
    1470           0 :                 dev_warn(dev, "Enabling runtime PM for inactive device with active children\n");
    1471             : 
    1472             : out:
    1473           0 :         spin_unlock_irqrestore(&dev->power.lock, flags);
    1474           0 : }
    1475             : EXPORT_SYMBOL_GPL(pm_runtime_enable);
    1476             : 
    1477           0 : static void pm_runtime_disable_action(void *data)
    1478             : {
    1479           0 :         pm_runtime_dont_use_autosuspend(data);
    1480           0 :         pm_runtime_disable(data);
    1481           0 : }
    1482             : 
    1483             : /**
    1484             :  * devm_pm_runtime_enable - devres-enabled version of pm_runtime_enable.
     1485             :  * @dev: Device to handle.
     1486             :  *
     1487             :  * NOTE: this will also handle calling pm_runtime_dont_use_autosuspend() for
     1488             :  * you at driver exit time if needed.
    1490             :  */
    1491           0 : int devm_pm_runtime_enable(struct device *dev)
    1492             : {
    1493           0 :         pm_runtime_enable(dev);
    1494             : 
    1495           0 :         return devm_add_action_or_reset(dev, pm_runtime_disable_action, dev);
    1496             : }
    1497             : EXPORT_SYMBOL_GPL(devm_pm_runtime_enable);
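/*
 * Editorial sketch of a probe using the devres variant; the matching
 * pm_runtime_disable() (and pm_runtime_dont_use_autosuspend(), if used) runs
 * automatically when the driver is detached:
 */
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	pm_runtime_set_active(dev);	/* hardware was powered up in probe */
	return devm_pm_runtime_enable(dev);
}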
    1498             : 
    1499             : /**
    1500             :  * pm_runtime_forbid - Block runtime PM of a device.
    1501             :  * @dev: Device to handle.
    1502             :  *
    1503             :  * Increase the device's usage count and clear its power.runtime_auto flag,
    1504             :  * so that it cannot be suspended at run time until pm_runtime_allow() is called
    1505             :  * for it.
    1506             :  */
    1507           0 : void pm_runtime_forbid(struct device *dev)
    1508             : {
    1509           0 :         spin_lock_irq(&dev->power.lock);
    1510           0 :         if (!dev->power.runtime_auto)
    1511             :                 goto out;
    1512             : 
    1513           0 :         dev->power.runtime_auto = false;
    1514           0 :         atomic_inc(&dev->power.usage_count);
    1515           0 :         rpm_resume(dev, 0);
    1516             : 
    1517             :  out:
    1518           0 :         spin_unlock_irq(&dev->power.lock);
    1519           0 : }
    1520             : EXPORT_SYMBOL_GPL(pm_runtime_forbid);
    1521             : 
    1522             : /**
    1523             :  * pm_runtime_allow - Unblock runtime PM of a device.
    1524             :  * @dev: Device to handle.
    1525             :  *
    1526             :  * Decrease the device's usage count and set its power.runtime_auto flag.
    1527             :  */
    1528           0 : void pm_runtime_allow(struct device *dev)
    1529             : {
    1530           0 :         spin_lock_irq(&dev->power.lock);
    1531           0 :         if (dev->power.runtime_auto)
    1532             :                 goto out;
    1533             : 
    1534           0 :         dev->power.runtime_auto = true;
    1535           0 :         if (atomic_dec_and_test(&dev->power.usage_count))
    1536           0 :                 rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
    1537             :         else
    1538             :                 trace_rpm_usage_rcuidle(dev, RPM_AUTO | RPM_ASYNC);
    1539             : 
    1540             :  out:
    1541           0 :         spin_unlock_irq(&dev->power.lock);
    1542           0 : }
    1543             : EXPORT_SYMBOL_GPL(pm_runtime_allow);
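/*
 * Editorial sketch: these two functions back the power/control sysfs
 * attribute (writing "on" calls pm_runtime_forbid(), writing "auto" calls
 * pm_runtime_allow()).  A driver can default to "blocked" and let user space
 * opt in:
 */
static int foo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	pm_runtime_enable(dev);
	pm_runtime_forbid(dev);		/* suspend blocked until "auto" is written */
	return 0;
}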
    1544             : 
    1545             : /**
    1546             :  * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
    1547             :  * @dev: Device to handle.
    1548             :  *
    1549             :  * Set the power.no_callbacks flag, which tells the PM core that this
    1550             :  * device is power-managed through its parent and has no runtime PM
    1551             :  * callbacks of its own.  The runtime sysfs attributes will be removed.
    1552             :  */
    1553           0 : void pm_runtime_no_callbacks(struct device *dev)
    1554             : {
    1555           0 :         spin_lock_irq(&dev->power.lock);
    1556           0 :         dev->power.no_callbacks = 1;
    1557           0 :         spin_unlock_irq(&dev->power.lock);
    1558           0 :         if (device_is_registered(dev))
    1559           0 :                 rpm_sysfs_remove(dev);
    1560           0 : }
    1561             : EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
    1562             : 
    1563             : /**
    1564             :  * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
    1565             :  * @dev: Device to handle
    1566             :  *
    1567             :  * Set the power.irq_safe flag, which tells the PM core that the
    1568             :  * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
    1569             :  * always be invoked with the spinlock held and interrupts disabled.  It also
    1570             :  * causes the parent's usage counter to be permanently incremented, preventing
    1571             :  * the parent from runtime suspending -- otherwise an irq-safe child might have
    1572             :  * to wait for a non-irq-safe parent.
    1573             :  */
    1574           0 : void pm_runtime_irq_safe(struct device *dev)
    1575             : {
    1576           0 :         if (dev->parent)
    1577           0 :                 pm_runtime_get_sync(dev->parent);
    1578           0 :         spin_lock_irq(&dev->power.lock);
    1579           0 :         dev->power.irq_safe = 1;
    1580           0 :         spin_unlock_irq(&dev->power.lock);
    1581           0 : }
    1582             : EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
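/*
 * Editorial sketch: a driver whose ->runtime_suspend()/->runtime_resume()
 * callbacks only poke registers and never sleep can declare that before
 * enabling runtime PM, at the cost of keeping its parent active for good:
 */
static int foo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	pm_runtime_irq_safe(dev);	/* callbacks run with the lock held */
	pm_runtime_enable(dev);
	return 0;
}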
    1583             : 
    1584             : /**
    1585             :  * update_autosuspend - Handle a change to a device's autosuspend settings.
    1586             :  * @dev: Device to handle.
    1587             :  * @old_delay: The former autosuspend_delay value.
    1588             :  * @old_use: The former use_autosuspend value.
    1589             :  *
    1590             :  * Prevent runtime suspend if the new delay is negative and use_autosuspend is
    1591             :  * set; otherwise allow it.  Send an idle notification if suspends are allowed.
    1592             :  *
    1593             :  * This function must be called under dev->power.lock with interrupts disabled.
    1594             :  */
    1595           0 : static void update_autosuspend(struct device *dev, int old_delay, int old_use)
    1596             : {
    1597           0 :         int delay = dev->power.autosuspend_delay;
    1598             : 
    1599             :         /* Should runtime suspend be prevented now? */
    1600           0 :         if (dev->power.use_autosuspend && delay < 0) {
    1601             : 
    1602             :                 /* If it used to be allowed then prevent it. */
    1603           0 :                 if (!old_use || old_delay >= 0) {
    1604           0 :                         atomic_inc(&dev->power.usage_count);
    1605           0 :                         rpm_resume(dev, 0);
    1606             :                 } else {
    1607             :                         trace_rpm_usage_rcuidle(dev, 0);
    1608             :                 }
    1609             :         }
    1610             : 
    1611             :         /* Runtime suspend should be allowed now. */
    1612             :         else {
    1613             : 
    1614             :                 /* If it used to be prevented then allow it. */
    1615           0 :                 if (old_use && old_delay < 0)
    1616           0 :                         atomic_dec(&dev->power.usage_count);
    1617             : 
    1618             :                 /* Maybe we can autosuspend now. */
    1619           0 :                 rpm_idle(dev, RPM_AUTO);
    1620             :         }
    1621           0 : }
    1622             : 
    1623             : /**
    1624             :  * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
    1625             :  * @dev: Device to handle.
    1626             :  * @delay: Value of the new delay in milliseconds.
    1627             :  *
    1628             :  * Set the device's power.autosuspend_delay value.  If it changes to negative
    1629             :  * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
    1630             :  * changes the other way, allow runtime suspends.
    1631             :  */
    1632           0 : void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
    1633             : {
    1634             :         int old_delay, old_use;
    1635             : 
    1636           0 :         spin_lock_irq(&dev->power.lock);
    1637           0 :         old_delay = dev->power.autosuspend_delay;
    1638           0 :         old_use = dev->power.use_autosuspend;
    1639           0 :         dev->power.autosuspend_delay = delay;
    1640           0 :         update_autosuspend(dev, old_delay, old_use);
    1641           0 :         spin_unlock_irq(&dev->power.lock);
    1642           0 : }
    1643             : EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
    1644             : 
    1645             : /**
    1646             :  * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
    1647             :  * @dev: Device to handle.
    1648             :  * @use: New value for use_autosuspend.
    1649             :  *
    1650             :  * Set the device's power.use_autosuspend flag, and allow or prevent runtime
    1651             :  * suspends as needed.
    1652             :  */
    1653           0 : void __pm_runtime_use_autosuspend(struct device *dev, bool use)
    1654             : {
    1655             :         int old_delay, old_use;
    1656             : 
    1657           0 :         spin_lock_irq(&dev->power.lock);
    1658           0 :         old_delay = dev->power.autosuspend_delay;
    1659           0 :         old_use = dev->power.use_autosuspend;
    1660           0 :         dev->power.use_autosuspend = use;
    1661           0 :         update_autosuspend(dev, old_delay, old_use);
    1662           0 :         spin_unlock_irq(&dev->power.lock);
    1663           0 : }
    1664             : EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
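/*
 * Editorial sketch of the usual autosuspend idiom (foo_hw_xfer() is
 * hypothetical).  pm_runtime_use_autosuspend() and
 * pm_runtime_dont_use_autosuspend() wrap __pm_runtime_use_autosuspend()
 * with @use set to true and false, respectively.
 */
static int foo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	pm_runtime_set_autosuspend_delay(dev, 2000);	/* 2 s of inactivity */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);
	return 0;
}

static int foo_xfer(struct device *dev)
{
	int ret = pm_runtime_resume_and_get(dev);

	if (ret < 0)
		return ret;

	foo_hw_xfer(dev);

	pm_runtime_mark_last_busy(dev);		/* restart the inactivity window */
	pm_runtime_put_autosuspend(dev);	/* may suspend 2 s from now */
	return 0;
}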
    1665             : 
    1666             : /**
    1667             :  * pm_runtime_init - Initialize runtime PM fields in given device object.
    1668             :  * @dev: Device object to initialize.
    1669             :  */
    1670         536 : void pm_runtime_init(struct device *dev)
    1671             : {
    1672         536 :         dev->power.runtime_status = RPM_SUSPENDED;
    1673         536 :         dev->power.last_status = RPM_INVALID;
    1674         536 :         dev->power.idle_notification = false;
    1675             : 
    1676         536 :         dev->power.disable_depth = 1;
    1677        1072 :         atomic_set(&dev->power.usage_count, 0);
    1678             : 
    1679         536 :         dev->power.runtime_error = 0;
    1680             : 
    1681        1072 :         atomic_set(&dev->power.child_count, 0);
    1682         536 :         pm_suspend_ignore_children(dev, false);
    1683         536 :         dev->power.runtime_auto = true;
    1684             : 
    1685         536 :         dev->power.request_pending = false;
    1686         536 :         dev->power.request = RPM_REQ_NONE;
    1687         536 :         dev->power.deferred_resume = false;
    1688         536 :         dev->power.needs_force_resume = 0;
    1689        1072 :         INIT_WORK(&dev->power.work, pm_runtime_work);
    1690             : 
    1691         536 :         dev->power.timer_expires = 0;
    1692         536 :         hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
    1693         536 :         dev->power.suspend_timer.function = pm_suspend_timer_fn;
    1694             : 
    1695         536 :         init_waitqueue_head(&dev->power.wait_queue);
    1696         536 : }
    1697             : 
    1698             : /**
    1699             :  * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
    1700             :  * @dev: Device object to re-initialize.
    1701             :  */
    1702           0 : void pm_runtime_reinit(struct device *dev)
    1703             : {
    1704           0 :         if (!pm_runtime_enabled(dev)) {
    1705           0 :                 if (dev->power.runtime_status == RPM_ACTIVE)
    1706             :                         pm_runtime_set_suspended(dev);
    1707           0 :                 if (dev->power.irq_safe) {
    1708           0 :                         spin_lock_irq(&dev->power.lock);
    1709           0 :                         dev->power.irq_safe = 0;
    1710           0 :                         spin_unlock_irq(&dev->power.lock);
    1711           0 :                         if (dev->parent)
    1712           0 :                                 pm_runtime_put(dev->parent);
    1713             :                 }
    1714             :         }
    1715           0 : }
    1716             : 
    1717             : /**
    1718             :  * pm_runtime_remove - Prepare for removing a device from device hierarchy.
    1719             :  * @dev: Device object being removed from device hierarchy.
    1720             :  */
    1721           0 : void pm_runtime_remove(struct device *dev)
    1722             : {
    1723           0 :         __pm_runtime_disable(dev, false);
    1724           0 :         pm_runtime_reinit(dev);
    1725           0 : }
    1726             : 
    1727             : /**
    1728             :  * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
    1729             :  * @dev: Consumer device.
    1730             :  */
    1731           0 : void pm_runtime_get_suppliers(struct device *dev)
    1732             : {
    1733             :         struct device_link *link;
    1734             :         int idx;
    1735             : 
    1736           0 :         idx = device_links_read_lock();
    1737             : 
    1738           0 :         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
    1739             :                                 device_links_read_lock_held())
    1740           0 :                 if (link->flags & DL_FLAG_PM_RUNTIME) {
    1741           0 :                         link->supplier_preactivated = true;
    1742           0 :                         pm_runtime_get_sync(link->supplier);
    1743           0 :                         refcount_inc(&link->rpm_active);
    1744             :                 }
    1745             : 
    1746           0 :         device_links_read_unlock(idx);
    1747           0 : }
    1748             : 
    1749             : /**
    1750             :  * pm_runtime_put_suppliers - Drop references to supplier devices.
    1751             :  * @dev: Consumer device.
    1752             :  */
    1753           0 : void pm_runtime_put_suppliers(struct device *dev)
    1754             : {
    1755             :         struct device_link *link;
    1756             :         int idx;
    1757             : 
    1758           0 :         idx = device_links_read_lock();
    1759             : 
    1760           0 :         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
    1761             :                                 device_links_read_lock_held())
    1762           0 :                 if (link->supplier_preactivated) {
    1763             :                         bool put;
    1764             : 
    1765           0 :                         link->supplier_preactivated = false;
    1766             : 
    1767           0 :                         spin_lock_irq(&dev->power.lock);
    1768             : 
    1769           0 :                         put = pm_runtime_status_suspended(dev) &&
    1770           0 :                               refcount_dec_not_one(&link->rpm_active);
    1771             : 
    1772           0 :                         spin_unlock_irq(&dev->power.lock);
    1773             : 
    1774           0 :                         if (put)
    1775           0 :                                 pm_runtime_put(link->supplier);
    1776             :                 }
    1777             : 
    1778           0 :         device_links_read_unlock(idx);
    1779           0 : }
    1780             : 
    1781           0 : void pm_runtime_new_link(struct device *dev)
    1782             : {
    1783           0 :         spin_lock_irq(&dev->power.lock);
    1784           0 :         dev->power.links_count++;
    1785           0 :         spin_unlock_irq(&dev->power.lock);
    1786           0 : }
    1787             : 
    1788           0 : static void pm_runtime_drop_link_count(struct device *dev)
    1789             : {
    1790           0 :         spin_lock_irq(&dev->power.lock);
    1791           0 :         WARN_ON(dev->power.links_count == 0);
    1792           0 :         dev->power.links_count--;
    1793           0 :         spin_unlock_irq(&dev->power.lock);
    1794           0 : }
    1795             : 
    1796             : /**
    1797             :  * pm_runtime_drop_link - Prepare for device link removal.
    1798             :  * @link: Device link going away.
    1799             :  *
    1800             :  * Drop the link count of the consumer end of @link and decrement the supplier
    1801             :  * device's runtime PM usage counter as many times as needed to drop all of the
     1802             :  * runtime PM references to it held by the consumer.
    1803             :  */
    1804           0 : void pm_runtime_drop_link(struct device_link *link)
    1805             : {
    1806           0 :         if (!(link->flags & DL_FLAG_PM_RUNTIME))
    1807             :                 return;
    1808             : 
    1809           0 :         pm_runtime_drop_link_count(link->consumer);
    1810           0 :         pm_runtime_release_supplier(link, true);
    1811             : }
    1812             : 
    1813             : static bool pm_runtime_need_not_resume(struct device *dev)
    1814             : {
    1815           0 :         return atomic_read(&dev->power.usage_count) <= 1 &&
    1816           0 :                 (atomic_read(&dev->power.child_count) == 0 ||
    1817             :                  dev->power.ignore_children);
    1818             : }
    1819             : 
    1820             : /**
    1821             :  * pm_runtime_force_suspend - Force a device into suspend state if needed.
    1822             :  * @dev: Device to suspend.
    1823             :  *
     1824             :  * Disable runtime PM so we can safely check the device's runtime PM status and,
    1825             :  * if it is active, invoke its ->runtime_suspend callback to suspend it and
    1826             :  * change its runtime PM status field to RPM_SUSPENDED.  Also, if the device's
    1827             :  * usage and children counters don't indicate that the device was in use before
    1828             :  * the system-wide transition under way, decrement its parent's children counter
    1829             :  * (if there is a parent).  Keep runtime PM disabled to preserve the state
    1830             :  * unless we encounter errors.
    1831             :  *
     1832             :  * Typically, this function may be invoked from a system suspend callback to
     1833             :  * make sure the device is put into a low-power state.  It should only be used
     1834             :  * during system-wide PM transitions to sleep states.  It assumes that the
     1835             :  * analogous pm_runtime_force_resume() will be used to resume the device.
    1836             :  */
    1837           0 : int pm_runtime_force_suspend(struct device *dev)
    1838             : {
    1839             :         int (*callback)(struct device *);
    1840             :         int ret;
    1841             : 
    1842           0 :         pm_runtime_disable(dev);
    1843           0 :         if (pm_runtime_status_suspended(dev))
    1844             :                 return 0;
    1845             : 
    1846           0 :         callback = RPM_GET_CALLBACK(dev, runtime_suspend);
    1847             : 
    1848           0 :         ret = callback ? callback(dev) : 0;
    1849           0 :         if (ret)
    1850             :                 goto err;
    1851             : 
    1852             :         /*
    1853             :          * If the device can stay in suspend after the system-wide transition
    1854             :          * to the working state that will follow, drop the children counter of
    1855             :          * its parent, but set its status to RPM_SUSPENDED anyway in case this
    1856             :          * function will be called again for it in the meantime.
    1857             :          */
    1858           0 :         if (pm_runtime_need_not_resume(dev)) {
    1859             :                 pm_runtime_set_suspended(dev);
    1860             :         } else {
    1861           0 :                 __update_runtime_status(dev, RPM_SUSPENDED);
    1862           0 :                 dev->power.needs_force_resume = 1;
    1863             :         }
    1864             : 
    1865             :         return 0;
    1866             : 
    1867             : err:
    1868           0 :         pm_runtime_enable(dev);
    1869           0 :         return ret;
    1870             : }
    1871             : EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
    1872             : 
    1873             : /**
    1874             :  * pm_runtime_force_resume - Force a device into resume state if needed.
    1875             :  * @dev: Device to resume.
    1876             :  *
     1877             :  * Prior to invoking this function, we expect the user to have brought the device
     1878             :  * into a low-power state by a call to pm_runtime_force_suspend(). Here we reverse
    1879             :  * those actions and bring the device into full power, if it is expected to be
    1880             :  * used on system resume.  In the other case, we defer the resume to be managed
    1881             :  * via runtime PM.
    1882             :  *
    1883             :  * Typically this function may be invoked from a system resume callback.
    1884             :  */
    1885           0 : int pm_runtime_force_resume(struct device *dev)
    1886             : {
    1887             :         int (*callback)(struct device *);
    1888           0 :         int ret = 0;
    1889             : 
    1890           0 :         if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
    1891             :                 goto out;
    1892             : 
    1893             :         /*
    1894             :          * The value of the parent's children counter is correct already, so
    1895             :          * just update the status of the device.
    1896             :          */
    1897           0 :         __update_runtime_status(dev, RPM_ACTIVE);
    1898             : 
    1899           0 :         callback = RPM_GET_CALLBACK(dev, runtime_resume);
    1900             : 
    1901           0 :         ret = callback ? callback(dev) : 0;
    1902           0 :         if (ret) {
    1903             :                 pm_runtime_set_suspended(dev);
    1904             :                 goto out;
    1905             :         }
    1906             : 
    1907             :         pm_runtime_mark_last_busy(dev);
    1908             : out:
    1909           0 :         dev->power.needs_force_resume = 0;
    1910           0 :         pm_runtime_enable(dev);
    1911           0 :         return ret;
    1912             : }
    1913             : EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
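/*
 * Editorial sketch: drivers commonly reuse their runtime PM callbacks for
 * system-wide sleep by plugging these two helpers into their dev_pm_ops
 * (foo_runtime_suspend() and foo_runtime_resume() are hypothetical):
 */
static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};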

Generated by: LCOV version 1.14