/* SPDX-License-Identifier: GPL-2.0 */
/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/hrtimer.h>
#include <linux/kref.h>
#include <linux/workqueue.h>
#include <linux/jump_label.h>

#include <linux/atomic.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/sections.h>

/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour.  When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE       0x00000000
#define IRQF_TRIGGER_RISING     0x00000001
#define IRQF_TRIGGER_FALLING    0x00000002
#define IRQF_TRIGGER_HIGH       0x00000004
#define IRQF_TRIGGER_LOW        0x00000008
#define IRQF_TRIGGER_MASK       (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
                                 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE      0x00000010
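
/*
 * Illustrative sketch (not part of the original header): requesting an
 * interrupt with an explicit trigger instead of relying on the
 * already-configured line behaviour. "struct mydev", "mydev_handler"
 * and the "mydev" name are hypothetical.
 */
#if 0
static int mydev_setup_irq(struct mydev *md, unsigned int irq)
{
        /* Falling-edge trigger, chosen explicitly; passing no
         * IRQF_TRIGGER_* bit would keep the firmware's setting. */
        return request_irq(irq, mydev_handler, IRQF_TRIGGER_FALLING,
                           "mydev", md);
}
#endif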

/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as a timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
 *                Used by threaded interrupts which need to keep the
 *                irq line disabled until the threaded handler has been run.
 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend.  Does not guarantee
 *                   that this interrupt will wake the system from a suspended
 *                   state.  See Documentation/power/suspend-and-interrupts.rst
 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
 * IRQF_NO_THREAD - Interrupt cannot be threaded
 * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
 *                resume time.
 * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this
 *                interrupt handler after suspending interrupts. For system
 *                wakeup devices users need to implement wakeup detection in
 *                their interrupt handlers.
 * IRQF_NO_AUTOEN - Don't enable the IRQ or NMI automatically when users request
 *                it. Users will enable it explicitly by enable_irq() or
 *                enable_nmi() later.
 * IRQF_NO_DEBUG - Exclude from runaway detection for IPI and similar handlers,
 *                 depends on IRQF_PERCPU.
 */
#define IRQF_SHARED             0x00000080
#define IRQF_PROBE_SHARED       0x00000100
#define __IRQF_TIMER            0x00000200
#define IRQF_PERCPU             0x00000400
#define IRQF_NOBALANCING        0x00000800
#define IRQF_IRQPOLL            0x00001000
#define IRQF_ONESHOT            0x00002000
#define IRQF_NO_SUSPEND         0x00004000
#define IRQF_FORCE_RESUME       0x00008000
#define IRQF_NO_THREAD          0x00010000
#define IRQF_EARLY_RESUME       0x00020000
#define IRQF_COND_SUSPEND       0x00040000
#define IRQF_NO_AUTOEN          0x00080000
#define IRQF_NO_DEBUG           0x00100000

#define IRQF_TIMER              (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
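
/*
 * Illustrative sketch (not part of the original header): the common
 * threaded-interrupt pattern. With a NULL primary handler the core
 * installs a default one that just wakes the thread, and IRQF_ONESHOT
 * is then mandatory so the (level) line stays masked until the thread
 * has run. "mydev_thread_fn" and friends are hypothetical.
 */
#if 0
static irqreturn_t mydev_thread_fn(int irq, void *dev_id)
{
        struct mydev *md = dev_id;

        /* Heavy lifting in sleepable process context. */
        mydev_process_events(md);
        return IRQ_HANDLED;
}

static int mydev_setup(struct mydev *md, unsigned int irq)
{
        return request_threaded_irq(irq, NULL, mydev_thread_fn,
                                    IRQF_ONESHOT, "mydev", md);
}
#endif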

/*
 * These values can be returned by request_any_context_irq() and
 * describe the context the interrupt will be run in.
 *
 * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
 * IRQC_IS_NESTED - interrupt runs in a nested threaded context
 */
enum {
        IRQC_IS_HARDIRQ = 0,
        IRQC_IS_NESTED,
};
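
/*
 * Illustrative sketch (not part of the original header): on success,
 * request_any_context_irq() returns one of the IRQC_* values above
 * rather than 0, so callers test for a negative error. Names are
 * hypothetical.
 */
#if 0
static int mydev_request(struct device *dev, struct mydev *md,
                         unsigned int irq)
{
        int ret = request_any_context_irq(irq, mydev_handler, 0,
                                          "mydev", md);
        if (ret < 0)
                return ret;
        if (ret == IRQC_IS_NESTED)
                dev_dbg(dev, "handler runs in a nested thread\n");
        return 0;
}
#endif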

typedef irqreturn_t (*irq_handler_t)(int, void *);

/**
 * struct irqaction - per interrupt action descriptor
 * @handler:    interrupt handler function
 * @name:       name of the device
 * @dev_id:     cookie to identify the device
 * @percpu_dev_id:      cookie to identify the device, as a percpu pointer
 * @next:       pointer to the next irqaction for shared interrupts
 * @irq:        interrupt number
 * @flags:      flags (see IRQF_* above)
 * @thread_fn:  interrupt handler function for threaded interrupts
 * @thread:     thread pointer for threaded interrupts
 * @secondary:  pointer to secondary irqaction (force threading)
 * @thread_flags:       flags related to @thread
 * @thread_mask:        bitmask for keeping track of @thread activity
 * @dir:        pointer to the proc/irq/NN/name entry
 */
struct irqaction {
        irq_handler_t           handler;
        void                    *dev_id;
        void __percpu           *percpu_dev_id;
        struct irqaction        *next;
        irq_handler_t           thread_fn;
        struct task_struct      *thread;
        struct irqaction        *secondary;
        unsigned int            irq;
        unsigned int            flags;
        unsigned long           thread_flags;
        unsigned long           thread_mask;
        const char              *name;
        struct proc_dir_entry   *dir;
} ____cacheline_internodealigned_in_smp;

extern irqreturn_t no_action(int cpl, void *dev_id);

/*
 * If a (PCI) device interrupt is not connected we set dev->irq to
 * IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we
 * can distinguish that case from other error returns.
 *
 * 0x80000000 is guaranteed to be outside the available range of interrupts
 * and easy to distinguish from other possible incorrect values.
 */
#define IRQ_NOTCONNECTED        (1U << 31)
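
/*
 * Illustrative sketch (not part of the original header): treating the
 * -ENOTCONN failure that IRQ_NOTCONNECTED produces as "no interrupt
 * wired up" rather than a hard error. Names are hypothetical.
 */
#if 0
static int mydev_init_irq(struct pci_dev *pdev, struct mydev *md)
{
        int ret;

        ret = request_irq(pdev->irq, mydev_handler, 0, "mydev", md);
        if (ret == -ENOTCONN) {
                /* pdev->irq == IRQ_NOTCONNECTED: fall back to polling */
                md->use_polling = true;
                return 0;
        }
        return ret;
}
#endif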

extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
                     irq_handler_t thread_fn,
                     unsigned long flags, const char *name, void *dev);

/**
 * request_irq - Add a handler for an interrupt line
 * @irq:        The interrupt line to allocate
 * @handler:    Function to be called when the IRQ occurs.
 *              Primary handler for threaded interrupts.
 *              If NULL, the default primary handler is installed.
 * @flags:      Handling flags
 * @name:       Name of the device generating this interrupt
 * @dev:        A cookie passed to the handler function
 *
 * This call allocates an interrupt and establishes a handler; see
 * the documentation for request_threaded_irq() for details.
 */
static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
            const char *name, void *dev)
{
        return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}
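
/*
 * Illustrative sketch (not part of the original header): the classic
 * request_irq()/free_irq() pairing. The cookie passed to request_irq()
 * must be passed again to free_irq(), which is what lets shared lines
 * tell registrations apart. Names are hypothetical.
 */
#if 0
static int mydev_open(struct mydev *md)
{
        return request_irq(md->irq, mydev_handler, 0, "mydev", md);
}

static void mydev_close(struct mydev *md)
{
        free_irq(md->irq, md);  /* cookie identifies our registration */
}
#endif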

extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
                        unsigned long flags, const char *name, void *dev_id);

extern int __must_check
__request_percpu_irq(unsigned int irq, irq_handler_t handler,
                     unsigned long flags, const char *devname,
                     void __percpu *percpu_dev_id);

extern int __must_check
request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags,
            const char *name, void *dev);

static inline int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
                   const char *devname, void __percpu *percpu_dev_id)
{
        return __request_percpu_irq(irq, handler, 0,
                                    devname, percpu_dev_id);
}

extern int __must_check
request_percpu_nmi(unsigned int irq, irq_handler_t handler,
                   const char *devname, void __percpu *dev);

extern const void *free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);

extern const void *free_nmi(unsigned int irq, void *dev_id);
extern void free_percpu_nmi(unsigned int irq, void __percpu *percpu_dev_id);

struct device;

extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
                          irq_handler_t handler, irq_handler_t thread_fn,
                          unsigned long irqflags, const char *devname,
                          void *dev_id);

static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
                 unsigned long irqflags, const char *devname, void *dev_id)
{
        return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
                                         devname, dev_id);
}
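
/*
 * Illustrative sketch (not part of the original header): a devm-managed
 * request in a probe() routine. The IRQ is released automatically when
 * the device is unbound, so no explicit free is needed. Names are
 * hypothetical.
 */
#if 0
static int mydev_probe(struct platform_device *pdev)
{
        struct mydev *md = devm_kzalloc(&pdev->dev, sizeof(*md), GFP_KERNEL);
        int irq = platform_get_irq(pdev, 0);

        if (!md)
                return -ENOMEM;
        if (irq < 0)
                return irq;

        /* freed automatically on driver unbind */
        return devm_request_irq(&pdev->dev, irq, mydev_handler, 0,
                                dev_name(&pdev->dev), md);
}
#endif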

extern int __must_check
devm_request_any_context_irq(struct device *dev, unsigned int irq,
                 irq_handler_t handler, unsigned long irqflags,
                 const char *devname, void *dev_id);

extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);

/*
 * With lockdep we don't want to enable hardirqs in hardirq
 * context. Use local_irq_enable_in_hardirq() to annotate
 * kernel code that has to do this nevertheless (pretty much
 * the only valid case is for old/broken hardware that is
 * insanely slow).
 *
 * NOTE: in theory this might break fragile code that relies
 * on hardirq delivery - in practice we don't seem to have such
 * places left. So the only effect should be slightly increased
 * irqs-off latencies.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq()  do { } while (0)
#else
# define local_irq_enable_in_hardirq()  local_irq_enable()
#endif

bool irq_has_action(unsigned int irq);
extern void disable_irq_nosync(unsigned int irq);
extern bool disable_hardirq(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void disable_percpu_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
extern void enable_percpu_irq(unsigned int irq, unsigned int type);
extern bool irq_percpu_is_enabled(unsigned int irq);
extern void irq_wake_thread(unsigned int irq, void *dev_id);

extern void disable_nmi_nosync(unsigned int irq);
extern void disable_percpu_nmi(unsigned int irq);
extern void enable_nmi(unsigned int irq);
extern void enable_percpu_nmi(unsigned int irq, unsigned int type);
extern int prepare_percpu_nmi(unsigned int irq);
extern void teardown_percpu_nmi(unsigned int irq);

extern int irq_inject_interrupt(unsigned int irq);

/* The following three functions are for the core kernel use only. */
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
extern void rearm_wake_irq(unsigned int irq);

/**
 * struct irq_affinity_notify - context for notification of IRQ affinity changes
 * @irq:                Interrupt to which notification applies
 * @kref:               Reference count, for internal use
 * @work:               Work item, for internal use
 * @notify:             Function to be called on change.  This will be
 *                      called in process context.
 * @release:            Function to be called on release.  This will be
 *                      called in process context.  Once registered, the
 *                      structure must only be freed when this function is
 *                      called or later.
 */
struct irq_affinity_notify {
        unsigned int irq;
        struct kref kref;
        struct work_struct work;
        void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
        void (*release)(struct kref *ref);
};

#define IRQ_AFFINITY_MAX_SETS  4

/**
 * struct irq_affinity - Description for automatic irq affinity assignments
 * @pre_vectors:        Don't apply affinity to @pre_vectors at beginning of
 *                      the MSI(-X) vector space
 * @post_vectors:       Don't apply affinity to @post_vectors at end of
 *                      the MSI(-X) vector space
 * @nr_sets:            The number of interrupt sets for which affinity
 *                      spreading is required
 * @set_size:           Array holding the size of each interrupt set
 * @calc_sets:          Callback for calculating the number and size
 *                      of interrupt sets
 * @priv:               Private data for usage by @calc_sets, usually a
 *                      pointer to driver/device specific data.
 */
struct irq_affinity {
        unsigned int    pre_vectors;
        unsigned int    post_vectors;
        unsigned int    nr_sets;
        unsigned int    set_size[IRQ_AFFINITY_MAX_SETS];
        void            (*calc_sets)(struct irq_affinity *, unsigned int nvecs);
        void            *priv;
};

/**
 * struct irq_affinity_desc - Interrupt affinity descriptor
 * @mask:       cpumask to hold the affinity assignment
 * @is_managed: 1 if the interrupt is managed internally
 */
struct irq_affinity_desc {
        struct cpumask  mask;
        unsigned int    is_managed : 1;
};

#if defined(CONFIG_SMP)

extern cpumask_var_t irq_default_affinity;

extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
extern int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask);

extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

extern int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
                                     bool setaffinity);

/**
 * irq_update_affinity_hint - Update the affinity hint
 * @irq:        Interrupt to update
 * @m:          cpumask pointer (NULL to clear the hint)
 *
 * Updates the affinity hint, but does not change the affinity of the interrupt.
 */
static inline int
irq_update_affinity_hint(unsigned int irq, const struct cpumask *m)
{
        return __irq_apply_affinity_hint(irq, m, false);
}

/**
 * irq_set_affinity_and_hint - Update the affinity hint and apply the provided
 *                           cpumask to the interrupt
 * @irq:        Interrupt to update
 * @m:          cpumask pointer (NULL to clear the hint)
 *
 * Updates the affinity hint and, if @m is not NULL, applies it as the
 * affinity of that interrupt.
 */
static inline int
irq_set_affinity_and_hint(unsigned int irq, const struct cpumask *m)
{
        return __irq_apply_affinity_hint(irq, m, true);
}
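
/*
 * Illustrative sketch (not part of the original header): publishing an
 * affinity hint so userspace irqbalance can place the interrupt near a
 * queue's CPU, and clearing it on teardown. Names are hypothetical.
 */
#if 0
static void mydev_set_queue_affinity(struct mydev_queue *q)
{
        /* Hint only: does not change the actual affinity. */
        irq_update_affinity_hint(q->irq, cpumask_of(q->cpu));
}

static void mydev_clear_queue_affinity(struct mydev_queue *q)
{
        irq_update_affinity_hint(q->irq, NULL); /* clear the hint */
}
#endif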

/*
 * Deprecated. Use irq_update_affinity_hint() or irq_set_affinity_and_hint()
 * instead.
 */
static inline int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
        return irq_set_affinity_and_hint(irq, m);
}

extern int irq_update_affinity_desc(unsigned int irq,
                                    struct irq_affinity_desc *affinity);

extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);

struct irq_affinity_desc *
irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd);

unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
                                       const struct irq_affinity *affd);

#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
        return -EINVAL;
}

static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
        return 0;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
        return 0;
}

static inline int irq_select_affinity(unsigned int irq) { return 0; }

static inline int irq_update_affinity_hint(unsigned int irq,
                                           const struct cpumask *m)
{
        return -EINVAL;
}

static inline int irq_set_affinity_and_hint(unsigned int irq,
                                            const struct cpumask *m)
{
        return -EINVAL;
}

static inline int irq_set_affinity_hint(unsigned int irq,
                                        const struct cpumask *m)
{
        return -EINVAL;
}

static inline int irq_update_affinity_desc(unsigned int irq,
                                           struct irq_affinity_desc *affinity)
{
        return -EINVAL;
}

static inline int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
        return 0;
}

static inline struct irq_affinity_desc *
irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd)
{
        return NULL;
}

static inline unsigned int
irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
                          const struct irq_affinity *affd)
{
        return maxvec;
}

#endif /* CONFIG_SMP */

/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs which know
 * that a particular irq context is disabled, and which are
 * the only irq-context users of a lock, so that it's safe
 * to take the lock in the irq-disabled section without
 * disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
        disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
        local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
        disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
        local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
        disable_irq(irq);
#ifdef CONFIG_LOCKDEP
        local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
        local_irq_enable();
#endif
        enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
        local_irq_restore(*flags);
#endif
        enable_irq(irq);
}

/* IRQ wakeup (PM) control: */
extern int irq_set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
        return irq_set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
        return irq_set_irq_wake(irq, 0);
}
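
/*
 * Illustrative sketch (not part of the original header): arming an IRQ
 * as a wakeup source across suspend/resume. enable_irq_wake() and
 * disable_irq_wake() nest, so the calls must be balanced. Names are
 * hypothetical.
 */
#if 0
static int mydev_suspend(struct device *dev)
{
        struct mydev *md = dev_get_drvdata(dev);

        if (device_may_wakeup(dev))
                enable_irq_wake(md->irq);       /* let the IRQ wake us */
        return 0;
}

static int mydev_resume(struct device *dev)
{
        struct mydev *md = dev_get_drvdata(dev);

        if (device_may_wakeup(dev))
                disable_irq_wake(md->irq);      /* balance the enable */
        return 0;
}
#endif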

/*
 * irq_get_irqchip_state/irq_set_irqchip_state specific flags
 */
enum irqchip_irq_state {
        IRQCHIP_STATE_PENDING,          /* Is interrupt pending? */
        IRQCHIP_STATE_ACTIVE,           /* Is interrupt in progress? */
        IRQCHIP_STATE_MASKED,           /* Is interrupt masked? */
        IRQCHIP_STATE_LINE_LEVEL,       /* Is IRQ line high? */
};

extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
                                 bool *state);
extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
                                 bool state);
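
/*
 * Illustrative sketch (not part of the original header): querying and
 * clearing a pending interrupt at the irqchip level. Names are
 * hypothetical.
 */
#if 0
static void mydev_clear_stale_irq(unsigned int irq)
{
        bool pending = false;

        if (!irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending) &&
            pending) {
                /* Drop an interrupt latched before we were ready. */
                irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, false);
        }
}
#endif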

#ifdef CONFIG_IRQ_FORCED_THREADING
# ifdef CONFIG_PREEMPT_RT
#  define force_irqthreads()    (true)
# else
DECLARE_STATIC_KEY_FALSE(force_irqthreads_key);
#  define force_irqthreads()    (static_branch_unlikely(&force_irqthreads_key))
# endif
#else
#define force_irqthreads()      (false)
#endif

#ifndef local_softirq_pending

#ifndef local_softirq_pending_ref
#define local_softirq_pending_ref irq_stat.__softirq_pending
#endif

#define local_softirq_pending() (__this_cpu_read(local_softirq_pending_ref))
#define set_softirq_pending(x)  (__this_cpu_write(local_softirq_pending_ref, (x)))
#define or_softirq_pending(x)   (__this_cpu_or(local_softirq_pending_ref, (x)))

#endif /* local_softirq_pending */

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()      do { } while (0)
#endif

/* Please avoid allocating new softirqs unless you really need
   high-frequency threaded job scheduling. For almost all purposes
   tasklets are more than enough; e.g. all serial device BHs et
   al. should be converted to tasklets, not softirqs.
 */

enum
{
        HI_SOFTIRQ=0,
        TIMER_SOFTIRQ,
        NET_TX_SOFTIRQ,
        NET_RX_SOFTIRQ,
        BLOCK_SOFTIRQ,
        IRQ_POLL_SOFTIRQ,
        TASKLET_SOFTIRQ,
        SCHED_SOFTIRQ,
        HRTIMER_SOFTIRQ,
        RCU_SOFTIRQ,    /* Preferably RCU should always be the last softirq */

        NR_SOFTIRQS
};

/*
 * The following vectors can be safely ignored after ksoftirqd is parked:
 *
 * _ RCU:
 *      1) rcutree_migrate_callbacks() migrates the queue.
 *      2) rcu_report_dead() reports the final quiescent states.
 *
 * _ IRQ_POLL: irq_poll_cpu_dead() migrates the queue
 */
#define SOFTIRQ_HOTPLUG_SAFE_MASK (BIT(RCU_SOFTIRQ) | BIT(IRQ_POLL_SOFTIRQ))

/* Map a softirq index to its name. Update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
 */
extern const char * const softirq_to_name[NR_SOFTIRQS];

/* The softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage.  KAO
 */

struct softirq_action
{
        void    (*action)(struct softirq_action *);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);

extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);

extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);

DECLARE_PER_CPU(struct task_struct *, ksoftirqd);

static inline struct task_struct *this_cpu_ksoftirqd(void)
{
        return this_cpu_read(ksoftirqd);
}
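
/*
 * Illustrative sketch (not part of the original header): how the core
 * kernel wires up a softirq vector and raises it later; per the comment
 * above, drivers should use tasklets or threaded IRQs instead.
 * "MY_SOFTIRQ" stands for a hypothetical entry added to the enum above,
 * and "my_softirq_action" is likewise hypothetical.
 */
#if 0
static void my_softirq_action(struct softirq_action *h)
{
        /* Runs in softirq context with interrupts enabled. */
}

static int __init my_subsys_init(void)
{
        open_softirq(MY_SOFTIRQ, my_softirq_action);    /* boot-time only */
        return 0;
}

static void my_kick(void)
{
        raise_softirq(MY_SOFTIRQ);      /* typically from irq context */
}
#endif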

/* Tasklets --- multithreaded analogue of BHs.

   This API is deprecated. Please consider using threaded IRQs instead:
   https://lore.kernel.org/lkml/20200716081538.2sivhkj4hcyrusem@linutronix.de

   The main feature distinguishing them from generic softirqs: a given
   tasklet runs on only one CPU at a time.

   The main feature distinguishing them from BHs: different tasklets
   may run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, the tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution has not yet
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or if schedule
     is called from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized with respect to itself, but not
     with respect to other tasklets. If a client needs some inter-tasklet
     synchronization, it must provide it, e.g. with spinlocks.
 */

struct tasklet_struct
{
        struct tasklet_struct *next;
        unsigned long state;
        atomic_t count;
        bool use_callback;
        union {
                void (*func)(unsigned long data);
                void (*callback)(struct tasklet_struct *t);
        };
        unsigned long data;
};

#define DECLARE_TASKLET(name, _callback)                \
struct tasklet_struct name = {                          \
        .count = ATOMIC_INIT(0),                        \
        .callback = _callback,                          \
        .use_callback = true,                           \
}

#define DECLARE_TASKLET_DISABLED(name, _callback)       \
struct tasklet_struct name = {                          \
        .count = ATOMIC_INIT(1),                        \
        .callback = _callback,                          \
        .use_callback = true,                           \
}

#define from_tasklet(var, callback_tasklet, tasklet_fieldname)  \
        container_of(callback_tasklet, typeof(*var), tasklet_fieldname)

#define DECLARE_TASKLET_OLD(name, _func)                \
struct tasklet_struct name = {                          \
        .count = ATOMIC_INIT(0),                        \
        .func = _func,                                  \
}

#define DECLARE_TASKLET_DISABLED_OLD(name, _func)       \
struct tasklet_struct name = {                          \
        .count = ATOMIC_INIT(1),                        \
        .func = _func,                                  \
}
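
/*
 * Illustrative sketch (not part of the original header): the modern
 * callback-style tasklet API, using from_tasklet() to recover the
 * enclosing structure. Names are hypothetical.
 */
#if 0
struct mydev {
        struct tasklet_struct tl;
        /* ... device state ... */
};

static void mydev_tasklet_fn(struct tasklet_struct *t)
{
        struct mydev *md = from_tasklet(md, t, tl);

        /* Deferred work runs here, in softirq context. */
}

static void mydev_init(struct mydev *md)
{
        tasklet_setup(&md->tl, mydev_tasklet_fn);
}

static irqreturn_t mydev_irq(int irq, void *dev_id)
{
        struct mydev *md = dev_id;

        tasklet_schedule(&md->tl);      /* defer out of hard-irq context */
        return IRQ_HANDLED;
}
#endif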

enum
{
        TASKLET_STATE_SCHED,    /* Tasklet is scheduled for execution */
        TASKLET_STATE_RUN       /* Tasklet is running (SMP only) */
};

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
static inline int tasklet_trylock(struct tasklet_struct *t)
{
        return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

void tasklet_unlock(struct tasklet_struct *t);
void tasklet_unlock_wait(struct tasklet_struct *t);
void tasklet_unlock_spin_wait(struct tasklet_struct *t);

#else
static inline int tasklet_trylock(struct tasklet_struct *t) { return 1; }
static inline void tasklet_unlock(struct tasklet_struct *t) { }
static inline void tasklet_unlock_wait(struct tasklet_struct *t) { }
static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t) { }
#endif

extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
        if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
                __tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
        if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
                __tasklet_hi_schedule(t);
}

static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
        atomic_inc(&t->count);
        smp_mb__after_atomic();
}

/*
 * Do not use in new code. Disabling tasklets from atomic contexts is
 * error prone and should be avoided.
 */
static inline void tasklet_disable_in_atomic(struct tasklet_struct *t)
{
        tasklet_disable_nosync(t);
        tasklet_unlock_spin_wait(t);
        smp_mb();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
        tasklet_disable_nosync(t);
        tasklet_unlock_wait(t);
        smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
        smp_mb__before_atomic();
        atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_init(struct tasklet_struct *t,
                         void (*func)(unsigned long), unsigned long data);
extern void tasklet_setup(struct tasklet_struct *t,
                          void (*callback)(struct tasklet_struct *));

/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization.  They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows (a sketch
 * following the declarations below walks through these steps):
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irqs.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */

#if !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
        return 0;
}
static inline int probe_irq_off(unsigned long val)
{
        return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
        return 0;
}
#else
extern unsigned long probe_irq_on(void);        /* returns 0 on failure */
extern int probe_irq_off(unsigned long);        /* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);      /* returns mask of ISA interrupts */
#endif
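
/*
 * Illustrative sketch (not part of the original header): the probing
 * steps from the comment above, for a hypothetical ISA-style device
 * with mydev_mask_irq()/mydev_trigger_irq()/mydev_ack_irq() helpers.
 */
#if 0
static int mydev_probe_irq(struct mydev *md)
{
        unsigned long irqs;
        int irq;

        mydev_mask_irq(md);             /* 1. quiesce the device */
        irqs = probe_irq_on();          /* 3. take over idle IRQs */
        mydev_trigger_irq(md);          /* 4. make the device interrupt */
        mdelay(10);                     /* 5. give it time to fire */
        irq = probe_irq_off(irqs);      /* 6. 0 = none, <0 = multiple */
        mydev_ack_irq(md);              /* 7. clear the pending state */

        return irq > 0 ? irq : -ENODEV;
}
#endif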

#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

#ifdef CONFIG_IRQ_TIMINGS
void irq_timings_enable(void);
void irq_timings_disable(void);
u64 irq_timings_next_event(u64 now);
#endif

struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);

/*
 * We want to know which function is an entrypoint of a hardirq or a softirq.
 */
#ifndef __irq_entry
# define __irq_entry     __section(".irqentry.text")
#endif

#define __softirq_entry  __section(".softirqentry.text")

#endif /* _LINUX_INTERRUPT_H */