// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs a smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqe (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed in
 * between.
 *
 * Also see the examples in the liburing library:
 *
 *      git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store to or load from
 * data shared between the kernel and the application. This is done both
 * for ordering purposes and to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
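
/*
 * As an illustration of the pairing rules above, a userspace CQ reap loop
 * could look like the sketch below. This is not part of this file; `rings`
 * stands for the application's mmap'd view of struct io_rings (defined
 * further down) and consume() is a placeholder:
 *
 *      unsigned head = rings->cq.head;   // app owns cq head, plain load is fine
 *      unsigned tail = smp_load_acquire(&rings->cq.tail); // pairs with kernel's release
 *      while (head != tail) {
 *              struct io_uring_cqe *cqe = &rings->cqes[head & rings->cq_ring_mask];
 *              consume(cqe);
 *              head++;
 *      }
 *      smp_store_release(&rings->cq.head, head); // orders entry loads before head store
 */
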
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/bits.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>
#include <linux/fadvise.h>
#include <linux/eventpoll.h>
#include <linux/splice.h>
#include <linux/task_work.h>
#include <linux/pagemap.h>
#include <linux/io_uring.h>
#include <linux/audit.h>
#include <linux/security.h>

#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "internal.h"
#include "io-wq.h"

#define IORING_MAX_ENTRIES      32768
#define IORING_MAX_CQ_ENTRIES   (2 * IORING_MAX_ENTRIES)
#define IORING_SQPOLL_CAP_ENTRIES_VALUE 8

/* only define max */
#define IORING_MAX_FIXED_FILES  (1U << 15)
#define IORING_MAX_RESTRICTIONS (IORING_RESTRICTION_LAST + \
                                 IORING_REGISTER_LAST + IORING_OP_LAST)

#define IO_RSRC_TAG_TABLE_SHIFT (PAGE_SHIFT - 3)
#define IO_RSRC_TAG_TABLE_MAX   (1U << IO_RSRC_TAG_TABLE_SHIFT)
#define IO_RSRC_TAG_TABLE_MASK  (IO_RSRC_TAG_TABLE_MAX - 1)

#define IORING_MAX_REG_BUFFERS  (1U << 14)

#define SQE_COMMON_FLAGS (IOSQE_FIXED_FILE | IOSQE_IO_LINK | \
                          IOSQE_IO_HARDLINK | IOSQE_ASYNC)

#define SQE_VALID_FLAGS (SQE_COMMON_FLAGS | IOSQE_BUFFER_SELECT | \
                        IOSQE_IO_DRAIN | IOSQE_CQE_SKIP_SUCCESS)

#define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
                                REQ_F_POLLED | REQ_F_CREDS | REQ_F_ASYNC_DATA)

#define IO_TCTX_REFS_CACHE_NR   (1U << 10)

struct io_uring {
        u32 head ____cacheline_aligned_in_smp;
        u32 tail ____cacheline_aligned_in_smp;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
        /*
         * Head and tail offsets into the ring; the offsets need to be
         * masked to get valid indices.
         *
         * The kernel controls the head of the sq ring and the tail of the
         * cq ring; the application controls the tail of the sq ring and
         * the head of the cq ring.
         */
        struct io_uring         sq, cq;
        /*
         * Bitmasks to apply to head and tail offsets (constant, equal to
         * ring_entries - 1)
         */
        u32                     sq_ring_mask, cq_ring_mask;
        /* Ring sizes (constant, power of 2) */
        u32                     sq_ring_entries, cq_ring_entries;
        /*
         * Number of invalid entries dropped by the kernel due to an
         * invalid index stored in the array.
         *
         * Written by the kernel, shouldn't be modified by the
         * application (i.e. get the number of "new events" by comparing
         * to a cached value).
         *
         * After the application has read a new SQ head value, this
         * counter includes all submissions that were dropped up to the
         * new SQ head (and possibly more).
         */
        u32                     sq_dropped;
        /*
         * Runtime SQ flags
         *
         * Written by the kernel, shouldn't be modified by the
         * application.
         *
         * The application needs a full memory barrier before checking
         * for IORING_SQ_NEED_WAKEUP after updating the sq tail (see the
         * sketch after this struct).
         */
        u32                     sq_flags;
        /*
         * Runtime CQ flags
         *
         * Written by the application, shouldn't be modified by the
         * kernel.
         */
        u32                     cq_flags;
        /*
         * Number of completion events lost because the queue was full;
         * the application should avoid this by making sure there are
         * never more requests pending than there is space in the
         * completion queue.
         *
         * Written by the kernel, shouldn't be modified by the
         * application (i.e. get the number of "new events" by comparing
         * to a cached value).
         *
         * As completion events come in out of order, this counter is not
         * ordered with any other data.
         */
        u32                     cq_overflow;
        /*
         * Ring buffer of completion events.
         *
         * The kernel writes completion events fresh every time they are
         * produced, so the application is allowed to modify pending
         * entries.
         */
        struct io_uring_cqe     cqes[] ____cacheline_aligned_in_smp;
};
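
/*
 * A sketch of the submission-side sequence referenced in the sq_flags
 * comment above, from the application's point of view (illustrative only;
 * `rings` and `sq_array` are the mmap'd regions and `sqe_idx` is the SQE
 * slot that was just filled in):
 *
 *      unsigned tail = rings->sq.tail;   // app owns sq tail, plain load is fine
 *      sq_array[tail & rings->sq_ring_mask] = sqe_idx;
 *      smp_store_release(&rings->sq.tail, tail + 1); // pairs with io_get_sqring's acquire
 *      smp_mb();                                     // order tail store vs. flags load
 *      if (READ_ONCE(rings->sq_flags) & IORING_SQ_NEED_WAKEUP)
 *              io_uring_enter(ring_fd, 0, 0, IORING_ENTER_SQ_WAKEUP, NULL);
 */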

enum io_uring_cmd_flags {
        IO_URING_F_COMPLETE_DEFER       = 1,
        IO_URING_F_UNLOCKED             = 2,
        /* int's last bit, sign checks are usually faster than a bit test */
        IO_URING_F_NONBLOCK             = INT_MIN,
};
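
/*
 * Since IO_URING_F_NONBLOCK occupies the sign bit of an int, a check such
 * as the sketch below (assuming issue_flags is a plain int) can compile
 * down to a simple sign test rather than a mask-and-compare:
 *
 *      if (issue_flags & IO_URING_F_NONBLOCK)
 *              return -EAGAIN;
 */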

struct io_mapped_ubuf {
        u64             ubuf;
        u64             ubuf_end;
        unsigned int    nr_bvecs;
        unsigned long   acct_pages;
        struct bio_vec  bvec[];
};

struct io_ring_ctx;

struct io_overflow_cqe {
        struct io_uring_cqe cqe;
        struct list_head list;
};

struct io_fixed_file {
        /* file * with additional FFS_* flags */
        unsigned long file_ptr;
};
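
/*
 * file_ptr packs the struct file pointer together with the FFS_* bits in
 * the pointer's low, alignment-guaranteed bits. A hedged sketch of the
 * encode/decode, with FFS_MASK standing in for a mask (defined elsewhere
 * in this file) that clears the flag bits:
 *
 *      slot->file_ptr = (unsigned long)file | flags; // flags fit below the alignment
 *      file  = (struct file *)(slot->file_ptr & FFS_MASK);
 *      flags = slot->file_ptr & ~FFS_MASK;
 */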

struct io_rsrc_put {
        struct list_head list;
        u64 tag;
        union {
                void *rsrc;
                struct file *file;
                struct io_mapped_ubuf *buf;
        };
};

struct io_file_table {
        struct io_fixed_file *files;
};

struct io_rsrc_node {
        struct percpu_ref               refs;
        struct list_head                node;
        struct list_head                rsrc_list;
        struct io_rsrc_data             *rsrc_data;
        struct llist_node               llist;
        bool                            done;
};

typedef void (rsrc_put_fn)(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);

struct io_rsrc_data {
        struct io_ring_ctx              *ctx;

        u64                             **tags;
        unsigned int                    nr;
        rsrc_put_fn                     *do_put;
        atomic_t                        refs;
        struct completion               done;
        bool                            quiesce;
};

struct io_buffer_list {
        struct list_head list;
        struct list_head buf_list;
        __u16 bgid;
};

struct io_buffer {
        struct list_head list;
        __u64 addr;
        __u32 len;
        __u16 bid;
        __u16 bgid;
};

struct io_restriction {
        DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
        DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
        u8 sqe_flags_allowed;
        u8 sqe_flags_required;
        bool registered;
};
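
/*
 * Once restrictions have been registered, SQE submission is checked
 * against them roughly as follows (a sketch of the intended use, with
 * `res` short for &ctx->restrictions):
 *
 *      if (!test_bit(req->opcode, res->sqe_op))
 *              reject the request;
 *      if ((sqe_flags & res->sqe_flags_required) != res->sqe_flags_required)
 *              reject the request;
 *      if (sqe_flags & ~(res->sqe_flags_allowed | res->sqe_flags_required))
 *              reject the request;
 */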

enum {
        IO_SQ_THREAD_SHOULD_STOP = 0,
        IO_SQ_THREAD_SHOULD_PARK,
};

struct io_sq_data {
        refcount_t              refs;
        atomic_t                park_pending;
        struct mutex            lock;

        /* ctx's that are using this sqd */
        struct list_head        ctx_list;

        struct task_struct      *thread;
        struct wait_queue_head  wait;

        unsigned                sq_thread_idle;
        int                     sq_cpu;
        pid_t                   task_pid;
        pid_t                   task_tgid;

        unsigned long           state;
        struct completion       exited;
};

#define IO_COMPL_BATCH                  32
#define IO_REQ_CACHE_SIZE               32
#define IO_REQ_ALLOC_BATCH              8

struct io_submit_link {
        struct io_kiocb         *head;
        struct io_kiocb         *last;
};

struct io_submit_state {
        /* inline/task_work completion list, under ->uring_lock */
        struct io_wq_work_node  free_list;
        /* batch completion logic */
        struct io_wq_work_list  compl_reqs;
        struct io_submit_link   link;

        bool                    plug_started;
        bool                    need_plug;
        bool                    flush_cqes;
        unsigned short          submit_nr;
        struct blk_plug         plug;
};

struct io_ev_fd {
        struct eventfd_ctx      *cq_ev_fd;
        unsigned int            eventfd_async: 1;
        struct rcu_head         rcu;
};

#define IO_BUFFERS_HASH_BITS    5

struct io_ring_ctx {
        /* const or read-mostly hot data */
        struct {
                struct percpu_ref       refs;

                struct io_rings         *rings;
                unsigned int            flags;
                unsigned int            compat: 1;
                unsigned int            drain_next: 1;
                unsigned int            restricted: 1;
                unsigned int            off_timeout_used: 1;
                unsigned int            drain_active: 1;
                unsigned int            drain_disabled: 1;
                unsigned int            has_evfd: 1;
        } ____cacheline_aligned_in_smp;

        /* submission data */
        struct {
                struct mutex            uring_lock;

                /*
                 * Ring buffer of indices into the array of io_uring_sqe,
                 * which is mmapped by the application using the
                 * IORING_OFF_SQES offset.
                 *
                 * This indirection could e.g. be used to assign fixed
                 * io_uring_sqe entries to operations and only submit them
                 * to the queue when needed.
                 *
                 * The kernel modifies neither the indices array nor the
                 * entries array (the sketch after this struct shows the
                 * lookup).
                 */
                u32                     *sq_array;
                struct io_uring_sqe     *sq_sqes;
                unsigned                cached_sq_head;
                unsigned                sq_entries;
                struct list_head        defer_list;

                /*
                 * Fixed resources fast path, should be accessed only under
                 * uring_lock, and updated through io_uring_register(2)
                 */
                struct io_rsrc_node     *rsrc_node;
                int                     rsrc_cached_refs;
                struct io_file_table    file_table;
                unsigned                nr_user_files;
                unsigned                nr_user_bufs;
                struct io_mapped_ubuf   **user_bufs;

                struct io_submit_state  submit_state;
                struct list_head        timeout_list;
                struct list_head        ltimeout_list;
                struct list_head        cq_overflow_list;
                struct list_head        *io_buffers;
                struct list_head        io_buffers_cache;
                struct list_head        apoll_cache;
                struct xarray           personalities;
                u32                     pers_next;
                unsigned                sq_thread_idle;
        } ____cacheline_aligned_in_smp;

        /* IRQ completion list, under ->completion_lock */
        struct io_wq_work_list  locked_free_list;
        unsigned int            locked_free_nr;

        const struct cred       *sq_creds;      /* cred used for __io_sq_thread() */
        struct io_sq_data       *sq_data;       /* if using sq thread polling */

        struct wait_queue_head  sqo_sq_wait;
        struct list_head        sqd_list;

        unsigned long           check_cq_overflow;

        struct {
                unsigned                cached_cq_tail;
                unsigned                cq_entries;
                struct io_ev_fd __rcu   *io_ev_fd;
                struct wait_queue_head  cq_wait;
                unsigned                cq_extra;
                atomic_t                cq_timeouts;
                unsigned                cq_last_tm_flush;
        } ____cacheline_aligned_in_smp;

        struct {
                spinlock_t              completion_lock;

                spinlock_t              timeout_lock;

                /*
                 * ->iopoll_list is protected by the ctx->uring_lock for
                 * io_uring instances that don't use IORING_SETUP_SQPOLL.
                 * For SQPOLL, only the single threaded io_sq_thread() will
                 * manipulate the list, hence no extra locking is needed there.
                 */
                struct io_wq_work_list  iopoll_list;
                struct hlist_head       *cancel_hash;
                unsigned                cancel_hash_bits;
                bool                    poll_multi_queue;

                struct list_head        io_buffers_comp;
        } ____cacheline_aligned_in_smp;

        struct io_restriction           restrictions;

        /* slow path rsrc auxiliary data, used by update/register */
        struct {
                struct io_rsrc_node             *rsrc_backup_node;
                struct io_mapped_ubuf           *dummy_ubuf;
                struct io_rsrc_data             *file_data;
                struct io_rsrc_data             *buf_data;

                struct delayed_work             rsrc_put_work;
                struct llist_head               rsrc_put_llist;
                struct list_head                rsrc_ref_list;
                spinlock_t                      rsrc_ref_lock;

                struct list_head        io_buffers_pages;
        };

        /* Keep this last, we don't need it for the fast path */
        struct {
                #if defined(CONFIG_UNIX)
                        struct socket           *ring_sock;
                #endif
                /* hashed buffered write serialization */
                struct io_wq_hash               *hash_map;

                /* Only used for accounting purposes */
                struct user_struct              *user;
                struct mm_struct                *mm_account;

                /* ctx exit and cancelation */
                struct llist_head               fallback_llist;
                struct delayed_work             fallback_work;
                struct work_struct              exit_work;
                struct list_head                tctx_list;
                struct completion               ref_comp;
                u32                             iowq_limits[2];
                bool                            iowq_limits_set;
        };
};
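
/*
 * Sketch of the kernel-side use of the ->sq_array indirection described
 * above: fetching the next SQE goes through the index ring, roughly as
 * follows (illustrative; error paths and caching details are elided):
 *
 *      unsigned head = ctx->cached_sq_head++ & (ctx->sq_entries - 1);
 *      unsigned idx = READ_ONCE(ctx->sq_array[head]);
 *
 *      if (likely(idx < ctx->sq_entries))
 *              sqe = &ctx->sq_sqes[idx];
 *      else
 *              ctx->rings->sq_dropped++; // bogus index, count it as dropped
 */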

/*
 * Arbitrary limit, can be raised if need be
 */
#define IO_RINGFD_REG_MAX 16

struct io_uring_task {
        /* submission side */
        int                     cached_refs;
        struct xarray           xa;
        struct wait_queue_head  wait;
        const struct io_ring_ctx *last;
        struct io_wq            *io_wq;
        struct percpu_counter   inflight;
        atomic_t                in_idle;

        spinlock_t              task_lock;
        struct io_wq_work_list  task_list;
        struct io_wq_work_list  prior_task_list;
        struct callback_head    task_work;
        struct file             **registered_rings;
        bool                    task_running;
};

/*
 * First field must be the file pointer in all the
 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
 */
struct io_poll_iocb {
        struct file                     *file;
        struct wait_queue_head          *head;
        __poll_t                        events;
        struct wait_queue_entry         wait;
};

struct io_poll_update {
        struct file                     *file;
        u64                             old_user_data;
        u64                             new_user_data;
        __poll_t                        events;
        bool                            update_events;
        bool                            update_user_data;
};

struct io_close {
        struct file                     *file;
        int                             fd;
        u32                             file_slot;
};

struct io_timeout_data {
        struct io_kiocb                 *req;
        struct hrtimer                  timer;
        struct timespec64               ts;
        enum hrtimer_mode               mode;
        u32                             flags;
};

struct io_accept {
        struct file                     *file;
        struct sockaddr __user          *addr;
        int __user                      *addr_len;
        int                             flags;
        u32                             file_slot;
        unsigned long                   nofile;
};

struct io_sync {
        struct file                     *file;
        loff_t                          len;
        loff_t                          off;
        int                             flags;
        int                             mode;
};

struct io_cancel {
        struct file                     *file;
        u64                             addr;
};

struct io_timeout {
        struct file                     *file;
        u32                             off;
        u32                             target_seq;
        struct list_head                list;
        /* head of the link, used by linked timeouts only */
        struct io_kiocb                 *head;
        /* for linked completions */
        struct io_kiocb                 *prev;
};

struct io_timeout_rem {
        struct file                     *file;
        u64                             addr;

        /* timeout update */
        struct timespec64               ts;
        u32                             flags;
        bool                            ltimeout;
};

struct io_rw {
        /* NOTE: kiocb has the file as the first member, so don't do it here */
        struct kiocb                    kiocb;
        u64                             addr;
        u32                             len;
        u32                             flags;
};

struct io_connect {
        struct file                     *file;
        struct sockaddr __user          *addr;
        int                             addr_len;
};

struct io_sr_msg {
        struct file                     *file;
        union {
                struct compat_msghdr __user     *umsg_compat;
                struct user_msghdr __user       *umsg;
                void __user                     *buf;
        };
        int                             msg_flags;
        int                             bgid;
        size_t                          len;
        size_t                          done_io;
};

struct io_open {
        struct file                     *file;
        int                             dfd;
        u32                             file_slot;
        struct filename                 *filename;
        struct open_how                 how;
        unsigned long                   nofile;
};

struct io_rsrc_update {
        struct file                     *file;
        u64                             arg;
        u32                             nr_args;
        u32                             offset;
};

struct io_fadvise {
        struct file                     *file;
        u64                             offset;
        u32                             len;
        u32                             advice;
};

struct io_madvise {
        struct file                     *file;
        u64                             addr;
        u32                             len;
        u32                             advice;
};

struct io_epoll {
        struct file                     *file;
        int                             epfd;
        int                             op;
        int                             fd;
        struct epoll_event              event;
};

struct io_splice {
        struct file                     *file_out;
        loff_t                          off_out;
        loff_t                          off_in;
        u64                             len;
        int                             splice_fd_in;
        unsigned int                    flags;
};

struct io_provide_buf {
        struct file                     *file;
        __u64                           addr;
        __u32                           len;
        __u32                           bgid;
        __u16                           nbufs;
        __u16                           bid;
};

struct io_statx {
        struct file                     *file;
        int                             dfd;
        unsigned int                    mask;
        unsigned int                    flags;
        struct filename                 *filename;
        struct statx __user             *buffer;
};

struct io_shutdown {
        struct file                     *file;
        int                             how;
};

struct io_rename {
        struct file                     *file;
        int                             old_dfd;
        int                             new_dfd;
        struct filename                 *oldpath;
        struct filename                 *newpath;
        int                             flags;
};

struct io_unlink {
        struct file                     *file;
        int                             dfd;
        int                             flags;
        struct filename                 *filename;
};

struct io_mkdir {
        struct file                     *file;
        int                             dfd;
        umode_t                         mode;
        struct filename                 *filename;
};

struct io_symlink {
        struct file                     *file;
        int                             new_dfd;
        struct filename                 *oldpath;
        struct filename                 *newpath;
};

struct io_hardlink {
        struct file                     *file;
        int                             old_dfd;
        int                             new_dfd;
        struct filename                 *oldpath;
        struct filename                 *newpath;
        int                             flags;
};

struct io_msg {
        struct file                     *file;
        u64 user_data;
        u32 len;
};

struct io_async_connect {
        struct sockaddr_storage         address;
};

struct io_async_msghdr {
        struct iovec                    fast_iov[UIO_FASTIOV];
        /* points to an allocated iov, if NULL we use fast_iov instead */
        struct iovec                    *free_iov;
        struct sockaddr __user          *uaddr;
        struct msghdr                   msg;
        struct sockaddr_storage         addr;
};

struct io_rw_state {
        struct iov_iter                 iter;
        struct iov_iter_state           iter_state;
        struct iovec                    fast_iov[UIO_FASTIOV];
};

struct io_async_rw {
        struct io_rw_state              s;
        const struct iovec              *free_iovec;
        size_t                          bytes_done;
        struct wait_page_queue          wpq;
};

enum {
        REQ_F_FIXED_FILE_BIT    = IOSQE_FIXED_FILE_BIT,
        REQ_F_IO_DRAIN_BIT      = IOSQE_IO_DRAIN_BIT,
        REQ_F_LINK_BIT          = IOSQE_IO_LINK_BIT,
        REQ_F_HARDLINK_BIT      = IOSQE_IO_HARDLINK_BIT,
        REQ_F_FORCE_ASYNC_BIT   = IOSQE_ASYNC_BIT,
        REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT,
        REQ_F_CQE_SKIP_BIT      = IOSQE_CQE_SKIP_SUCCESS_BIT,

        /* first byte is taken by user flags, shift it to not overlap */
        REQ_F_FAIL_BIT          = 8,
        REQ_F_INFLIGHT_BIT,
        REQ_F_CUR_POS_BIT,
        REQ_F_NOWAIT_BIT,
        REQ_F_LINK_TIMEOUT_BIT,
        REQ_F_NEED_CLEANUP_BIT,
        REQ_F_POLLED_BIT,
        REQ_F_BUFFER_SELECTED_BIT,
        REQ_F_COMPLETE_INLINE_BIT,
        REQ_F_REISSUE_BIT,
        REQ_F_CREDS_BIT,
        REQ_F_REFCOUNT_BIT,
        REQ_F_ARM_LTIMEOUT_BIT,
        REQ_F_ASYNC_DATA_BIT,
        REQ_F_SKIP_LINK_CQES_BIT,
        REQ_F_SINGLE_POLL_BIT,
        REQ_F_DOUBLE_POLL_BIT,
        REQ_F_PARTIAL_IO_BIT,
        /* keep async read/write and isreg together and in order */
        REQ_F_SUPPORT_NOWAIT_BIT,
        REQ_F_ISREG_BIT,

        /* not a real bit, just to check we're not overflowing the space */
        __REQ_F_LAST_BIT,
};
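
/*
 * __REQ_F_LAST_BIT exists only so the number of bits in use can be checked
 * against the width of io_kiocb's flags field. A compile-time assertion
 * along these lines would do (illustrative; ->flags is an unsigned int):
 *
 *      static_assert(__REQ_F_LAST_BIT <= 8 * sizeof(unsigned int));
 */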

enum {
        /* ctx owns file */
        REQ_F_FIXED_FILE        = BIT(REQ_F_FIXED_FILE_BIT),
        /* drain existing IO first */
        REQ_F_IO_DRAIN          = BIT(REQ_F_IO_DRAIN_BIT),
        /* linked sqes */
        REQ_F_LINK              = BIT(REQ_F_LINK_BIT),
        /* doesn't sever on completion < 0 */
        REQ_F_HARDLINK          = BIT(REQ_F_HARDLINK_BIT),
        /* IOSQE_ASYNC */
        REQ_F_FORCE_ASYNC       = BIT(REQ_F_FORCE_ASYNC_BIT),
        /* IOSQE_BUFFER_SELECT */
        REQ_F_BUFFER_SELECT     = BIT(REQ_F_BUFFER_SELECT_BIT),
        /* IOSQE_CQE_SKIP_SUCCESS */
        REQ_F_CQE_SKIP          = BIT(REQ_F_CQE_SKIP_BIT),

        /* fail rest of links */
        REQ_F_FAIL              = BIT(REQ_F_FAIL_BIT),
        /* on inflight list, should be cancelled and waited on exit reliably */
        REQ_F_INFLIGHT          = BIT(REQ_F_INFLIGHT_BIT),
        /* read/write uses file position */
        REQ_F_CUR_POS           = BIT(REQ_F_CUR_POS_BIT),
        /* must not punt to workers */
        REQ_F_NOWAIT            = BIT(REQ_F_NOWAIT_BIT),
        /* has or had linked timeout */
        REQ_F_LINK_TIMEOUT      = BIT(REQ_F_LINK_TIMEOUT_BIT),
        /* needs cleanup */
        REQ_F_NEED_CLEANUP      = BIT(REQ_F_NEED_CLEANUP_BIT),
        /* already went through poll handler */
        REQ_F_POLLED            = BIT(REQ_F_POLLED_BIT),
        /* buffer already selected */
        REQ_F_BUFFER_SELECTED   = BIT(REQ_F_BUFFER_SELECTED_BIT),
        /* completion is deferred through io_comp_state */
        REQ_F_COMPLETE_INLINE   = BIT(REQ_F_COMPLETE_INLINE_BIT),
        /* caller should reissue async */
        REQ_F_REISSUE           = BIT(REQ_F_REISSUE_BIT),
        /* supports async reads/writes */
        REQ_F_SUPPORT_NOWAIT    = BIT(REQ_F_SUPPORT_NOWAIT_BIT),
        /* regular file */
        REQ_F_ISREG             = BIT(REQ_F_ISREG_BIT),
        /* has creds assigned */
        REQ_F_CREDS             = BIT(REQ_F_CREDS_BIT),
        /* skip refcounting if not set */
        REQ_F_REFCOUNT          = BIT(REQ_F_REFCOUNT_BIT),
        /* there is a linked timeout that has to be armed */
        REQ_F_ARM_LTIMEOUT      = BIT(REQ_F_ARM_LTIMEOUT_BIT),
        /* ->async_data allocated */
        REQ_F_ASYNC_DATA        = BIT(REQ_F_ASYNC_DATA_BIT),
        /* don't post CQEs while failing linked requests */
        REQ_F_SKIP_LINK_CQES    = BIT(REQ_F_SKIP_LINK_CQES_BIT),
        /* single poll may be active */
        REQ_F_SINGLE_POLL       = BIT(REQ_F_SINGLE_POLL_BIT),
        /* double poll may be active */
        REQ_F_DOUBLE_POLL       = BIT(REQ_F_DOUBLE_POLL_BIT),
        /* request has already done partial IO */
        REQ_F_PARTIAL_IO        = BIT(REQ_F_PARTIAL_IO_BIT),
};

struct async_poll {
        struct io_poll_iocb     poll;
        struct io_poll_iocb     *double_poll;
};

typedef void (*io_req_tw_func_t)(struct io_kiocb *req, bool *locked);

struct io_task_work {
        union {
                struct io_wq_work_node  node;
                struct llist_node       fallback_node;
        };
        io_req_tw_func_t                func;
};

enum {
        IORING_RSRC_FILE                = 0,
        IORING_RSRC_BUFFER              = 1,
};

/*
 * NOTE! Each of the iocb union members has the file pointer
 * as the first entry in their struct definition. So you can
 * access the file pointer through any of the sub-structs,
 * or directly as just 'file' in this struct (a sketch follows
 * the struct).
 */
struct io_kiocb {
        union {
                struct file             *file;
                struct io_rw            rw;
                struct io_poll_iocb     poll;
                struct io_poll_update   poll_update;
                struct io_accept        accept;
                struct io_sync          sync;
                struct io_cancel        cancel;
                struct io_timeout       timeout;
                struct io_timeout_rem   timeout_rem;
                struct io_connect       connect;
                struct io_sr_msg        sr_msg;
                struct io_open          open;
                struct io_close         close;
                struct io_rsrc_update   rsrc_update;
                struct io_fadvise       fadvise;
                struct io_madvise       madvise;
                struct io_epoll         epoll;
                struct io_splice        splice;
                struct io_provide_buf   pbuf;
                struct io_statx         statx;
                struct io_shutdown      shutdown;
                struct io_rename        rename;
                struct io_unlink        unlink;
                struct io_mkdir         mkdir;
                struct io_symlink       symlink;
                struct io_hardlink      hardlink;
                struct io_msg           msg;
        };

        u8                              opcode;
        /* polled IO has completed */
        u8                              iopoll_completed;
        u16                             buf_index;
        unsigned int                    flags;

        u64                             user_data;
        u32                             result;
        /* fd initially, then cflags for completion */
        union {
                u32                     cflags;
                int                     fd;
        };

        struct io_ring_ctx              *ctx;
        struct task_struct              *task;

        struct percpu_ref               *fixed_rsrc_refs;
        /* store used ubuf, so we can prevent reloading */
        struct io_mapped_ubuf           *imu;

        union {
                /* used by request caches, completion batching and iopoll */
                struct io_wq_work_node  comp_list;
                /* cache ->apoll->events */
                int apoll_events;
        };
        atomic_t                        refs;
        atomic_t                        poll_refs;
        struct io_task_work             io_task_work;
        /* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
        struct hlist_node               hash_node;
        /* internal polling, see IORING_FEAT_FAST_POLL */
        struct async_poll               *apoll;
        /* opcode allocated if it needs to store data for async defer */
        void                            *async_data;
        /* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
        struct io_buffer                *kbuf;
        /* linked requests, IFF REQ_F_HARDLINK or REQ_F_LINK are set */
        struct io_kiocb                 *link;
        /* custom credentials, valid IFF REQ_F_CREDS is set */
        const struct cred               *creds;
        struct io_wq_work               work;
};
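
/*
 * Per the NOTE above the struct, all of the following name the same memory,
 * since every union member (and struct kiocb, via ki_filp) starts with the
 * file pointer (illustrative):
 *
 *      req->file == req->rw.kiocb.ki_filp
 *      req->file == req->poll.file
 *      req->file == req->open.file
 *
 * Generic code can therefore use req->file directly, while each opcode
 * handler works through its own view of the union.
 */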
     946             : 
     947             : struct io_tctx_node {
     948             :         struct list_head        ctx_node;
     949             :         struct task_struct      *task;
     950             :         struct io_ring_ctx      *ctx;
     951             : };
     952             : 
     953             : struct io_defer_entry {
     954             :         struct list_head        list;
     955             :         struct io_kiocb         *req;
     956             :         u32                     seq;
     957             : };
     958             : 
     959             : struct io_op_def {
     960             :         /* needs req->file assigned */
     961             :         unsigned                needs_file : 1;
     962             :         /* should block plug */
     963             :         unsigned                plug : 1;
     964             :         /* hash wq insertion if file is a regular file */
     965             :         unsigned                hash_reg_file : 1;
     966             :         /* unbound wq insertion if file is a non-regular file */
     967             :         unsigned                unbound_nonreg_file : 1;
     968             :         /* set if opcode supports polled "wait" */
     969             :         unsigned                pollin : 1;
     970             :         unsigned                pollout : 1;
     971             :         unsigned                poll_exclusive : 1;
     972             :         /* op supports buffer selection */
     973             :         unsigned                buffer_select : 1;
      974             :         /* do prep async if it is going to be punted */
     975             :         unsigned                needs_async_setup : 1;
     976             :         /* opcode is not supported by this kernel */
     977             :         unsigned                not_supported : 1;
     978             :         /* skip auditing */
     979             :         unsigned                audit_skip : 1;
     980             :         /* size of async data needed, if any */
     981             :         unsigned short          async_size;
     982             : };
     983             : 
     984             : static const struct io_op_def io_op_defs[] = {
     985             :         [IORING_OP_NOP] = {},
     986             :         [IORING_OP_READV] = {
     987             :                 .needs_file             = 1,
     988             :                 .unbound_nonreg_file    = 1,
     989             :                 .pollin                 = 1,
     990             :                 .buffer_select          = 1,
     991             :                 .needs_async_setup      = 1,
     992             :                 .plug                   = 1,
     993             :                 .audit_skip             = 1,
     994             :                 .async_size             = sizeof(struct io_async_rw),
     995             :         },
     996             :         [IORING_OP_WRITEV] = {
     997             :                 .needs_file             = 1,
     998             :                 .hash_reg_file          = 1,
     999             :                 .unbound_nonreg_file    = 1,
    1000             :                 .pollout                = 1,
    1001             :                 .needs_async_setup      = 1,
    1002             :                 .plug                   = 1,
    1003             :                 .audit_skip             = 1,
    1004             :                 .async_size             = sizeof(struct io_async_rw),
    1005             :         },
    1006             :         [IORING_OP_FSYNC] = {
    1007             :                 .needs_file             = 1,
    1008             :                 .audit_skip             = 1,
    1009             :         },
    1010             :         [IORING_OP_READ_FIXED] = {
    1011             :                 .needs_file             = 1,
    1012             :                 .unbound_nonreg_file    = 1,
    1013             :                 .pollin                 = 1,
    1014             :                 .plug                   = 1,
    1015             :                 .audit_skip             = 1,
    1016             :                 .async_size             = sizeof(struct io_async_rw),
    1017             :         },
    1018             :         [IORING_OP_WRITE_FIXED] = {
    1019             :                 .needs_file             = 1,
    1020             :                 .hash_reg_file          = 1,
    1021             :                 .unbound_nonreg_file    = 1,
    1022             :                 .pollout                = 1,
    1023             :                 .plug                   = 1,
    1024             :                 .audit_skip             = 1,
    1025             :                 .async_size             = sizeof(struct io_async_rw),
    1026             :         },
    1027             :         [IORING_OP_POLL_ADD] = {
    1028             :                 .needs_file             = 1,
    1029             :                 .unbound_nonreg_file    = 1,
    1030             :                 .audit_skip             = 1,
    1031             :         },
    1032             :         [IORING_OP_POLL_REMOVE] = {
    1033             :                 .audit_skip             = 1,
    1034             :         },
    1035             :         [IORING_OP_SYNC_FILE_RANGE] = {
    1036             :                 .needs_file             = 1,
    1037             :                 .audit_skip             = 1,
    1038             :         },
    1039             :         [IORING_OP_SENDMSG] = {
    1040             :                 .needs_file             = 1,
    1041             :                 .unbound_nonreg_file    = 1,
    1042             :                 .pollout                = 1,
    1043             :                 .needs_async_setup      = 1,
    1044             :                 .async_size             = sizeof(struct io_async_msghdr),
    1045             :         },
    1046             :         [IORING_OP_RECVMSG] = {
    1047             :                 .needs_file             = 1,
    1048             :                 .unbound_nonreg_file    = 1,
    1049             :                 .pollin                 = 1,
    1050             :                 .buffer_select          = 1,
    1051             :                 .needs_async_setup      = 1,
    1052             :                 .async_size             = sizeof(struct io_async_msghdr),
    1053             :         },
    1054             :         [IORING_OP_TIMEOUT] = {
    1055             :                 .audit_skip             = 1,
    1056             :                 .async_size             = sizeof(struct io_timeout_data),
    1057             :         },
    1058             :         [IORING_OP_TIMEOUT_REMOVE] = {
    1059             :                 /* used by timeout updates' prep() */
    1060             :                 .audit_skip             = 1,
    1061             :         },
    1062             :         [IORING_OP_ACCEPT] = {
    1063             :                 .needs_file             = 1,
    1064             :                 .unbound_nonreg_file    = 1,
    1065             :                 .pollin                 = 1,
    1066             :                 .poll_exclusive         = 1,
    1067             :         },
    1068             :         [IORING_OP_ASYNC_CANCEL] = {
    1069             :                 .audit_skip             = 1,
    1070             :         },
    1071             :         [IORING_OP_LINK_TIMEOUT] = {
    1072             :                 .audit_skip             = 1,
    1073             :                 .async_size             = sizeof(struct io_timeout_data),
    1074             :         },
    1075             :         [IORING_OP_CONNECT] = {
    1076             :                 .needs_file             = 1,
    1077             :                 .unbound_nonreg_file    = 1,
    1078             :                 .pollout                = 1,
    1079             :                 .needs_async_setup      = 1,
    1080             :                 .async_size             = sizeof(struct io_async_connect),
    1081             :         },
    1082             :         [IORING_OP_FALLOCATE] = {
    1083             :                 .needs_file             = 1,
    1084             :         },
    1085             :         [IORING_OP_OPENAT] = {},
    1086             :         [IORING_OP_CLOSE] = {},
    1087             :         [IORING_OP_FILES_UPDATE] = {
    1088             :                 .audit_skip             = 1,
    1089             :         },
    1090             :         [IORING_OP_STATX] = {
    1091             :                 .audit_skip             = 1,
    1092             :         },
    1093             :         [IORING_OP_READ] = {
    1094             :                 .needs_file             = 1,
    1095             :                 .unbound_nonreg_file    = 1,
    1096             :                 .pollin                 = 1,
    1097             :                 .buffer_select          = 1,
    1098             :                 .plug                   = 1,
    1099             :                 .audit_skip             = 1,
    1100             :                 .async_size             = sizeof(struct io_async_rw),
    1101             :         },
    1102             :         [IORING_OP_WRITE] = {
    1103             :                 .needs_file             = 1,
    1104             :                 .hash_reg_file          = 1,
    1105             :                 .unbound_nonreg_file    = 1,
    1106             :                 .pollout                = 1,
    1107             :                 .plug                   = 1,
    1108             :                 .audit_skip             = 1,
    1109             :                 .async_size             = sizeof(struct io_async_rw),
    1110             :         },
    1111             :         [IORING_OP_FADVISE] = {
    1112             :                 .needs_file             = 1,
    1113             :                 .audit_skip             = 1,
    1114             :         },
    1115             :         [IORING_OP_MADVISE] = {},
    1116             :         [IORING_OP_SEND] = {
    1117             :                 .needs_file             = 1,
    1118             :                 .unbound_nonreg_file    = 1,
    1119             :                 .pollout                = 1,
    1120             :                 .audit_skip             = 1,
    1121             :         },
    1122             :         [IORING_OP_RECV] = {
    1123             :                 .needs_file             = 1,
    1124             :                 .unbound_nonreg_file    = 1,
    1125             :                 .pollin                 = 1,
    1126             :                 .buffer_select          = 1,
    1127             :                 .audit_skip             = 1,
    1128             :         },
    1129             :         [IORING_OP_OPENAT2] = {
    1130             :         },
    1131             :         [IORING_OP_EPOLL_CTL] = {
    1132             :                 .unbound_nonreg_file    = 1,
    1133             :                 .audit_skip             = 1,
    1134             :         },
    1135             :         [IORING_OP_SPLICE] = {
    1136             :                 .needs_file             = 1,
    1137             :                 .hash_reg_file          = 1,
    1138             :                 .unbound_nonreg_file    = 1,
    1139             :                 .audit_skip             = 1,
    1140             :         },
    1141             :         [IORING_OP_PROVIDE_BUFFERS] = {
    1142             :                 .audit_skip             = 1,
    1143             :         },
    1144             :         [IORING_OP_REMOVE_BUFFERS] = {
    1145             :                 .audit_skip             = 1,
    1146             :         },
    1147             :         [IORING_OP_TEE] = {
    1148             :                 .needs_file             = 1,
    1149             :                 .hash_reg_file          = 1,
    1150             :                 .unbound_nonreg_file    = 1,
    1151             :                 .audit_skip             = 1,
    1152             :         },
    1153             :         [IORING_OP_SHUTDOWN] = {
    1154             :                 .needs_file             = 1,
    1155             :         },
    1156             :         [IORING_OP_RENAMEAT] = {},
    1157             :         [IORING_OP_UNLINKAT] = {},
    1158             :         [IORING_OP_MKDIRAT] = {},
    1159             :         [IORING_OP_SYMLINKAT] = {},
    1160             :         [IORING_OP_LINKAT] = {},
    1161             :         [IORING_OP_MSG_RING] = {
    1162             :                 .needs_file             = 1,
    1163             :         },
    1164             : };
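/*
 * Editorial sketch (not part of the kernel source): the table above is
 * consulted by opcode on the hot path, as io_prep_async_work() does
 * further down in this file; the needs_async_setup branch shown here is
 * an illustrative use, not a verbatim quote:
 *
 *     const struct io_op_def *def = &io_op_defs[req->opcode];
 *
 *     if (def->needs_async_setup)
 *             // allocate def->async_size bytes for req->async_data
 *
 * Keeping the per-op properties in one static const array makes the
 * lookup a single indexed load instead of a switch over every opcode.
 */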
    1165             : 
     1166             : /* requests with any of these flags set should undergo io_disarm_next() */
    1167             : #define IO_DISARM_MASK (REQ_F_ARM_LTIMEOUT | REQ_F_LINK_TIMEOUT | REQ_F_FAIL)
    1168             : 
    1169             : static bool io_disarm_next(struct io_kiocb *req);
    1170             : static void io_uring_del_tctx_node(unsigned long index);
    1171             : static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
    1172             :                                          struct task_struct *task,
    1173             :                                          bool cancel_all);
    1174             : static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
    1175             : 
    1176             : static void io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags);
    1177             : 
    1178             : static void io_put_req(struct io_kiocb *req);
    1179             : static void io_put_req_deferred(struct io_kiocb *req);
    1180             : static void io_dismantle_req(struct io_kiocb *req);
    1181             : static void io_queue_linked_timeout(struct io_kiocb *req);
    1182             : static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
    1183             :                                      struct io_uring_rsrc_update2 *up,
    1184             :                                      unsigned nr_args);
    1185             : static void io_clean_op(struct io_kiocb *req);
    1186             : static inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
    1187             :                                              unsigned issue_flags);
    1188             : static inline struct file *io_file_get_normal(struct io_kiocb *req, int fd);
    1189             : static void io_drop_inflight_file(struct io_kiocb *req);
    1190             : static bool io_assign_file(struct io_kiocb *req, unsigned int issue_flags);
    1191             : static void __io_queue_sqe(struct io_kiocb *req);
    1192             : static void io_rsrc_put_work(struct work_struct *work);
    1193             : 
    1194             : static void io_req_task_queue(struct io_kiocb *req);
    1195             : static void __io_submit_flush_completions(struct io_ring_ctx *ctx);
    1196             : static int io_req_prep_async(struct io_kiocb *req);
    1197             : 
    1198             : static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
    1199             :                                  unsigned int issue_flags, u32 slot_index);
    1200             : static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags);
    1201             : 
    1202             : static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer);
    1203             : static void io_eventfd_signal(struct io_ring_ctx *ctx);
    1204             : 
    1205             : static struct kmem_cache *req_cachep;
    1206             : 
    1207             : static const struct file_operations io_uring_fops;
    1208             : 
    1209           0 : struct sock *io_uring_get_socket(struct file *file)
    1210             : {
    1211             : #if defined(CONFIG_UNIX)
    1212             :         if (file->f_op == &io_uring_fops) {
    1213             :                 struct io_ring_ctx *ctx = file->private_data;
    1214             : 
    1215             :                 return ctx->ring_sock->sk;
    1216             :         }
    1217             : #endif
    1218           0 :         return NULL;
    1219             : }
    1220             : EXPORT_SYMBOL(io_uring_get_socket);
    1221             : 
    1222             : static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
    1223             : {
    1224           0 :         if (!*locked) {
    1225           0 :                 mutex_lock(&ctx->uring_lock);
    1226           0 :                 *locked = true;
    1227             :         }
    1228             : }
    1229             : 
    1230             : #define io_for_each_link(pos, head) \
    1231             :         for (pos = (head); pos; pos = pos->link)
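/*
 * Editorial usage sketch (mirrors io_prep_async_link() below): walk a
 * request chain via the ->link pointers until a request with no
 * successor is reached:
 *
 *     struct io_kiocb *cur;
 *
 *     io_for_each_link(cur, req)
 *             io_prep_async_work(cur);
 */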
    1232             : 
    1233             : /*
    1234             :  * Shamelessly stolen from the mm implementation of page reference checking,
    1235             :  * see commit f958d7b528b1 for details.
    1236             :  */
    1237             : #define req_ref_zero_or_close_to_overflow(req)  \
    1238             :         ((unsigned int) atomic_read(&(req->refs)) + 127u <= 127u)
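/*
 * Worked arithmetic for the check above (editorial note): the cast to
 * unsigned int makes a negative refcount wrap to a huge value, and
 * adding 127u folds both zero and underflow into the range [0, 127]:
 *
 *     refs =  1  ->   1u + 127u = 128   > 127   (healthy)
 *     refs =  0  ->   0u + 127u = 127  <= 127   (zero, caught)
 *     refs = -1  ->  ~0u + 127u = 126  <= 127   (underflow, caught)
 */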
    1239             : 
    1240           0 : static inline bool req_ref_inc_not_zero(struct io_kiocb *req)
    1241             : {
    1242           0 :         WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
    1243           0 :         return atomic_inc_not_zero(&req->refs);
    1244             : }
    1245             : 
    1246           0 : static inline bool req_ref_put_and_test(struct io_kiocb *req)
    1247             : {
    1248           0 :         if (likely(!(req->flags & REQ_F_REFCOUNT)))
    1249             :                 return true;
    1250             : 
    1251           0 :         WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
    1252           0 :         return atomic_dec_and_test(&req->refs);
    1253             : }
    1254             : 
    1255           0 : static inline void req_ref_get(struct io_kiocb *req)
    1256             : {
    1257           0 :         WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
    1258           0 :         WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
    1259           0 :         atomic_inc(&req->refs);
    1260           0 : }
    1261             : 
    1262             : static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
    1263             : {
    1264           0 :         if (!wq_list_empty(&ctx->submit_state.compl_reqs))
    1265           0 :                 __io_submit_flush_completions(ctx);
    1266             : }
    1267             : 
    1268             : static inline void __io_req_set_refcount(struct io_kiocb *req, int nr)
    1269             : {
    1270           0 :         if (!(req->flags & REQ_F_REFCOUNT)) {
    1271           0 :                 req->flags |= REQ_F_REFCOUNT;
    1272           0 :                 atomic_set(&req->refs, nr);
    1273             :         }
    1274             : }
    1275             : 
    1276             : static inline void io_req_set_refcount(struct io_kiocb *req)
    1277             : {
    1278           0 :         __io_req_set_refcount(req, 1);
    1279             : }
    1280             : 
    1281             : #define IO_RSRC_REF_BATCH       100
    1282             : 
    1283             : static inline void io_req_put_rsrc_locked(struct io_kiocb *req,
    1284             :                                           struct io_ring_ctx *ctx)
    1285             :         __must_hold(&ctx->uring_lock)
    1286             : {
    1287           0 :         struct percpu_ref *ref = req->fixed_rsrc_refs;
    1288             : 
    1289           0 :         if (ref) {
    1290           0 :                 if (ref == &ctx->rsrc_node->refs)
    1291           0 :                         ctx->rsrc_cached_refs++;
    1292             :                 else
    1293             :                         percpu_ref_put(ref);
    1294             :         }
    1295             : }
    1296             : 
    1297             : static inline void io_req_put_rsrc(struct io_kiocb *req, struct io_ring_ctx *ctx)
    1298             : {
    1299           0 :         if (req->fixed_rsrc_refs)
    1300           0 :                 percpu_ref_put(req->fixed_rsrc_refs);
    1301             : }
    1302             : 
    1303           0 : static __cold void io_rsrc_refs_drop(struct io_ring_ctx *ctx)
    1304             :         __must_hold(&ctx->uring_lock)
    1305             : {
    1306           0 :         if (ctx->rsrc_cached_refs) {
    1307           0 :                 percpu_ref_put_many(&ctx->rsrc_node->refs, ctx->rsrc_cached_refs);
    1308           0 :                 ctx->rsrc_cached_refs = 0;
    1309             :         }
    1310           0 : }
    1311             : 
    1312             : static void io_rsrc_refs_refill(struct io_ring_ctx *ctx)
    1313             :         __must_hold(&ctx->uring_lock)
    1314             : {
    1315           0 :         ctx->rsrc_cached_refs += IO_RSRC_REF_BATCH;
    1316           0 :         percpu_ref_get_many(&ctx->rsrc_node->refs, IO_RSRC_REF_BATCH);
    1317             : }
    1318             : 
    1319           0 : static inline void io_req_set_rsrc_node(struct io_kiocb *req,
    1320             :                                         struct io_ring_ctx *ctx,
    1321             :                                         unsigned int issue_flags)
    1322             : {
    1323           0 :         if (!req->fixed_rsrc_refs) {
    1324           0 :                 req->fixed_rsrc_refs = &ctx->rsrc_node->refs;
    1325             : 
    1326           0 :                 if (!(issue_flags & IO_URING_F_UNLOCKED)) {
    1327             :                         lockdep_assert_held(&ctx->uring_lock);
    1328           0 :                         ctx->rsrc_cached_refs--;
    1329           0 :                         if (unlikely(ctx->rsrc_cached_refs < 0))
    1330           0 :                                 io_rsrc_refs_refill(ctx);
    1331             :                 } else {
    1332           0 :                         percpu_ref_get(req->fixed_rsrc_refs);
    1333             :                 }
    1334             :         }
    1335           0 : }
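/*
 * Editorial sketch of the caching above: with uring_lock held, taking a
 * node reference is a plain decrement of ctx->rsrc_cached_refs; the
 * cache is topped up from the percpu ref in chunks of IO_RSRC_REF_BATCH
 * only when it goes negative, and io_rsrc_refs_drop() hands any surplus
 * back. Unlocked callers fall back to percpu_ref_get() directly.
 */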
    1336             : 
    1337             : static unsigned int __io_put_kbuf(struct io_kiocb *req, struct list_head *list)
    1338             : {
    1339           0 :         struct io_buffer *kbuf = req->kbuf;
    1340             :         unsigned int cflags;
    1341             : 
    1342           0 :         cflags = IORING_CQE_F_BUFFER | (kbuf->bid << IORING_CQE_BUFFER_SHIFT);
    1343           0 :         req->flags &= ~REQ_F_BUFFER_SELECTED;
    1344           0 :         list_add(&kbuf->list, list);
    1345           0 :         req->kbuf = NULL;
    1346             :         return cflags;
    1347             : }
    1348             : 
    1349             : static inline unsigned int io_put_kbuf_comp(struct io_kiocb *req)
    1350             : {
    1351           0 :         lockdep_assert_held(&req->ctx->completion_lock);
    1352             : 
    1353           0 :         if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
    1354             :                 return 0;
    1355           0 :         return __io_put_kbuf(req, &req->ctx->io_buffers_comp);
    1356             : }
    1357             : 
    1358           0 : static inline unsigned int io_put_kbuf(struct io_kiocb *req,
    1359             :                                        unsigned issue_flags)
    1360             : {
    1361             :         unsigned int cflags;
    1362             : 
    1363           0 :         if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
    1364             :                 return 0;
    1365             : 
    1366             :         /*
    1367             :          * We can add this buffer back to two lists:
    1368             :          *
    1369             :          * 1) The io_buffers_cache list. This one is protected by the
    1370             :          *    ctx->uring_lock. If we already hold this lock, add back to this
    1371             :          *    list as we can grab it from issue as well.
    1372             :          * 2) The io_buffers_comp list. This one is protected by the
    1373             :          *    ctx->completion_lock.
    1374             :          *
    1375             :          * We migrate buffers from the comp_list to the issue cache list
    1376             :          * when we need one.
    1377             :          */
    1378           0 :         if (issue_flags & IO_URING_F_UNLOCKED) {
    1379           0 :                 struct io_ring_ctx *ctx = req->ctx;
    1380             : 
    1381           0 :                 spin_lock(&ctx->completion_lock);
    1382           0 :                 cflags = __io_put_kbuf(req, &ctx->io_buffers_comp);
    1383           0 :                 spin_unlock(&ctx->completion_lock);
    1384             :         } else {
    1385           0 :                 lockdep_assert_held(&req->ctx->uring_lock);
    1386             : 
    1387           0 :                 cflags = __io_put_kbuf(req, &req->ctx->io_buffers_cache);
    1388             :         }
    1389             : 
    1390             :         return cflags;
    1391             : }
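/*
 * Editorial sketch of a call site (hypothetical, in the spirit of the
 * read/write completion paths): the caller threads its locking state
 * through issue_flags, so the helper above can prefer the cheaper
 * uring_lock-protected cache whenever that lock is already held:
 *
 *     u32 cflags = io_put_kbuf(req, issue_flags);
 *     __io_req_complete(req, issue_flags, res, cflags);
 */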
    1392             : 
    1393             : static struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
    1394             :                                                  unsigned int bgid)
    1395             : {
    1396             :         struct list_head *hash_list;
    1397             :         struct io_buffer_list *bl;
    1398             : 
    1399           0 :         hash_list = &ctx->io_buffers[hash_32(bgid, IO_BUFFERS_HASH_BITS)];
    1400           0 :         list_for_each_entry(bl, hash_list, list)
    1401           0 :                 if (bl->bgid == bgid || bgid == -1U)
    1402             :                         return bl;
    1403             : 
    1404             :         return NULL;
    1405             : }
    1406             : 
    1407           0 : static void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
    1408             : {
    1409           0 :         struct io_ring_ctx *ctx = req->ctx;
    1410             :         struct io_buffer_list *bl;
    1411             :         struct io_buffer *buf;
    1412             : 
    1413           0 :         if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
    1414             :                 return;
    1415             :         /* don't recycle if we already did IO to this buffer */
    1416           0 :         if (req->flags & REQ_F_PARTIAL_IO)
    1417             :                 return;
    1418             : 
    1419           0 :         if (issue_flags & IO_URING_F_UNLOCKED)
    1420           0 :                 mutex_lock(&ctx->uring_lock);
    1421             : 
    1422             :         lockdep_assert_held(&ctx->uring_lock);
    1423             : 
    1424           0 :         buf = req->kbuf;
    1425           0 :         bl = io_buffer_get_list(ctx, buf->bgid);
    1426           0 :         list_add(&buf->list, &bl->buf_list);
    1427           0 :         req->flags &= ~REQ_F_BUFFER_SELECTED;
    1428           0 :         req->kbuf = NULL;
    1429             : 
    1430           0 :         if (issue_flags & IO_URING_F_UNLOCKED)
    1431           0 :                 mutex_unlock(&ctx->uring_lock);
    1432             : }
    1433             : 
    1434             : static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
    1435             :                           bool cancel_all)
    1436             :         __must_hold(&req->ctx->timeout_lock)
    1437             : {
    1438           0 :         if (task && head->task != task)
    1439             :                 return false;
    1440             :         return cancel_all;
    1441             : }
    1442             : 
    1443             : /*
    1444             :  * As io_match_task() but protected against racing with linked timeouts.
    1445             :  * User must not hold timeout_lock.
    1446             :  */
    1447             : static bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
    1448             :                                bool cancel_all)
    1449             : {
    1450           0 :         if (task && head->task != task)
    1451             :                 return false;
    1452             :         return cancel_all;
    1453             : }
    1454             : 
    1455             : static inline bool req_has_async_data(struct io_kiocb *req)
    1456             : {
    1457           0 :         return req->flags & REQ_F_ASYNC_DATA;
    1458             : }
    1459             : 
    1460             : static inline void req_set_fail(struct io_kiocb *req)
    1461             : {
    1462           0 :         req->flags |= REQ_F_FAIL;
    1463           0 :         if (req->flags & REQ_F_CQE_SKIP) {
    1464             :                 req->flags &= ~REQ_F_CQE_SKIP;
    1465           0 :                 req->flags |= REQ_F_SKIP_LINK_CQES;
    1466             :         }
    1467             : }
    1468             : 
    1469             : static inline void req_fail_link_node(struct io_kiocb *req, int res)
    1470             : {
    1471           0 :         req_set_fail(req);
    1472           0 :         req->result = res;
    1473             : }
    1474             : 
    1475           0 : static __cold void io_ring_ctx_ref_free(struct percpu_ref *ref)
    1476             : {
    1477           0 :         struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
    1478             : 
    1479           0 :         complete(&ctx->ref_comp);
    1480           0 : }
    1481             : 
    1482             : static inline bool io_is_timeout_noseq(struct io_kiocb *req)
    1483             : {
    1484             :         return !req->timeout.off;
    1485             : }
    1486             : 
    1487           0 : static __cold void io_fallback_req_func(struct work_struct *work)
    1488             : {
    1489           0 :         struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
    1490             :                                                 fallback_work.work);
    1491           0 :         struct llist_node *node = llist_del_all(&ctx->fallback_llist);
    1492             :         struct io_kiocb *req, *tmp;
    1493           0 :         bool locked = false;
    1494             : 
    1495           0 :         percpu_ref_get(&ctx->refs);
    1496           0 :         llist_for_each_entry_safe(req, tmp, node, io_task_work.fallback_node)
    1497           0 :                 req->io_task_work.func(req, &locked);
    1498             : 
    1499           0 :         if (locked) {
    1500           0 :                 io_submit_flush_completions(ctx);
    1501           0 :                 mutex_unlock(&ctx->uring_lock);
    1502             :         }
    1503           0 :         percpu_ref_put(&ctx->refs);
    1504           0 : }
    1505             : 
    1506           0 : static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
    1507             : {
    1508             :         struct io_ring_ctx *ctx;
    1509             :         int i, hash_bits;
    1510             : 
    1511           0 :         ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
    1512           0 :         if (!ctx)
    1513             :                 return NULL;
    1514             : 
    1515             :         /*
     1516             :          * Use 5 bits less than the max cq entries; that should give us around
    1517             :          * 32 entries per hash list if totally full and uniformly spread.
    1518             :          */
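        /*
         * Worked example (editorial): p->cq_entries = 4096 gives
         * ilog2(4096) = 12, so hash_bits = 7 and the table below holds
         * 1 << 7 = 128 buckets; 4096 / 128 = 32 entries per hash list
         * when completely full and uniformly spread.
         */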
    1519           0 :         hash_bits = ilog2(p->cq_entries);
    1520           0 :         hash_bits -= 5;
    1521           0 :         if (hash_bits <= 0)
    1522           0 :                 hash_bits = 1;
    1523           0 :         ctx->cancel_hash_bits = hash_bits;
    1524           0 :         ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
    1525             :                                         GFP_KERNEL);
    1526           0 :         if (!ctx->cancel_hash)
    1527             :                 goto err;
    1528           0 :         __hash_init(ctx->cancel_hash, 1U << hash_bits);
    1529             : 
    1530           0 :         ctx->dummy_ubuf = kzalloc(sizeof(*ctx->dummy_ubuf), GFP_KERNEL);
    1531           0 :         if (!ctx->dummy_ubuf)
    1532             :                 goto err;
     1533             :         /* set an invalid range, so io_import_fixed() fails when it meets it */
    1534           0 :         ctx->dummy_ubuf->ubuf = -1UL;
    1535             : 
    1536           0 :         ctx->io_buffers = kcalloc(1U << IO_BUFFERS_HASH_BITS,
    1537             :                                         sizeof(struct list_head), GFP_KERNEL);
    1538           0 :         if (!ctx->io_buffers)
    1539             :                 goto err;
    1540           0 :         for (i = 0; i < (1U << IO_BUFFERS_HASH_BITS); i++)
    1541           0 :                 INIT_LIST_HEAD(&ctx->io_buffers[i]);
    1542             : 
    1543           0 :         if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
    1544             :                             PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
    1545             :                 goto err;
    1546             : 
    1547           0 :         ctx->flags = p->flags;
    1548           0 :         init_waitqueue_head(&ctx->sqo_sq_wait);
    1549           0 :         INIT_LIST_HEAD(&ctx->sqd_list);
    1550           0 :         INIT_LIST_HEAD(&ctx->cq_overflow_list);
    1551           0 :         INIT_LIST_HEAD(&ctx->io_buffers_cache);
    1552           0 :         INIT_LIST_HEAD(&ctx->apoll_cache);
    1553           0 :         init_completion(&ctx->ref_comp);
    1554           0 :         xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
    1555           0 :         mutex_init(&ctx->uring_lock);
    1556           0 :         init_waitqueue_head(&ctx->cq_wait);
    1557           0 :         spin_lock_init(&ctx->completion_lock);
    1558           0 :         spin_lock_init(&ctx->timeout_lock);
    1559           0 :         INIT_WQ_LIST(&ctx->iopoll_list);
    1560           0 :         INIT_LIST_HEAD(&ctx->io_buffers_pages);
    1561           0 :         INIT_LIST_HEAD(&ctx->io_buffers_comp);
    1562           0 :         INIT_LIST_HEAD(&ctx->defer_list);
    1563           0 :         INIT_LIST_HEAD(&ctx->timeout_list);
    1564           0 :         INIT_LIST_HEAD(&ctx->ltimeout_list);
    1565           0 :         spin_lock_init(&ctx->rsrc_ref_lock);
    1566           0 :         INIT_LIST_HEAD(&ctx->rsrc_ref_list);
    1567           0 :         INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
    1568           0 :         init_llist_head(&ctx->rsrc_put_llist);
    1569           0 :         INIT_LIST_HEAD(&ctx->tctx_list);
    1570           0 :         ctx->submit_state.free_list.next = NULL;
    1571           0 :         INIT_WQ_LIST(&ctx->locked_free_list);
    1572           0 :         INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
    1573           0 :         INIT_WQ_LIST(&ctx->submit_state.compl_reqs);
    1574             :         return ctx;
    1575             : err:
    1576           0 :         kfree(ctx->dummy_ubuf);
    1577           0 :         kfree(ctx->cancel_hash);
    1578           0 :         kfree(ctx->io_buffers);
    1579           0 :         kfree(ctx);
    1580             :         return NULL;
    1581             : }
    1582             : 
    1583             : static void io_account_cq_overflow(struct io_ring_ctx *ctx)
    1584             : {
    1585           0 :         struct io_rings *r = ctx->rings;
    1586             : 
    1587           0 :         WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1);
    1588           0 :         ctx->cq_extra--;
    1589             : }
    1590             : 
    1591             : static bool req_need_defer(struct io_kiocb *req, u32 seq)
    1592             : {
    1593           0 :         if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
    1594           0 :                 struct io_ring_ctx *ctx = req->ctx;
    1595             : 
    1596           0 :                 return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail;
    1597             :         }
    1598             : 
    1599             :         return false;
    1600             : }
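/*
 * Editorial gloss on the check above: "seq" snapshots how many requests
 * had been submitted when the drained request was deferred, and cq_extra
 * compensates for CQEs that have no matching submission. The request
 * stays deferred until cached_cq_tail catches up, i.e. until everything
 * submitted before it has posted a completion.
 */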
    1601             : 
    1602             : #define FFS_NOWAIT              0x1UL
    1603             : #define FFS_ISREG               0x2UL
    1604             : #define FFS_MASK                ~(FFS_NOWAIT|FFS_ISREG)
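/*
 * Editorial sketch of the bit-stealing above: struct file pointers are
 * at least 4-byte aligned, so the two low bits of a fixed-file table
 * slot are free to cache per-file properties. Decoding a slot value
 * (variable names here are illustrative) looks like:
 *
 *     unsigned long file_ptr = ...;   // fixed file table slot
 *     struct file *file = (struct file *)(file_ptr & FFS_MASK);
 *     bool nowait = file_ptr & FFS_NOWAIT;
 *     bool isreg  = file_ptr & FFS_ISREG;
 */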
    1605             : 
    1606             : static inline bool io_req_ffs_set(struct io_kiocb *req)
    1607             : {
    1608           0 :         return req->flags & REQ_F_FIXED_FILE;
    1609             : }
    1610             : 
    1611           0 : static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
    1612             : {
    1613           0 :         if (WARN_ON_ONCE(!req->link))
    1614             :                 return NULL;
    1615             : 
    1616           0 :         req->flags &= ~REQ_F_ARM_LTIMEOUT;
    1617           0 :         req->flags |= REQ_F_LINK_TIMEOUT;
    1618             : 
    1619             :         /* linked timeouts should have two refs once prep'ed */
    1620           0 :         io_req_set_refcount(req);
    1621           0 :         __io_req_set_refcount(req->link, 2);
    1622           0 :         return req->link;
    1623             : }
    1624             : 
    1625             : static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
    1626             : {
    1627           0 :         if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT)))
    1628             :                 return NULL;
    1629           0 :         return __io_prep_linked_timeout(req);
    1630             : }
    1631             : 
    1632           0 : static void io_prep_async_work(struct io_kiocb *req)
    1633             : {
    1634           0 :         const struct io_op_def *def = &io_op_defs[req->opcode];
    1635           0 :         struct io_ring_ctx *ctx = req->ctx;
    1636             : 
    1637           0 :         if (!(req->flags & REQ_F_CREDS)) {
    1638           0 :                 req->flags |= REQ_F_CREDS;
    1639           0 :                 req->creds = get_current_cred();
    1640             :         }
    1641             : 
    1642           0 :         req->work.list.next = NULL;
    1643           0 :         req->work.flags = 0;
    1644           0 :         if (req->flags & REQ_F_FORCE_ASYNC)
    1645           0 :                 req->work.flags |= IO_WQ_WORK_CONCURRENT;
    1646             : 
    1647           0 :         if (req->flags & REQ_F_ISREG) {
    1648           0 :                 if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
    1649           0 :                         io_wq_hash_work(&req->work, file_inode(req->file));
    1650           0 :         } else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
    1651           0 :                 if (def->unbound_nonreg_file)
    1652           0 :                         req->work.flags |= IO_WQ_WORK_UNBOUND;
    1653             :         }
    1654           0 : }
    1655             : 
    1656           0 : static void io_prep_async_link(struct io_kiocb *req)
    1657             : {
    1658             :         struct io_kiocb *cur;
    1659             : 
    1660           0 :         if (req->flags & REQ_F_LINK_TIMEOUT) {
    1661           0 :                 struct io_ring_ctx *ctx = req->ctx;
    1662             : 
    1663           0 :                 spin_lock_irq(&ctx->timeout_lock);
    1664           0 :                 io_for_each_link(cur, req)
    1665           0 :                         io_prep_async_work(cur);
    1666           0 :                 spin_unlock_irq(&ctx->timeout_lock);
    1667             :         } else {
    1668           0 :                 io_for_each_link(cur, req)
    1669           0 :                         io_prep_async_work(cur);
    1670             :         }
    1671           0 : }
    1672             : 
    1673             : static inline void io_req_add_compl_list(struct io_kiocb *req)
    1674             : {
    1675           0 :         struct io_ring_ctx *ctx = req->ctx;
    1676           0 :         struct io_submit_state *state = &ctx->submit_state;
    1677             : 
    1678           0 :         if (!(req->flags & REQ_F_CQE_SKIP))
    1679           0 :                 ctx->submit_state.flush_cqes = true;
    1680           0 :         wq_list_add_tail(&req->comp_list, &state->compl_reqs);
    1681             : }
    1682             : 
    1683           0 : static void io_queue_async_work(struct io_kiocb *req, bool *dont_use)
    1684             : {
    1685           0 :         struct io_ring_ctx *ctx = req->ctx;
    1686           0 :         struct io_kiocb *link = io_prep_linked_timeout(req);
    1687           0 :         struct io_uring_task *tctx = req->task->io_uring;
    1688             : 
    1689           0 :         BUG_ON(!tctx);
    1690           0 :         BUG_ON(!tctx->io_wq);
    1691             : 
    1692             :         /* init ->work of the whole link before punting */
    1693           0 :         io_prep_async_link(req);
    1694             : 
    1695             :         /*
    1696             :          * Not expected to happen, but if we do have a bug where this _can_
    1697             :          * happen, catch it here and ensure the request is marked as
    1698             :          * canceled. That will make io-wq go through the usual work cancel
    1699             :          * procedure rather than attempt to run this request (or create a new
    1700             :          * worker for it).
    1701             :          */
    1702           0 :         if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
    1703           0 :                 req->work.flags |= IO_WQ_WORK_CANCEL;
    1704             : 
    1705           0 :         trace_io_uring_queue_async_work(ctx, req, req->user_data, req->opcode, req->flags,
    1706           0 :                                         &req->work, io_wq_is_hashed(&req->work));
    1707           0 :         io_wq_enqueue(tctx->io_wq, &req->work);
    1708           0 :         if (link)
    1709           0 :                 io_queue_linked_timeout(link);
    1710           0 : }
    1711             : 
    1712           0 : static void io_kill_timeout(struct io_kiocb *req, int status)
    1713             :         __must_hold(&req->ctx->completion_lock)
    1714             :         __must_hold(&req->ctx->timeout_lock)
    1715             : {
    1716           0 :         struct io_timeout_data *io = req->async_data;
    1717             : 
    1718           0 :         if (hrtimer_try_to_cancel(&io->timer) != -1) {
    1719           0 :                 if (status)
    1720           0 :                         req_set_fail(req);
    1721           0 :                 atomic_set(&req->ctx->cq_timeouts,
    1722           0 :                         atomic_read(&req->ctx->cq_timeouts) + 1);
    1723           0 :                 list_del_init(&req->timeout.list);
    1724           0 :                 io_fill_cqe_req(req, status, 0);
    1725           0 :                 io_put_req_deferred(req);
    1726             :         }
    1727           0 : }
    1728             : 
    1729           0 : static __cold void io_queue_deferred(struct io_ring_ctx *ctx)
    1730             : {
    1731           0 :         while (!list_empty(&ctx->defer_list)) {
    1732           0 :                 struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
    1733             :                                                 struct io_defer_entry, list);
    1734             : 
    1735           0 :                 if (req_need_defer(de->req, de->seq))
    1736             :                         break;
    1737           0 :                 list_del_init(&de->list);
    1738           0 :                 io_req_task_queue(de->req);
    1739           0 :                 kfree(de);
    1740             :         }
    1741           0 : }
    1742             : 
    1743           0 : static __cold void io_flush_timeouts(struct io_ring_ctx *ctx)
    1744             :         __must_hold(&ctx->completion_lock)
    1745             : {
    1746           0 :         u32 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
    1747             :         struct io_kiocb *req, *tmp;
    1748             : 
    1749           0 :         spin_lock_irq(&ctx->timeout_lock);
    1750           0 :         list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
    1751             :                 u32 events_needed, events_got;
    1752             : 
    1753           0 :                 if (io_is_timeout_noseq(req))
    1754             :                         break;
    1755             : 
    1756             :                 /*
    1757             :                  * Since seq can easily wrap around over time, subtract
    1758             :                  * the last seq at which timeouts were flushed before comparing.
    1759             :                  * Assuming not more than 2^31-1 events have happened since,
    1760             :                  * these subtractions won't have wrapped, so we can check if
    1761             :                  * target is in [last_seq, current_seq] by comparing the two.
    1762             :                  */
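                /*
                 * Worked example (editorial): if cq_last_tm_flush is
                 * 0xfffffffe, a timeout with target_seq = 1 and a current
                 * seq of 2 gives events_needed = 3 and events_got = 4 via
                 * the unsigned subtractions below, so the timeout fires
                 * even though the raw sequence numbers wrapped past zero.
                 */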
    1763           0 :                 events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
    1764           0 :                 events_got = seq - ctx->cq_last_tm_flush;
    1765           0 :                 if (events_got < events_needed)
    1766             :                         break;
    1767             : 
    1768           0 :                 io_kill_timeout(req, 0);
    1769             :         }
    1770           0 :         ctx->cq_last_tm_flush = seq;
    1771           0 :         spin_unlock_irq(&ctx->timeout_lock);
    1772           0 : }
    1773             : 
    1774             : static inline void io_commit_cqring(struct io_ring_ctx *ctx)
    1775             : {
    1776             :         /* order cqe stores with ring update */
    1777           0 :         smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
    1778             : }
    1779             : 
    1780           0 : static void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
    1781             : {
    1782           0 :         if (ctx->off_timeout_used || ctx->drain_active) {
    1783           0 :                 spin_lock(&ctx->completion_lock);
    1784           0 :                 if (ctx->off_timeout_used)
    1785           0 :                         io_flush_timeouts(ctx);
    1786           0 :                 if (ctx->drain_active)
    1787           0 :                         io_queue_deferred(ctx);
    1788           0 :                 io_commit_cqring(ctx);
    1789           0 :                 spin_unlock(&ctx->completion_lock);
    1790             :         }
    1791           0 :         if (ctx->has_evfd)
    1792           0 :                 io_eventfd_signal(ctx);
    1793           0 : }
    1794             : 
    1795             : static inline bool io_sqring_full(struct io_ring_ctx *ctx)
    1796             : {
    1797           0 :         struct io_rings *r = ctx->rings;
    1798             : 
    1799           0 :         return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
    1800             : }
    1801             : 
    1802             : static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
    1803             : {
    1804           0 :         return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
    1805             : }
    1806             : 
    1807             : static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
    1808             : {
    1809           0 :         struct io_rings *rings = ctx->rings;
    1810           0 :         unsigned tail, mask = ctx->cq_entries - 1;
    1811             : 
    1812             :         /*
    1813             :          * writes to the cq entry need to come after reading head; the
    1814             :          * control dependency is enough as we're using WRITE_ONCE to
    1815             :          * fill the cq entry
    1816             :          */
    1817           0 :         if (__io_cqring_events(ctx) == ctx->cq_entries)
    1818             :                 return NULL;
    1819             : 
    1820           0 :         tail = ctx->cached_cq_tail++;
    1821           0 :         return &rings->cqes[tail & mask];
    1822             : }
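/*
 * Editorial note on the indexing above: cq_entries is always a power of
 * two, so "tail & mask" with mask = cq_entries - 1 is a branch-free
 * tail % cq_entries. With cq_entries = 8 (mask = 7), tail = 9 maps to
 * slot 9 & 7 = 1: the ring has wrapped once and reuses the second slot.
 */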
    1823             : 
    1824           0 : static void io_eventfd_signal(struct io_ring_ctx *ctx)
    1825             : {
    1826             :         struct io_ev_fd *ev_fd;
    1827             : 
    1828             :         rcu_read_lock();
    1829             :         /*
     1830             :          * rcu_dereference ctx->io_ev_fd once and use it for both checking
    1831             :          * and eventfd_signal
    1832             :          */
    1833           0 :         ev_fd = rcu_dereference(ctx->io_ev_fd);
    1834             : 
    1835             :         /*
     1836             :          * Check again if ev_fd exists in case an io_eventfd_unregister call
    1837             :          * completed between the NULL check of ctx->io_ev_fd at the start of
    1838             :          * the function and rcu_read_lock.
    1839             :          */
    1840           0 :         if (unlikely(!ev_fd))
    1841             :                 goto out;
    1842           0 :         if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
    1843             :                 goto out;
    1844             : 
    1845           0 :         if (!ev_fd->eventfd_async || io_wq_current_is_worker())
    1846           0 :                 eventfd_signal(ev_fd->cq_ev_fd, 1);
    1847             : out:
    1848             :         rcu_read_unlock();
    1849           0 : }
    1850             : 
    1851             : static inline void io_cqring_wake(struct io_ring_ctx *ctx)
    1852             : {
    1853             :         /*
    1854             :          * wake_up_all() may seem excessive, but io_wake_function() and
    1855             :          * io_should_wake() handle the termination of the loop and only
    1856             :          * wake as many waiters as we need to.
    1857             :          */
    1858           0 :         if (wq_has_sleeper(&ctx->cq_wait))
    1859           0 :                 wake_up_all(&ctx->cq_wait);
    1860             : }
    1861             : 
    1862             : /*
    1863             :  * This should only get called when at least one event has been posted.
    1864             :  * Some applications rely on the eventfd notification count only changing
     1865             :  * IFF a new CQE has been added to the CQ ring. There's no dependency on
     1866             :  * a 1:1 relationship between how many times this function is called (and
     1867             :  * hence the eventfd count) and the number of CQEs posted to the CQ ring.
    1868             :  */
    1869           0 : static inline void io_cqring_ev_posted(struct io_ring_ctx *ctx)
    1870             : {
    1871           0 :         if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
    1872             :                      ctx->has_evfd))
    1873           0 :                 __io_commit_cqring_flush(ctx);
    1874             : 
    1875           0 :         io_cqring_wake(ctx);
    1876           0 : }
    1877             : 
    1878           0 : static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
    1879             : {
    1880           0 :         if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
    1881             :                      ctx->has_evfd))
    1882           0 :                 __io_commit_cqring_flush(ctx);
    1883             : 
    1884           0 :         if (ctx->flags & IORING_SETUP_SQPOLL)
    1885             :                 io_cqring_wake(ctx);
    1886           0 : }
    1887             : 
    1888             : /* Returns true if there are no backlogged entries after the flush */
    1889           0 : static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
    1890             : {
    1891             :         bool all_flushed, posted;
    1892             : 
    1893           0 :         if (!force && __io_cqring_events(ctx) == ctx->cq_entries)
    1894             :                 return false;
    1895             : 
    1896           0 :         posted = false;
    1897           0 :         spin_lock(&ctx->completion_lock);
    1898           0 :         while (!list_empty(&ctx->cq_overflow_list)) {
    1899           0 :                 struct io_uring_cqe *cqe = io_get_cqe(ctx);
    1900             :                 struct io_overflow_cqe *ocqe;
    1901             : 
    1902           0 :                 if (!cqe && !force)
    1903             :                         break;
    1904           0 :                 ocqe = list_first_entry(&ctx->cq_overflow_list,
    1905             :                                         struct io_overflow_cqe, list);
    1906           0 :                 if (cqe)
    1907           0 :                         memcpy(cqe, &ocqe->cqe, sizeof(*cqe));
    1908             :                 else
    1909           0 :                         io_account_cq_overflow(ctx);
    1910             : 
    1911           0 :                 posted = true;
    1912           0 :                 list_del(&ocqe->list);
    1913           0 :                 kfree(ocqe);
    1914             :         }
    1915             : 
    1916           0 :         all_flushed = list_empty(&ctx->cq_overflow_list);
    1917           0 :         if (all_flushed) {
    1918           0 :                 clear_bit(0, &ctx->check_cq_overflow);
    1919           0 :                 WRITE_ONCE(ctx->rings->sq_flags,
    1920             :                            ctx->rings->sq_flags & ~IORING_SQ_CQ_OVERFLOW);
    1921             :         }
    1922             : 
    1923           0 :         if (posted)
    1924           0 :                 io_commit_cqring(ctx);
    1925           0 :         spin_unlock(&ctx->completion_lock);
    1926           0 :         if (posted)
    1927           0 :                 io_cqring_ev_posted(ctx);
    1928             :         return all_flushed;
    1929             : }
    1930             : 
    1931           0 : static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx)
    1932             : {
    1933           0 :         bool ret = true;
    1934             : 
    1935           0 :         if (test_bit(0, &ctx->check_cq_overflow)) {
    1936             :                 /* iopoll syncs against uring_lock, not completion_lock */
    1937           0 :                 if (ctx->flags & IORING_SETUP_IOPOLL)
    1938           0 :                         mutex_lock(&ctx->uring_lock);
    1939           0 :                 ret = __io_cqring_overflow_flush(ctx, false);
    1940           0 :                 if (ctx->flags & IORING_SETUP_IOPOLL)
    1941           0 :                         mutex_unlock(&ctx->uring_lock);
    1942             :         }
    1943             : 
    1944           0 :         return ret;
    1945             : }
    1946             : 
     1947             : /* must be called fairly soon after putting a request */
    1948           0 : static inline void io_put_task(struct task_struct *task, int nr)
    1949             : {
    1950           0 :         struct io_uring_task *tctx = task->io_uring;
    1951             : 
    1952           0 :         if (likely(task == current)) {
    1953           0 :                 tctx->cached_refs += nr;
    1954             :         } else {
    1955           0 :                 percpu_counter_sub(&tctx->inflight, nr);
    1956           0 :                 if (unlikely(atomic_read(&tctx->in_idle)))
    1957           0 :                         wake_up(&tctx->wait);
    1958           0 :                 put_task_struct_many(task, nr);
    1959             :         }
    1960           0 : }
    1961             : 
    1962           0 : static void io_task_refs_refill(struct io_uring_task *tctx)
    1963             : {
    1964           0 :         unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;
    1965             : 
    1966           0 :         percpu_counter_add(&tctx->inflight, refill);
    1967           0 :         refcount_add(refill, &current->usage);
    1968           0 :         tctx->cached_refs += refill;
    1969           0 : }
    1970             : 
    1971           0 : static inline void io_get_task_refs(int nr)
    1972             : {
    1973           0 :         struct io_uring_task *tctx = current->io_uring;
    1974             : 
    1975           0 :         tctx->cached_refs -= nr;
    1976           0 :         if (unlikely(tctx->cached_refs < 0))
    1977           0 :                 io_task_refs_refill(tctx);
    1978           0 : }
    1979             : 
    1980           0 : static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
    1981             : {
    1982           0 :         struct io_uring_task *tctx = task->io_uring;
    1983           0 :         unsigned int refs = tctx->cached_refs;
    1984             : 
    1985           0 :         if (refs) {
    1986           0 :                 tctx->cached_refs = 0;
    1987           0 :                 percpu_counter_sub(&tctx->inflight, refs);
    1988           0 :                 put_task_struct_many(task, refs);
    1989             :         }
    1990           0 : }
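
The three helpers above amortise task reference counting: instead of one
atomic get/put per request, io_get_task_refs() draws from a per-task cache
that io_task_refs_refill() tops up in IO_TCTX_REFS_CACHE_NR-sized batches,
and io_uring_drop_tctx_refs() returns whatever is left. The pattern, reduced
to a stand-alone sketch (hypothetical names, not kernel code; take_real_refs()
stands for the expensive per-batch operation):

	#define REFS_BATCH 128

	struct ref_cache {
		int cached;	/* refs already charged to the real counter */
	};

	static void cache_get(struct ref_cache *rc, int nr)
	{
		rc->cached -= nr;
		if (rc->cached < 0) {
			int refill = REFS_BATCH - rc->cached;

			take_real_refs(refill);	/* one costly op per batch */
			rc->cached += refill;
		}
	}

	static void cache_put(struct ref_cache *rc, int nr)
	{
		rc->cached += nr;	/* cheap: no atomics per request */
	}
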
    1991             : 
    1992           0 : static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
    1993             :                                      s32 res, u32 cflags)
    1994             : {
    1995             :         struct io_overflow_cqe *ocqe;
    1996             : 
    1997           0 :         ocqe = kmalloc(sizeof(*ocqe), GFP_ATOMIC | __GFP_ACCOUNT);
    1998           0 :         if (!ocqe) {
    1999             :                 /*
    2000             :                  * If we're in ring overflow flush mode, or in task cancel mode,
    2001             :                  * or cannot allocate an overflow entry, then we have to drop
    2002             :                  * the CQE on the floor.
    2003             :                  */
    2004           0 :                 io_account_cq_overflow(ctx);
    2005           0 :                 return false;
    2006             :         }
    2007           0 :         if (list_empty(&ctx->cq_overflow_list)) {
    2008           0 :                 set_bit(0, &ctx->check_cq_overflow);
    2009           0 :                 WRITE_ONCE(ctx->rings->sq_flags,
    2010             :                            ctx->rings->sq_flags | IORING_SQ_CQ_OVERFLOW);
    2011             : 
    2012             :         }
    2013           0 :         ocqe->cqe.user_data = user_data;
    2014           0 :         ocqe->cqe.res = res;
    2015           0 :         ocqe->cqe.flags = cflags;
    2016           0 :         list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
    2017           0 :         return true;
    2018             : }
    2019             : 
    2020           0 : static inline bool __io_fill_cqe(struct io_ring_ctx *ctx, u64 user_data,
    2021             :                                  s32 res, u32 cflags)
    2022             : {
    2023             :         struct io_uring_cqe *cqe;
    2024             : 
    2025             :         /*
    2026             :          * If we can't get a cq entry, userspace overflowed the
    2027             :          * submission (by quite a lot). Increment the overflow count in
    2028             :          * the ring.
    2029             :          */
    2030           0 :         cqe = io_get_cqe(ctx);
    2031           0 :         if (likely(cqe)) {
    2032           0 :                 WRITE_ONCE(cqe->user_data, user_data);
    2033           0 :                 WRITE_ONCE(cqe->res, res);
    2034           0 :                 WRITE_ONCE(cqe->flags, cflags);
    2035           0 :                 return true;
    2036             :         }
    2037           0 :         return io_cqring_event_overflow(ctx, user_data, res, cflags);
    2038             : }
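
__io_fill_cqe() is the producer side of the CQ ring. The matching userspace
consumer loads the tail with acquire semantics (so the fields stored via
WRITE_ONCE() above are fully visible) and publishes the new head with release
semantics. A hedged sketch with C11 atomics, where 'struct cq_view' is an
assumed view of the mmap'd CQ ring:

	#include <stdatomic.h>
	#include <linux/io_uring.h>

	struct cq_view {
		_Atomic unsigned *head, *tail;
		unsigned ring_mask;
		struct io_uring_cqe *cqes;
	};

	static int reap_one(struct cq_view *cq, struct io_uring_cqe *out)
	{
		unsigned head = atomic_load_explicit(cq->head,
						     memory_order_relaxed);
		unsigned tail = atomic_load_explicit(cq->tail,
						     memory_order_acquire);

		if (head == tail)
			return 0;			/* ring empty */
		*out = cq->cqes[head & cq->ring_mask];
		/* release: the entry load must not sink below the head store */
		atomic_store_explicit(cq->head, head + 1,
				      memory_order_release);
		return 1;
	}
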
    2039             : 
    2040             : static inline bool __io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags)
    2041             : {
    2042           0 :         trace_io_uring_complete(req->ctx, req, req->user_data, res, cflags);
    2043           0 :         return __io_fill_cqe(req->ctx, req->user_data, res, cflags);
    2044             : }
    2045             : 
    2046           0 : static noinline void io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags)
    2047             : {
    2048           0 :         if (!(req->flags & REQ_F_CQE_SKIP))
    2049           0 :                 __io_fill_cqe_req(req, res, cflags);
    2050           0 : }
    2051             : 
    2052           0 : static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data,
    2053             :                                      s32 res, u32 cflags)
    2054             : {
    2055           0 :         ctx->cq_extra++;
    2056           0 :         trace_io_uring_complete(ctx, NULL, user_data, res, cflags);
    2057           0 :         return __io_fill_cqe(ctx, user_data, res, cflags);
    2058             : }
    2059             : 
    2060           0 : static void __io_req_complete_post(struct io_kiocb *req, s32 res,
    2061             :                                    u32 cflags)
    2062             : {
    2063           0 :         struct io_ring_ctx *ctx = req->ctx;
    2064             : 
    2065           0 :         if (!(req->flags & REQ_F_CQE_SKIP))
    2066           0 :                 __io_fill_cqe_req(req, res, cflags);
    2067             :         /*
    2068             :          * If we're the last reference to this request, add to our locked
    2069             :          * free_list cache.
    2070             :          */
    2071           0 :         if (req_ref_put_and_test(req)) {
    2072           0 :                 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
    2073           0 :                         if (req->flags & IO_DISARM_MASK)
    2074           0 :                                 io_disarm_next(req);
    2075           0 :                         if (req->link) {
    2076           0 :                                 io_req_task_queue(req->link);
    2077           0 :                                 req->link = NULL;
    2078             :                         }
    2079             :                 }
    2080           0 :                 io_req_put_rsrc(req, ctx);
    2081             :                 /*
    2082             :                  * Selected buffer deallocation in io_clean_op() assumes that
    2083             :                  * we don't hold ->completion_lock. Clean them here to avoid
    2084             :                  * deadlocks.
    2085             :                  */
    2086           0 :                 io_put_kbuf_comp(req);
    2087           0 :                 io_dismantle_req(req);
    2088           0 :                 io_put_task(req->task, 1);
    2089           0 :                 wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
    2090           0 :                 ctx->locked_free_nr++;
    2091             :         }
    2092           0 : }
    2093             : 
    2094           0 : static void io_req_complete_post(struct io_kiocb *req, s32 res,
    2095             :                                  u32 cflags)
    2096             : {
    2097           0 :         struct io_ring_ctx *ctx = req->ctx;
    2098             : 
    2099           0 :         spin_lock(&ctx->completion_lock);
    2100           0 :         __io_req_complete_post(req, res, cflags);
    2101           0 :         io_commit_cqring(ctx);
    2102           0 :         spin_unlock(&ctx->completion_lock);
    2103           0 :         io_cqring_ev_posted(ctx);
    2104           0 : }
    2105             : 
    2106             : static inline void io_req_complete_state(struct io_kiocb *req, s32 res,
    2107             :                                          u32 cflags)
    2108             : {
    2109           0 :         req->result = res;
    2110           0 :         req->cflags = cflags;
    2111           0 :         req->flags |= REQ_F_COMPLETE_INLINE;
    2112             : }
    2113             : 
    2114             : static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
    2115             :                                      s32 res, u32 cflags)
    2116             : {
    2117           0 :         if (issue_flags & IO_URING_F_COMPLETE_DEFER)
    2118             :                 io_req_complete_state(req, res, cflags);
    2119             :         else
    2120           0 :                 io_req_complete_post(req, res, cflags);
    2121             : }
    2122             : 
    2123             : static inline void io_req_complete(struct io_kiocb *req, s32 res)
    2124             : {
    2125           0 :         __io_req_complete(req, 0, res, 0);
    2126             : }
    2127             : 
    2128           0 : static void io_req_complete_failed(struct io_kiocb *req, s32 res)
    2129             : {
    2130           0 :         req_set_fail(req);
    2131           0 :         io_req_complete_post(req, res, io_put_kbuf(req, IO_URING_F_UNLOCKED));
    2132           0 : }
    2133             : 
    2134             : static void io_req_complete_fail_submit(struct io_kiocb *req)
    2135             : {
    2136             :         /*
    2137             :          * We're not going to submit; fail the whole chain. To do that,
    2138             :          * replace hardlinks with normal links; an extra REQ_F_LINK is tolerated.
    2139             :          */
    2140           0 :         req->flags &= ~REQ_F_HARDLINK;
    2141           0 :         req->flags |= REQ_F_LINK;
    2142           0 :         io_req_complete_failed(req, req->result);
    2143             : }
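
io_req_complete_fail_submit() and io_fail_links() are what give linked
submissions their failure semantics: once one request in a chain fails, the
remaining links complete with -ECANCELED (or their own result if REQ_F_FAIL
was already set). From the application side, with liburing, a chain is built
roughly like this (hedged sketch, error handling elided):

	#include <liburing.h>

	/*
	 * A read linked to a write: if the read fails, the write's CQE
	 * carries -ECANCELED, matching io_fail_links() above.
	 */
	static void submit_linked_pair(struct io_uring *ring, int fd,
				       char *buf, unsigned len)
	{
		struct io_uring_sqe *sqe;

		sqe = io_uring_get_sqe(ring);
		io_uring_prep_read(sqe, fd, buf, len, 0);
		sqe->flags |= IOSQE_IO_LINK;	/* next SQE depends on this */

		sqe = io_uring_get_sqe(ring);
		io_uring_prep_write(sqe, fd, buf, len, 0);

		io_uring_submit(ring);
	}
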
    2144             : 
    2145             : /*
    2146             :  * Don't initialise the fields below on every allocation; do it once in
    2147             :  * advance and keep them valid across allocations.
    2148             :  */
    2149             : static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
    2150             : {
    2151           0 :         req->ctx = ctx;
    2152           0 :         req->link = NULL;
    2153           0 :         req->async_data = NULL;
    2154             :         /* not necessary, but safer to zero */
    2155           0 :         req->result = 0;
    2156             : }
    2157             : 
    2158             : static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
    2159             :                                         struct io_submit_state *state)
    2160             : {
    2161           0 :         spin_lock(&ctx->completion_lock);
    2162           0 :         wq_list_splice(&ctx->locked_free_list, &state->free_list);
    2163           0 :         ctx->locked_free_nr = 0;
    2164           0 :         spin_unlock(&ctx->completion_lock);
    2165             : }
    2166             : 
    2167             : /* Returns true IFF there are requests in the cache */
    2168             : static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
    2169             : {
    2170           0 :         struct io_submit_state *state = &ctx->submit_state;
    2171             : 
    2172             :         /*
    2173             :          * If we have more than a batch's worth of requests in our IRQ side
    2174             :          * locked cache, grab the lock and move them over to our submission
    2175             :          * side cache.
    2176             :          */
    2177           0 :         if (READ_ONCE(ctx->locked_free_nr) > IO_COMPL_BATCH)
    2178             :                 io_flush_cached_locked_reqs(ctx, state);
    2179           0 :         return !!state->free_list.next;
    2180             : }
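
The comment above describes a two-level cache: completions taken under
->completion_lock accumulate on ctx->locked_free_list, and only once
IO_COMPL_BATCH of them have piled up does the submission side take the lock a
single time and splice the whole batch into its private state->free_list. The
shape of that pattern, as a stand-alone sketch (hypothetical types, pthread
locking in place of the kernel spinlock):

	#include <pthread.h>
	#include <stddef.h>

	struct node { struct node *next; };

	struct two_level_cache {
		pthread_mutex_t lock;
		struct node *locked_list;	/* filled by other threads */
		int locked_nr;
		struct node *local_list;	/* owned by the consumer */
	};

	static struct node *cache_pop(struct two_level_cache *c, int batch)
	{
		struct node *n;

		if (!c->local_list && c->locked_nr > batch) {
			pthread_mutex_lock(&c->lock);	/* one lock per batch */
			c->local_list = c->locked_list;
			c->locked_list = NULL;
			c->locked_nr = 0;
			pthread_mutex_unlock(&c->lock);
		}
		n = c->local_list;
		if (n)
			c->local_list = n->next;
		return n;
	}
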
    2181             : 
    2182             : /*
    2183             :  * A request might get retired back into the request caches even before opcode
    2184             :  * handlers and io_issue_sqe() are done with it, e.g. inline completion path.
    2185             :  * Because of that, io_alloc_req() should be called only under ->uring_lock
    2186             :  * and with extra caution to avoid getting a request that is still being worked on.
    2187             :  */
    2188           0 : static __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx)
    2189             :         __must_hold(&ctx->uring_lock)
    2190             : {
    2191           0 :         struct io_submit_state *state = &ctx->submit_state;
    2192           0 :         gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
    2193             :         void *reqs[IO_REQ_ALLOC_BATCH];
    2194             :         struct io_kiocb *req;
    2195             :         int ret, i;
    2196             : 
    2197           0 :         if (likely(state->free_list.next || io_flush_cached_reqs(ctx)))
    2198             :                 return true;
    2199             : 
    2200           0 :         ret = kmem_cache_alloc_bulk(req_cachep, gfp, ARRAY_SIZE(reqs), reqs);
    2201             : 
    2202             :         /*
    2203             :          * Bulk alloc is all-or-nothing. If we fail to get a batch,
    2204             :          * retry single alloc to be on the safe side.
    2205             :          */
    2206           0 :         if (unlikely(ret <= 0)) {
    2207           0 :                 reqs[0] = kmem_cache_alloc(req_cachep, gfp);
    2208           0 :                 if (!reqs[0])
    2209             :                         return false;
    2210             :                 ret = 1;
    2211             :         }
    2212             : 
    2213           0 :         percpu_ref_get_many(&ctx->refs, ret);
    2214           0 :         for (i = 0; i < ret; i++) {
    2215           0 :                 req = reqs[i];
    2216             : 
    2217           0 :                 io_preinit_req(req, ctx);
    2218           0 :                 wq_stack_add_head(&req->comp_list, &state->free_list);
    2219             :         }
    2220             :         return true;
    2221             : }
    2222             : 
    2223             : static inline bool io_alloc_req_refill(struct io_ring_ctx *ctx)
    2224             : {
    2225           0 :         if (unlikely(!ctx->submit_state.free_list.next))
    2226           0 :                 return __io_alloc_req_refill(ctx);
    2227             :         return true;
    2228             : }
    2229             : 
    2230             : static inline struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
    2231             : {
    2232             :         struct io_wq_work_node *node;
    2233             : 
    2234           0 :         node = wq_stack_extract(&ctx->submit_state.free_list);
    2235           0 :         return container_of(node, struct io_kiocb, comp_list);
    2236             : }
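
io_alloc_req() pops from an intrusive stack: the link (req->comp_list) is
embedded inside struct io_kiocb, and container_of() recovers the containing
request from a pointer to the link. The same idiom in miniature, as a
self-contained sketch:

	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct stack_node { struct stack_node *next; };

	struct request {
		int id;
		struct stack_node link;	/* embedded, like req->comp_list */
	};

	static struct request *pop(struct stack_node **top)
	{
		struct stack_node *n = *top;

		if (!n)
			return NULL;
		*top = n->next;
		return container_of(n, struct request, link);
	}
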
    2237             : 
    2238             : static inline void io_put_file(struct file *file)
    2239             : {
    2240           0 :         if (file)
    2241           0 :                 fput(file);
    2242             : }
    2243             : 
    2244           0 : static inline void io_dismantle_req(struct io_kiocb *req)
    2245             : {
    2246           0 :         unsigned int flags = req->flags;
    2247             : 
    2248           0 :         if (unlikely(flags & IO_REQ_CLEAN_FLAGS))
    2249           0 :                 io_clean_op(req);
    2250           0 :         if (!(flags & REQ_F_FIXED_FILE))
    2251           0 :                 io_put_file(req->file);
    2252           0 : }
    2253             : 
    2254           0 : static __cold void __io_free_req(struct io_kiocb *req)
    2255             : {
    2256           0 :         struct io_ring_ctx *ctx = req->ctx;
    2257             : 
    2258           0 :         io_req_put_rsrc(req, ctx);
    2259           0 :         io_dismantle_req(req);
    2260           0 :         io_put_task(req->task, 1);
    2261             : 
    2262           0 :         spin_lock(&ctx->completion_lock);
    2263           0 :         wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
    2264           0 :         ctx->locked_free_nr++;
    2265           0 :         spin_unlock(&ctx->completion_lock);
    2266           0 : }
    2267             : 
    2268             : static inline void io_remove_next_linked(struct io_kiocb *req)
    2269             : {
    2270           0 :         struct io_kiocb *nxt = req->link;
    2271             : 
    2272           0 :         req->link = nxt->link;
    2273           0 :         nxt->link = NULL;
    2274             : }
    2275             : 
    2276           0 : static bool io_kill_linked_timeout(struct io_kiocb *req)
    2277             :         __must_hold(&req->ctx->completion_lock)
    2278             :         __must_hold(&req->ctx->timeout_lock)
    2279             : {
    2280           0 :         struct io_kiocb *link = req->link;
    2281             : 
    2282           0 :         if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
    2283           0 :                 struct io_timeout_data *io = link->async_data;
    2284             : 
    2285           0 :                 io_remove_next_linked(req);
    2286           0 :                 link->timeout.head = NULL;
    2287           0 :                 if (hrtimer_try_to_cancel(&io->timer) != -1) {
    2288           0 :                         list_del(&link->timeout.list);
    2289             :                         /* leave REQ_F_CQE_SKIP to io_fill_cqe_req */
    2290           0 :                         io_fill_cqe_req(link, -ECANCELED, 0);
    2291           0 :                         io_put_req_deferred(link);
    2292           0 :                         return true;
    2293             :                 }
    2294             :         }
    2295             :         return false;
    2296             : }
    2297             : 
    2298           0 : static void io_fail_links(struct io_kiocb *req)
    2299             :         __must_hold(&req->ctx->completion_lock)
    2300             : {
    2301           0 :         struct io_kiocb *nxt, *link = req->link;
    2302           0 :         bool ignore_cqes = req->flags & REQ_F_SKIP_LINK_CQES;
    2303             : 
    2304           0 :         req->link = NULL;
    2305           0 :         while (link) {
    2306           0 :                 long res = -ECANCELED;
    2307             : 
    2308           0 :                 if (link->flags & REQ_F_FAIL)
    2309           0 :                         res = link->result;
    2310             : 
    2311           0 :                 nxt = link->link;
    2312           0 :                 link->link = NULL;
    2313             : 
    2314           0 :                 trace_io_uring_fail_link(req->ctx, req, req->user_data,
    2315           0 :                                         req->opcode, link);
    2316             : 
    2317           0 :                 if (!ignore_cqes) {
    2318           0 :                         link->flags &= ~REQ_F_CQE_SKIP;
    2319           0 :                         io_fill_cqe_req(link, res, 0);
    2320             :                 }
    2321           0 :                 io_put_req_deferred(link);
    2322           0 :                 link = nxt;
    2323             :         }
    2324           0 : }
    2325             : 
    2326           0 : static bool io_disarm_next(struct io_kiocb *req)
    2327             :         __must_hold(&req->ctx->completion_lock)
    2328             : {
    2329           0 :         bool posted = false;
    2330             : 
    2331           0 :         if (req->flags & REQ_F_ARM_LTIMEOUT) {
    2332           0 :                 struct io_kiocb *link = req->link;
    2333             : 
    2334           0 :                 req->flags &= ~REQ_F_ARM_LTIMEOUT;
    2335           0 :                 if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
    2336           0 :                         io_remove_next_linked(req);
    2337             :                         /* leave REQ_F_CQE_SKIP to io_fill_cqe_req */
    2338           0 :                         io_fill_cqe_req(link, -ECANCELED, 0);
    2339           0 :                         io_put_req_deferred(link);
    2340           0 :                         posted = true;
    2341             :                 }
    2342           0 :         } else if (req->flags & REQ_F_LINK_TIMEOUT) {
    2343           0 :                 struct io_ring_ctx *ctx = req->ctx;
    2344             : 
    2345           0 :                 spin_lock_irq(&ctx->timeout_lock);
    2346           0 :                 posted = io_kill_linked_timeout(req);
    2347           0 :                 spin_unlock_irq(&ctx->timeout_lock);
    2348             :         }
    2349           0 :         if (unlikely((req->flags & REQ_F_FAIL) &&
    2350             :                      !(req->flags & REQ_F_HARDLINK))) {
    2351           0 :                 posted |= (req->link != NULL);
    2352           0 :                 io_fail_links(req);
    2353             :         }
    2354           0 :         return posted;
    2355             : }
    2356             : 
    2357           0 : static void __io_req_find_next_prep(struct io_kiocb *req)
    2358             : {
    2359           0 :         struct io_ring_ctx *ctx = req->ctx;
    2360             :         bool posted;
    2361             : 
    2362           0 :         spin_lock(&ctx->completion_lock);
    2363           0 :         posted = io_disarm_next(req);
    2364           0 :         if (posted)
    2365           0 :                 io_commit_cqring(ctx);
    2366           0 :         spin_unlock(&ctx->completion_lock);
    2367           0 :         if (posted)
    2368           0 :                 io_cqring_ev_posted(ctx);
    2369           0 : }
    2370             : 
    2371             : static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
    2372             : {
    2373             :         struct io_kiocb *nxt;
    2374             : 
    2375           0 :         if (likely(!(req->flags & (REQ_F_LINK|REQ_F_HARDLINK))))
    2376             :                 return NULL;
    2377             :         /*
    2378             :          * If LINK is set, we have dependent requests in this chain. If we
    2379             :          * didn't fail this request, queue the first one up, moving any other
    2380             :          * dependencies to the next request. In case of failure, fail the rest
    2381             :          * of the chain.
    2382             :          */
    2383           0 :         if (unlikely(req->flags & IO_DISARM_MASK))
    2384           0 :                 __io_req_find_next_prep(req);
    2385           0 :         nxt = req->link;
    2386           0 :         req->link = NULL;
    2387             :         return nxt;
    2388             : }
    2389             : 
    2390           0 : static void ctx_flush_and_put(struct io_ring_ctx *ctx, bool *locked)
    2391             : {
    2392           0 :         if (!ctx)
    2393             :                 return;
    2394           0 :         if (*locked) {
    2395           0 :                 io_submit_flush_completions(ctx);
    2396           0 :                 mutex_unlock(&ctx->uring_lock);
    2397           0 :                 *locked = false;
    2398             :         }
    2399           0 :         percpu_ref_put(&ctx->refs);
    2400             : }
    2401             : 
    2402             : static inline void ctx_commit_and_unlock(struct io_ring_ctx *ctx)
    2403             : {
    2404           0 :         io_commit_cqring(ctx);
    2405           0 :         spin_unlock(&ctx->completion_lock);
    2406           0 :         io_cqring_ev_posted(ctx);
    2407             : }
    2408             : 
    2409           0 : static void handle_prev_tw_list(struct io_wq_work_node *node,
    2410             :                                 struct io_ring_ctx **ctx, bool *uring_locked)
    2411             : {
    2412           0 :         if (*ctx && !*uring_locked)
    2413           0 :                 spin_lock(&(*ctx)->completion_lock);
    2414             : 
    2415             :         do {
    2416           0 :                 struct io_wq_work_node *next = node->next;
    2417           0 :                 struct io_kiocb *req = container_of(node, struct io_kiocb,
    2418             :                                                     io_task_work.node);
    2419             : 
    2420           0 :                 prefetch(container_of(next, struct io_kiocb, io_task_work.node));
    2421             : 
    2422           0 :                 if (req->ctx != *ctx) {
    2423           0 :                         if (unlikely(!*uring_locked && *ctx))
    2424           0 :                                 ctx_commit_and_unlock(*ctx);
    2425             : 
    2426           0 :                         ctx_flush_and_put(*ctx, uring_locked);
    2427           0 :                         *ctx = req->ctx;
    2428             :                         /* if not contended, grab the lock; it improves batching */
    2429           0 :                         *uring_locked = mutex_trylock(&(*ctx)->uring_lock);
    2430           0 :                         percpu_ref_get(&(*ctx)->refs);
    2431           0 :                         if (unlikely(!*uring_locked))
    2432           0 :                                 spin_lock(&(*ctx)->completion_lock);
    2433             :                 }
    2434           0 :                 if (likely(*uring_locked))
    2435           0 :                         req->io_task_work.func(req, uring_locked);
    2436             :                 else
    2437           0 :                         __io_req_complete_post(req, req->result,
    2438             :                                                 io_put_kbuf_comp(req));
    2439           0 :                 node = next;
    2440           0 :         } while (node);
    2441             : 
    2442           0 :         if (unlikely(!*uring_locked))
    2443           0 :                 ctx_commit_and_unlock(*ctx);
    2444           0 : }
    2445             : 
    2446           0 : static void handle_tw_list(struct io_wq_work_node *node,
    2447             :                            struct io_ring_ctx **ctx, bool *locked)
    2448             : {
    2449             :         do {
    2450           0 :                 struct io_wq_work_node *next = node->next;
    2451           0 :                 struct io_kiocb *req = container_of(node, struct io_kiocb,
    2452             :                                                     io_task_work.node);
    2453             : 
    2454           0 :                 prefetch(container_of(next, struct io_kiocb, io_task_work.node));
    2455             : 
    2456           0 :                 if (req->ctx != *ctx) {
    2457           0 :                         ctx_flush_and_put(*ctx, locked);
    2458           0 :                         *ctx = req->ctx;
    2459             :                         /* if not contended, grab the lock; it improves batching */
    2460           0 :                         *locked = mutex_trylock(&(*ctx)->uring_lock);
    2461           0 :                         percpu_ref_get(&(*ctx)->refs);
    2462             :                 }
    2463           0 :                 req->io_task_work.func(req, locked);
    2464           0 :                 node = next;
    2465           0 :         } while (node);
    2466           0 : }
    2467             : 
    2468           0 : static void tctx_task_work(struct callback_head *cb)
    2469             : {
    2470           0 :         bool uring_locked = false;
    2471           0 :         struct io_ring_ctx *ctx = NULL;
    2472           0 :         struct io_uring_task *tctx = container_of(cb, struct io_uring_task,
    2473             :                                                   task_work);
    2474             : 
    2475           0 :         while (1) {
    2476             :                 struct io_wq_work_node *node1, *node2;
    2477             : 
    2478           0 :                 if (!tctx->task_list.first &&
    2479           0 :                     !tctx->prior_task_list.first && uring_locked)
    2480           0 :                         io_submit_flush_completions(ctx);
    2481             : 
    2482           0 :                 spin_lock_irq(&tctx->task_lock);
    2483           0 :                 node1 = tctx->prior_task_list.first;
    2484           0 :                 node2 = tctx->task_list.first;
    2485           0 :                 INIT_WQ_LIST(&tctx->task_list);
    2486           0 :                 INIT_WQ_LIST(&tctx->prior_task_list);
    2487           0 :                 if (!node2 && !node1)
    2488           0 :                         tctx->task_running = false;
    2489           0 :                 spin_unlock_irq(&tctx->task_lock);
    2490           0 :                 if (!node2 && !node1)
    2491             :                         break;
    2492             : 
    2493           0 :                 if (node1)
    2494           0 :                         handle_prev_tw_list(node1, &ctx, &uring_locked);
    2495             : 
    2496           0 :                 if (node2)
    2497           0 :                         handle_tw_list(node2, &ctx, &uring_locked);
    2498           0 :                 cond_resched();
    2499             :         }
    2500             : 
    2501           0 :         ctx_flush_and_put(ctx, &uring_locked);
    2502             : 
    2503             :         /* relaxed read is enough as only the task itself sets ->in_idle */
    2504           0 :         if (unlikely(atomic_read(&tctx->in_idle)))
    2505           0 :                 io_uring_drop_tctx_refs(current);
    2506           0 : }
    2507             : 
    2508           0 : static void io_req_task_work_add(struct io_kiocb *req, bool priority)
    2509             : {
    2510           0 :         struct task_struct *tsk = req->task;
    2511           0 :         struct io_uring_task *tctx = tsk->io_uring;
    2512             :         enum task_work_notify_mode notify;
    2513             :         struct io_wq_work_node *node;
    2514             :         unsigned long flags;
    2515             :         bool running;
    2516             : 
    2517           0 :         WARN_ON_ONCE(!tctx);
    2518             : 
    2519           0 :         io_drop_inflight_file(req);
    2520             : 
    2521           0 :         spin_lock_irqsave(&tctx->task_lock, flags);
    2522           0 :         if (priority)
    2523           0 :                 wq_list_add_tail(&req->io_task_work.node, &tctx->prior_task_list);
    2524             :         else
    2525           0 :                 wq_list_add_tail(&req->io_task_work.node, &tctx->task_list);
    2526           0 :         running = tctx->task_running;
    2527           0 :         if (!running)
    2528           0 :                 tctx->task_running = true;
    2529           0 :         spin_unlock_irqrestore(&tctx->task_lock, flags);
    2530             : 
    2531             :         /* task_work already pending, we're done */
    2532           0 :         if (running)
    2533             :                 return;
    2534             : 
    2535             :         /*
    2536             :          * SQPOLL kernel thread doesn't need notification, just a wakeup. For
    2537             :          * all other cases, use TWA_SIGNAL unconditionally to ensure we're
    2538             :          * processing task_work. There's no reliable way to tell if TWA_RESUME
    2539             :          * will do the job.
    2540             :          */
    2541           0 :         notify = (req->ctx->flags & IORING_SETUP_SQPOLL) ? TWA_NONE : TWA_SIGNAL;
    2542           0 :         if (likely(!task_work_add(tsk, &tctx->task_work, notify))) {
    2543           0 :                 if (notify == TWA_NONE)
    2544           0 :                         wake_up_process(tsk);
    2545             :                 return;
    2546             :         }
    2547             : 
    2548           0 :         spin_lock_irqsave(&tctx->task_lock, flags);
    2549           0 :         tctx->task_running = false;
    2550           0 :         node = wq_list_merge(&tctx->prior_task_list, &tctx->task_list);
    2551           0 :         spin_unlock_irqrestore(&tctx->task_lock, flags);
    2552             : 
    2553           0 :         while (node) {
    2554           0 :                 req = container_of(node, struct io_kiocb, io_task_work.node);
    2555           0 :                 node = node->next;
    2556           0 :                 if (llist_add(&req->io_task_work.fallback_node,
    2557           0 :                               &req->ctx->fallback_llist))
    2558           0 :                         schedule_delayed_work(&req->ctx->fallback_work, 1);
    2559             :         }
    2560             : }
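
io_req_task_work_add() rides on the generic task_work machinery: a
callback_head is queued on the target task, and the notify mode decides how
that task finds out (TWA_SIGNAL interrupts it; TWA_NONE relies on the SQPOLL
thread's own loop plus the explicit wake_up_process()). Minimal use of that
API, as a hedged kernel-style sketch:

	#include <linux/task_work.h>
	#include <linux/sched.h>

	static void my_work_fn(struct callback_head *cb)
	{
		/* runs in the context of the task it was queued on */
	}

	static int queue_on_task(struct task_struct *tsk,
				 struct callback_head *cb)
	{
		init_task_work(cb, my_work_fn);
		/*
		 * A non-zero return means the task is exiting and the caller
		 * must fall back; io_uring punts to ctx->fallback_work above.
		 */
		return task_work_add(tsk, cb, TWA_SIGNAL);
	}
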
    2561             : 
    2562           0 : static void io_req_task_cancel(struct io_kiocb *req, bool *locked)
    2563             : {
    2564           0 :         struct io_ring_ctx *ctx = req->ctx;
    2565             : 
    2566             :         /* not needed for normal modes, but SQPOLL depends on it */
    2567           0 :         io_tw_lock(ctx, locked);
    2568           0 :         io_req_complete_failed(req, req->result);
    2569           0 : }
    2570             : 
    2571           0 : static void io_req_task_submit(struct io_kiocb *req, bool *locked)
    2572             : {
    2573           0 :         struct io_ring_ctx *ctx = req->ctx;
    2574             : 
    2575           0 :         io_tw_lock(ctx, locked);
    2576             :         /* req->task == current here; checking PF_EXITING is safe */
    2577           0 :         if (likely(!(req->task->flags & PF_EXITING)))
    2578           0 :                 __io_queue_sqe(req);
    2579             :         else
    2580           0 :                 io_req_complete_failed(req, -EFAULT);
    2581           0 : }
    2582             : 
    2583             : static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
    2584             : {
    2585           0 :         req->result = ret;
    2586           0 :         req->io_task_work.func = io_req_task_cancel;
    2587           0 :         io_req_task_work_add(req, false);
    2588             : }
    2589             : 
    2590             : static void io_req_task_queue(struct io_kiocb *req)
    2591             : {
    2592           0 :         req->io_task_work.func = io_req_task_submit;
    2593           0 :         io_req_task_work_add(req, false);
    2594             : }
    2595             : 
    2596             : static void io_req_task_queue_reissue(struct io_kiocb *req)
    2597             : {
    2598           0 :         req->io_task_work.func = io_queue_async_work;
    2599           0 :         io_req_task_work_add(req, false);
    2600             : }
    2601             : 
    2602           0 : static inline void io_queue_next(struct io_kiocb *req)
    2603             : {
    2604           0 :         struct io_kiocb *nxt = io_req_find_next(req);
    2605             : 
    2606           0 :         if (nxt)
    2607             :                 io_req_task_queue(nxt);
    2608           0 : }
    2609             : 
    2610             : static void io_free_req(struct io_kiocb *req)
    2611             : {
    2612           0 :         io_queue_next(req);
    2613           0 :         __io_free_req(req);
    2614             : }
    2615             : 
    2616           0 : static void io_free_req_work(struct io_kiocb *req, bool *locked)
    2617             : {
    2618           0 :         io_free_req(req);
    2619           0 : }
    2620             : 
    2621           0 : static void io_free_batch_list(struct io_ring_ctx *ctx,
    2622             :                                 struct io_wq_work_node *node)
    2623             :         __must_hold(&ctx->uring_lock)
    2624             : {
    2625           0 :         struct task_struct *task = NULL;
    2626           0 :         int task_refs = 0;
    2627             : 
    2628             :         do {
    2629           0 :                 struct io_kiocb *req = container_of(node, struct io_kiocb,
    2630             :                                                     comp_list);
    2631             : 
    2632           0 :                 if (unlikely(req->flags & REQ_F_REFCOUNT)) {
    2633           0 :                         node = req->comp_list.next;
    2634           0 :                         if (!req_ref_put_and_test(req))
    2635           0 :                                 continue;
    2636             :                 }
    2637             : 
    2638           0 :                 io_req_put_rsrc_locked(req, ctx);
    2639           0 :                 io_queue_next(req);
    2640           0 :                 io_dismantle_req(req);
    2641             : 
    2642           0 :                 if (req->task != task) {
    2643           0 :                         if (task)
    2644           0 :                                 io_put_task(task, task_refs);
    2645           0 :                         task = req->task;
    2646           0 :                         task_refs = 0;
    2647             :                 }
    2648           0 :                 task_refs++;
    2649           0 :                 node = req->comp_list.next;
    2650           0 :                 wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list);
    2651           0 :         } while (node);
    2652             : 
    2653           0 :         if (task)
    2654           0 :                 io_put_task(task, task_refs);
    2655           0 : }
    2656             : 
    2657           0 : static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
    2658             :         __must_hold(&ctx->uring_lock)
    2659             : {
    2660             :         struct io_wq_work_node *node, *prev;
    2661           0 :         struct io_submit_state *state = &ctx->submit_state;
    2662             : 
    2663           0 :         if (state->flush_cqes) {
    2664           0 :                 spin_lock(&ctx->completion_lock);
    2665           0 :                 wq_list_for_each(node, prev, &state->compl_reqs) {
    2666           0 :                         struct io_kiocb *req = container_of(node, struct io_kiocb,
    2667             :                                                     comp_list);
    2668             : 
    2669           0 :                         if (!(req->flags & REQ_F_CQE_SKIP))
    2670           0 :                                 __io_fill_cqe_req(req, req->result, req->cflags);
    2671           0 :                         if ((req->flags & REQ_F_POLLED) && req->apoll) {
    2672           0 :                                 struct async_poll *apoll = req->apoll;
    2673             : 
    2674           0 :                                 if (apoll->double_poll)
    2675           0 :                                         kfree(apoll->double_poll);
    2676           0 :                                 list_add(&apoll->poll.wait.entry,
    2677             :                                                 &ctx->apoll_cache);
    2678           0 :                                 req->flags &= ~REQ_F_POLLED;
    2679             :                         }
    2680             :                 }
    2681             : 
    2682           0 :                 io_commit_cqring(ctx);
    2683           0 :                 spin_unlock(&ctx->completion_lock);
    2684           0 :                 io_cqring_ev_posted(ctx);
    2685           0 :                 state->flush_cqes = false;
    2686             :         }
    2687             : 
    2688           0 :         io_free_batch_list(ctx, state->compl_reqs.first);
    2689           0 :         INIT_WQ_LIST(&state->compl_reqs);
    2690           0 : }
    2691             : 
    2692             : /*
    2693             :  * Drop a reference to the request; if that was the last reference, return
    2694             :  * the next request in the chain (if there is one).
    2695             :  */
    2696           0 : static inline struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
    2697             : {
    2698           0 :         struct io_kiocb *nxt = NULL;
    2699             : 
    2700           0 :         if (req_ref_put_and_test(req)) {
    2701           0 :                 nxt = io_req_find_next(req);
    2702           0 :                 __io_free_req(req);
    2703             :         }
    2704           0 :         return nxt;
    2705             : }
    2706             : 
    2707           0 : static inline void io_put_req(struct io_kiocb *req)
    2708             : {
    2709           0 :         if (req_ref_put_and_test(req))
    2710             :                 io_free_req(req);
    2711           0 : }
    2712             : 
    2713           0 : static inline void io_put_req_deferred(struct io_kiocb *req)
    2714             : {
    2715           0 :         if (req_ref_put_and_test(req)) {
    2716           0 :                 req->io_task_work.func = io_free_req_work;
    2717           0 :                 io_req_task_work_add(req, false);
    2718             :         }
    2719           0 : }
    2720             : 
    2721             : static unsigned io_cqring_events(struct io_ring_ctx *ctx)
    2722             : {
    2723             :         /* See comment at the top of this file */
    2724           0 :         smp_rmb();
    2725           0 :         return __io_cqring_events(ctx);
    2726             : }
    2727             : 
    2728             : static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
    2729             : {
    2730           0 :         struct io_rings *rings = ctx->rings;
    2731             : 
    2732             :         /* make sure SQ entry isn't read before tail */
    2733           0 :         return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
    2734             : }
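
io_sqring_entries() is the consumer half of the SQ handshake: the
smp_load_acquire() on sq.tail guarantees that SQE contents written before the
producer's store-release of the tail are visible here. The producer half in
userspace looks roughly like this (hedged sketch with C11 atomics;
'struct sq_view' is an assumed view of the mmap'd SQ ring):

	#include <stdatomic.h>
	#include <linux/io_uring.h>

	struct sq_view {
		_Atomic unsigned *tail;
		unsigned *array, ring_mask;
		struct io_uring_sqe *sqes;
	};

	static void push_sqe(struct sq_view *sq, const struct io_uring_sqe *src)
	{
		unsigned tail = atomic_load_explicit(sq->tail,
						     memory_order_relaxed);
		unsigned idx = tail & sq->ring_mask;

		sq->sqes[idx] = *src;		/* fill the entry first */
		sq->array[idx] = idx;
		/* pairs with the kernel's smp_load_acquire() above */
		atomic_store_explicit(sq->tail, tail + 1,
				      memory_order_release);
	}
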
    2735             : 
    2736           0 : static inline bool io_run_task_work(void)
    2737             : {
    2738           0 :         if (test_thread_flag(TIF_NOTIFY_SIGNAL) || task_work_pending(current)) {
    2739           0 :                 __set_current_state(TASK_RUNNING);
    2740           0 :                 clear_notify_signal();
    2741           0 :                 if (task_work_pending(current))
    2742           0 :                         task_work_run();
    2743             :                 return true;
    2744             :         }
    2745             : 
    2746             :         return false;
    2747             : }
    2748             : 
    2749           0 : static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
    2750             : {
    2751             :         struct io_wq_work_node *pos, *start, *prev;
    2752           0 :         unsigned int poll_flags = BLK_POLL_NOSLEEP;
    2753           0 :         DEFINE_IO_COMP_BATCH(iob);
    2754           0 :         int nr_events = 0;
    2755             : 
    2756             :         /*
    2757             :          * Only spin for completions if we don't have multiple devices hanging
    2758             :          * off our complete list.
    2759             :          */
    2760           0 :         if (ctx->poll_multi_queue || force_nonspin)
    2761           0 :                 poll_flags |= BLK_POLL_ONESHOT;
    2762             : 
    2763           0 :         wq_list_for_each(pos, start, &ctx->iopoll_list) {
    2764           0 :                 struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
    2765           0 :                 struct kiocb *kiocb = &req->rw.kiocb;
    2766             :                 int ret;
    2767             : 
    2768             :                 /*
    2769             :                  * Move completed and retryable entries to our local lists.
    2770             :                  * If we find a request that requires polling, break out
    2771             :                  * and complete those lists first if we have entries there.
    2772             :                  */
    2773           0 :                 if (READ_ONCE(req->iopoll_completed))
    2774             :                         break;
    2775             : 
    2776           0 :                 ret = kiocb->ki_filp->f_op->iopoll(kiocb, &iob, poll_flags);
    2777           0 :                 if (unlikely(ret < 0))
    2778             :                         return ret;
    2779           0 :                 else if (ret)
    2780           0 :                         poll_flags |= BLK_POLL_ONESHOT;
    2781             : 
    2782             :                 /* iopoll may have completed current req */
    2783           0 :                 if (!rq_list_empty(iob.req_list) ||
    2784           0 :                     READ_ONCE(req->iopoll_completed))
    2785             :                         break;
    2786             :         }
    2787             : 
    2788           0 :         if (!rq_list_empty(iob.req_list))
    2789           0 :                 iob.complete(&iob);
    2790           0 :         else if (!pos)
    2791             :                 return 0;
    2792             : 
    2793             :         prev = start;
    2794           0 :         wq_list_for_each_resume(pos, prev) {
    2795           0 :                 struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
    2796             : 
    2797             :                 /* order with io_complete_rw_iopoll(), e.g. ->result updates */
    2798           0 :                 if (!smp_load_acquire(&req->iopoll_completed))
    2799             :                         break;
    2800           0 :                 nr_events++;
    2801           0 :                 if (unlikely(req->flags & REQ_F_CQE_SKIP))
    2802           0 :                         continue;
    2803           0 :                 __io_fill_cqe_req(req, req->result, io_put_kbuf(req, 0));
    2804             :         }
    2805             : 
    2806           0 :         if (unlikely(!nr_events))
    2807             :                 return 0;
    2808             : 
    2809           0 :         io_commit_cqring(ctx);
    2810           0 :         io_cqring_ev_posted_iopoll(ctx);
    2811           0 :         pos = start ? start->next : ctx->iopoll_list.first;
    2812           0 :         wq_list_cut(&ctx->iopoll_list, prev, start);
    2813           0 :         io_free_batch_list(ctx, pos);
    2814           0 :         return nr_events;
    2815             : }
    2816             : 
    2817             : /*
    2818             :  * We can't just wait for polled events to come to us; we have to actively
    2819             :  * find and complete them.
    2820             :  */
    2821           0 : static __cold void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
    2822             : {
    2823           0 :         if (!(ctx->flags & IORING_SETUP_IOPOLL))
    2824             :                 return;
    2825             : 
    2826           0 :         mutex_lock(&ctx->uring_lock);
    2827           0 :         while (!wq_list_empty(&ctx->iopoll_list)) {
    2828             :                 /* let it sleep and repeat later if can't complete a request */
    2829           0 :                 if (io_do_iopoll(ctx, true) == 0)
    2830             :                         break;
    2831             :                 /*
    2832             :                  * Ensure we allow local-to-the-cpu processing to take place;
    2833             :                  * in this case we need to ensure that we reap all events.
    2834             :                  * Also let task_work etc. progress by releasing the mutex.
    2835             :                  */
    2836           0 :                 if (need_resched()) {
    2837           0 :                         mutex_unlock(&ctx->uring_lock);
    2838           0 :                         cond_resched();
    2839           0 :                         mutex_lock(&ctx->uring_lock);
    2840             :                 }
    2841             :         }
    2842           0 :         mutex_unlock(&ctx->uring_lock);
    2843             : }
    2844             : 
    2845           0 : static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
    2846             : {
    2847           0 :         unsigned int nr_events = 0;
    2848           0 :         int ret = 0;
    2849             : 
    2850             :         /*
    2851             :          * We disallow the app entering submit/complete with polling, but we
    2852             :          * still need to lock the ring to prevent racing with polled issue
    2853             :          * that got punted to a workqueue.
    2854             :          */
    2855           0 :         mutex_lock(&ctx->uring_lock);
    2856             :         /*
    2857             :          * Don't enter poll loop if we already have events pending.
    2858             :          * If we do, we can potentially be spinning for commands that
    2859             :          * already triggered a CQE (e.g. in error).
    2860             :          */
    2861           0 :         if (test_bit(0, &ctx->check_cq_overflow))
    2862           0 :                 __io_cqring_overflow_flush(ctx, false);
    2863           0 :         if (io_cqring_events(ctx))
    2864             :                 goto out;
    2865             :         do {
    2866             :                 /*
    2867             :                  * If a submit got punted to a workqueue, we can have the
    2868             :                  * application entering polling for a command before it gets
    2869             :                  * issued. That app will hold the uring_lock for the duration
    2870             :                  * of the poll right here, so we need to take a breather every
    2871             :                  * now and then to ensure that the issue has a chance to add
    2872             :                  * the poll to the issued list. Otherwise we can spin here
    2873             :                  * forever, while the workqueue is stuck trying to acquire the
    2874             :                  * very same mutex.
    2875             :                  */
    2876           0 :                 if (wq_list_empty(&ctx->iopoll_list)) {
    2877           0 :                         u32 tail = ctx->cached_cq_tail;
    2878             : 
    2879           0 :                         mutex_unlock(&ctx->uring_lock);
    2880           0 :                         io_run_task_work();
    2881           0 :                         mutex_lock(&ctx->uring_lock);
    2882             : 
    2883             :                         /* some requests don't go through iopoll_list */
    2884           0 :                         if (tail != ctx->cached_cq_tail ||
    2885           0 :                             wq_list_empty(&ctx->iopoll_list))
    2886             :                                 break;
    2887             :                 }
    2888           0 :                 ret = io_do_iopoll(ctx, !min);
    2889           0 :                 if (ret < 0)
    2890             :                         break;
    2891           0 :                 nr_events += ret;
    2892           0 :                 ret = 0;
    2893           0 :         } while (nr_events < min && !need_resched());
    2894             : out:
    2895           0 :         mutex_unlock(&ctx->uring_lock);
    2896           0 :         return ret;
    2897             : }
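
io_iopoll_check() is, roughly, what IORING_ENTER_GETEVENTS turns into on a
ring created with IORING_SETUP_IOPOLL: completions are reaped by polling the
device rather than waiting for interrupts. Requesting that mode from
userspace with liburing (hedged sketch; IOPOLL additionally requires files
opened with O_DIRECT on a polling-capable device):

	#include <liburing.h>

	static int setup_iopoll_ring(struct io_uring *ring, unsigned entries)
	{
		/*
		 * Every completion-wait on this ring ends up in the kernel's
		 * polling loop instead of sleeping on an interrupt.
		 */
		return io_uring_queue_init(entries, ring, IORING_SETUP_IOPOLL);
	}
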
    2898             : 
    2899             : static void kiocb_end_write(struct io_kiocb *req)
    2900             : {
    2901             :         /*
    2902             :          * Tell lockdep we inherited freeze protection from the
    2903             :          * submission thread.
    2904             :          */
    2905           0 :         if (req->flags & REQ_F_ISREG) {
    2906           0 :                 struct super_block *sb = file_inode(req->file)->i_sb;
    2907             : 
    2908           0 :                 __sb_writers_acquired(sb, SB_FREEZE_WRITE);
    2909             :                 sb_end_write(sb);
    2910             :         }
    2911             : }
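
Because a write can be issued on one thread and completed on another, the
freeze protection released here must have been handed off at issue time. The
assumed counterpart on the submission side (a sketch of the pairing as done
for regular-file writes, not a quote of this file's write path):

	/* issue side: take freeze protection, then tell lockdep that a
	 * different thread will release it */
	sb_start_write(file_inode(file)->i_sb);
	__sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE);

	/* completion side: kiocb_end_write() above does the inverse via
	 * __sb_writers_acquired() + sb_end_write() */
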
    2912             : 
    2913             : #ifdef CONFIG_BLOCK
    2914           0 : static bool io_resubmit_prep(struct io_kiocb *req)
    2915             : {
    2916           0 :         struct io_async_rw *rw = req->async_data;
    2917             : 
    2918           0 :         if (!req_has_async_data(req))
    2919           0 :                 return !io_req_prep_async(req);
    2920           0 :         iov_iter_restore(&rw->s.iter, &rw->s.iter_state);
    2921           0 :         return true;
    2922             : }
    2923             : 
    2924           0 : static bool io_rw_should_reissue(struct io_kiocb *req)
    2925             : {
    2926           0 :         umode_t mode = file_inode(req->file)->i_mode;
    2927           0 :         struct io_ring_ctx *ctx = req->ctx;
    2928             : 
    2929           0 :         if (!S_ISBLK(mode) && !S_ISREG(mode))
    2930             :                 return false;
    2931           0 :         if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
    2932           0 :             !(ctx->flags & IORING_SETUP_IOPOLL)))
    2933             :                 return false;
    2934             :         /*
    2935             :          * If ref is dying, we might be running poll reap from the exit work.
    2936             :          * Don't attempt to reissue from that path; just let it fail with
    2937             :          * -EAGAIN.
    2938             :          */
    2939           0 :         if (percpu_ref_is_dying(&ctx->refs))
    2940             :                 return false;
    2941             :         /*
    2942             :          * Play it safe and assume it's not safe to re-import and reissue if
    2943             :          * we're not in the original thread group (or not in task context).
    2944             :          */
    2945           0 :         if (!same_thread_group(req->task, current) || !in_task())
    2946             :                 return false;
    2947             :         return true;
    2948             : }
    2949             : #else
    2950             : static bool io_resubmit_prep(struct io_kiocb *req)
    2951             : {
    2952             :         return false;
    2953             : }
    2954             : static bool io_rw_should_reissue(struct io_kiocb *req)
    2955             : {
    2956             :         return false;
    2957             : }
    2958             : #endif
    2959             : 
    2960           0 : static bool __io_complete_rw_common(struct io_kiocb *req, long res)
    2961             : {
    2962           0 :         if (req->rw.kiocb.ki_flags & IOCB_WRITE) {
    2963           0 :                 kiocb_end_write(req);
    2964           0 :                 fsnotify_modify(req->file);
    2965             :         } else {
    2966           0 :                 fsnotify_access(req->file);
    2967             :         }
    2968           0 :         if (unlikely(res != req->result)) {
    2969           0 :                 if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
    2970           0 :                     io_rw_should_reissue(req)) {
    2971           0 :                         req->flags |= REQ_F_REISSUE;
    2972           0 :                         return true;
    2973             :                 }
    2974           0 :                 req_set_fail(req);
    2975           0 :                 req->result = res;
    2976             :         }
    2977             :         return false;
    2978             : }
    2979             : 
    2980           0 : static inline void io_req_task_complete(struct io_kiocb *req, bool *locked)
    2981             : {
    2982           0 :         int res = req->result;
    2983             : 
    2984           0 :         if (*locked) {
    2985           0 :                 io_req_complete_state(req, res, io_put_kbuf(req, 0));
    2986             :                 io_req_add_compl_list(req);
    2987             :         } else {
    2988           0 :                 io_req_complete_post(req, res,
    2989             :                                         io_put_kbuf(req, IO_URING_F_UNLOCKED));
    2990             :         }
    2991           0 : }
    2992             : 
    2993           0 : static void __io_complete_rw(struct io_kiocb *req, long res,
    2994             :                              unsigned int issue_flags)
    2995             : {
    2996           0 :         if (__io_complete_rw_common(req, res))
    2997             :                 return;
    2998           0 :         __io_req_complete(req, issue_flags, req->result,
    2999             :                                 io_put_kbuf(req, issue_flags));
    3000             : }
    3001             : 
    3002           0 : static void io_complete_rw(struct kiocb *kiocb, long res)
    3003             : {
    3004           0 :         struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
    3005             : 
    3006           0 :         if (__io_complete_rw_common(req, res))
    3007             :                 return;
    3008           0 :         req->result = res;
    3009           0 :         req->io_task_work.func = io_req_task_complete;
    3010           0 :         io_req_task_work_add(req, !!(req->ctx->flags & IORING_SETUP_SQPOLL));
    3011             : }
    3012             : 
    3013           0 : static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
    3014             : {
    3015           0 :         struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
    3016             : 
    3017           0 :         if (kiocb->ki_flags & IOCB_WRITE)
    3018           0 :                 kiocb_end_write(req);
    3019           0 :         if (unlikely(res != req->result)) {
    3020           0 :                 if (res == -EAGAIN && io_rw_should_reissue(req)) {
    3021           0 :                         req->flags |= REQ_F_REISSUE;
    3022           0 :                         return;
    3023             :                 }
    3024           0 :                 req->result = res;
    3025             :         }
    3026             : 
    3027             :         /* order with io_iopoll_complete() checking ->iopoll_completed */
    3028           0 :         smp_store_release(&req->iopoll_completed, 1);
    3029             : }
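
/*
 * Editor's illustration (not part of the coverage data): the
 * smp_store_release() above pairs with an acquire load on the reaping
 * side. A simplified sketch modelled on io_do_iopoll()'s loop; the
 * real reaper also fills CQEs and unlinks the reaped entries.
 */
static void reap_iopoll_completions(struct io_ring_ctx *ctx)
{
	struct io_wq_work_node *pos, *start;

	wq_list_for_each(pos, start, &ctx->iopoll_list) {
		struct io_kiocb *req = container_of(pos, struct io_kiocb,
						    comp_list);

		/* acquire pairs with the smp_store_release() above: once
		 * iopoll_completed reads as 1, req->result is visible too */
		if (!smp_load_acquire(&req->iopoll_completed))
			break;
		/* ... post a CQE from req->result ... */
	}
}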
    3030             : 
    3031             : /*
    3032             :  * After the iocb has been issued, it's safe to be found on the poll list.
    3033             :  * Adding the kiocb to the list AFTER submission ensures that we don't
    3034             :  * find it from an io_do_iopoll() thread before the issuer is done
    3035             :  * accessing the kiocb cookie.
    3036             :  */
    3037           0 : static void io_iopoll_req_issued(struct io_kiocb *req, unsigned int issue_flags)
    3038             : {
    3039           0 :         struct io_ring_ctx *ctx = req->ctx;
    3040           0 :         const bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
    3041             : 
    3042             :         /* workqueue context doesn't hold uring_lock, grab it now */
    3043           0 :         if (unlikely(needs_lock))
    3044           0 :                 mutex_lock(&ctx->uring_lock);
    3045             : 
    3046             :         /*
    3047             :          * Track whether we have multiple files in our lists. This will impact
    3048             :          * how we do polling eventually: we don't spin if the requests are
    3049             :          * potentially on different devices.
    3050             :          */
    3051           0 :         if (wq_list_empty(&ctx->iopoll_list)) {
    3052           0 :                 ctx->poll_multi_queue = false;
    3053           0 :         } else if (!ctx->poll_multi_queue) {
    3054             :                 struct io_kiocb *list_req;
    3055             : 
    3056           0 :                 list_req = container_of(ctx->iopoll_list.first, struct io_kiocb,
    3057             :                                         comp_list);
    3058           0 :                 if (list_req->file != req->file)
    3059           0 :                         ctx->poll_multi_queue = true;
    3060             :         }
    3061             : 
    3062             :         /*
    3063             :          * For fast devices, IO may have already completed. If it has, add
    3064             :          * it to the front so we find it first.
    3065             :          */
    3066           0 :         if (READ_ONCE(req->iopoll_completed))
    3067           0 :                 wq_list_add_head(&req->comp_list, &ctx->iopoll_list);
    3068             :         else
    3069           0 :                 wq_list_add_tail(&req->comp_list, &ctx->iopoll_list);
    3070             : 
    3071           0 :         if (unlikely(needs_lock)) {
    3072             :                 /*
    3073             :                  * If IORING_SETUP_SQPOLL is enabled, sqes are either handled
    3074             :                  * in sq thread task context or in io worker task context. If
    3075             :                  * the current task context is the sq thread, we don't need to
    3076             :                  * check whether we should wake up the sq thread.
    3077             :                  */
    3078           0 :                 if ((ctx->flags & IORING_SETUP_SQPOLL) &&
    3079           0 :                     wq_has_sleeper(&ctx->sq_data->wait))
    3080           0 :                         wake_up(&ctx->sq_data->wait);
    3081             : 
    3082           0 :                 mutex_unlock(&ctx->uring_lock);
    3083             :         }
    3084           0 : }
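
/*
 * Editor's illustration: how an application opts into the polled
 * completion path that io_iopoll_req_issued() feeds. A minimal
 * userspace sketch using liburing's public API (error handling
 * elided); with IORING_SETUP_IOPOLL the file must be opened with
 * O_DIRECT (see io_rw_init_file() below rejecting !IOCB_DIRECT), and
 * 'buf' must be suitably aligned for direct IO.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>
#include <liburing.h>

static int read_polled(const char *path, void *buf, unsigned len)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int fd, ret;

	fd = open(path, O_RDONLY | O_DIRECT);
	io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL);

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fd, buf, len, 0);
	io_uring_submit(&ring);

	/* with IOPOLL, waiting spins in io_do_iopoll() rather than
	 * sleeping on an interrupt-driven completion */
	ret = io_uring_wait_cqe(&ring, &cqe);
	if (!ret) {
		ret = cqe->res;
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	close(fd);
	return ret;
}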
    3085             : 
    3086             : static bool io_bdev_nowait(struct block_device *bdev)
    3087             : {
    3088           0 :         return !bdev || blk_queue_nowait(bdev_get_queue(bdev));
    3089             : }
    3090             : 
    3091             : /*
    3092             :  * If we tracked the file through the SCM inflight mechanism, we could support
    3093             :  * any file. For now, just ensure that anything potentially problematic is done
    3094             :  * inline.
    3095             :  */
    3096           0 : static bool __io_file_supports_nowait(struct file *file, umode_t mode)
    3097             : {
    3098           0 :         if (S_ISBLK(mode)) {
    3099           0 :                 if (IS_ENABLED(CONFIG_BLOCK) &&
    3100           0 :                     io_bdev_nowait(I_BDEV(file->f_mapping->host)))
    3101             :                         return true;
    3102           0 :                 return false;
    3103             :         }
    3104           0 :         if (S_ISSOCK(mode))
    3105             :                 return true;
    3106           0 :         if (S_ISREG(mode)) {
    3107           0 :                 if (IS_ENABLED(CONFIG_BLOCK) &&
    3108           0 :                     io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
    3109           0 :                     file->f_op != &io_uring_fops)
    3110             :                         return true;
    3111           0 :                 return false;
    3112             :         }
    3113             : 
    3114             :         /* any ->read/write should understand O_NONBLOCK */
    3115           0 :         if (file->f_flags & O_NONBLOCK)
    3116             :                 return true;
    3117           0 :         return file->f_mode & FMODE_NOWAIT;
    3118             : }
    3119             : 
    3120             : /*
    3121             :  * If we tracked the file through the SCM inflight mechanism, we could support
    3122             :  * any file. For now, just ensure that anything potentially problematic is done
    3123             :  * inline.
    3124             :  */
    3125           0 : static unsigned int io_file_get_flags(struct file *file)
    3126             : {
    3127           0 :         umode_t mode = file_inode(file)->i_mode;
    3128           0 :         unsigned int res = 0;
    3129             : 
    3130           0 :         if (S_ISREG(mode))
    3131           0 :                 res |= FFS_ISREG;
    3132           0 :         if (__io_file_supports_nowait(file, mode))
    3133           0 :                 res |= FFS_NOWAIT;
    3134           0 :         return res;
    3135             : }
    3136             : 
    3137             : static inline bool io_file_supports_nowait(struct io_kiocb *req)
    3138             : {
    3139           0 :         return req->flags & REQ_F_SUPPORT_NOWAIT;
    3140             : }
    3141             : 
    3142           0 : static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
    3143             : {
    3144           0 :         struct kiocb *kiocb = &req->rw.kiocb;
    3145             :         unsigned ioprio;
    3146             :         int ret;
    3147             : 
    3148           0 :         kiocb->ki_pos = READ_ONCE(sqe->off);
    3149             : 
    3150           0 :         ioprio = READ_ONCE(sqe->ioprio);
    3151           0 :         if (ioprio) {
    3152           0 :                 ret = ioprio_check_cap(ioprio);
    3153           0 :                 if (ret)
    3154             :                         return ret;
    3155             : 
    3156           0 :                 kiocb->ki_ioprio = ioprio;
    3157             :         } else {
    3158           0 :                 kiocb->ki_ioprio = get_current_ioprio();
    3159             :         }
    3160             : 
    3161           0 :         req->imu = NULL;
    3162           0 :         req->rw.addr = READ_ONCE(sqe->addr);
    3163           0 :         req->rw.len = READ_ONCE(sqe->len);
    3164           0 :         req->rw.flags = READ_ONCE(sqe->rw_flags);
    3165           0 :         req->buf_index = READ_ONCE(sqe->buf_index);
    3166           0 :         return 0;
    3167             : }
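
/*
 * Editor's illustration: the SQE fields that io_prep_rw() consumes
 * above, as they are typically filled in from userspace. A hedged
 * liburing sketch ('ring' is assumed to be an initialized ring):
 */
#include <liburing.h>

static void prep_one_read(struct io_uring *ring, int fd, void *buf,
			  unsigned len, __u64 offset)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	/* fills sqe->addr, sqe->len and sqe->off */
	io_uring_prep_read(sqe, fd, buf, len, offset);
	sqe->ioprio = 0;	/* READ_ONCE(sqe->ioprio) above      */
	sqe->rw_flags = 0;	/* READ_ONCE(sqe->rw_flags), RWF_*   */
	sqe->buf_index = 0;	/* only meaningful for fixed buffers */
}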
    3168             : 
    3169             : static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
    3170             : {
    3171           0 :         switch (ret) {
    3172             :         case -EIOCBQUEUED:
    3173             :                 break;
    3174             :         case -ERESTARTSYS:
    3175             :         case -ERESTARTNOINTR:
    3176             :         case -ERESTARTNOHAND:
    3177             :         case -ERESTART_RESTARTBLOCK:
    3178             :                 /*
    3179             :                  * We can't just restart the syscall, since previously
    3180             :                  * submitted sqes may already be in progress. Just fail this
    3181             :                  * IO with EINTR.
    3182             :                  */
    3183           0 :                 ret = -EINTR;
    3184             :                 fallthrough;
    3185             :         default:
    3186           0 :                 kiocb->ki_complete(kiocb, ret);
    3187             :         }
    3188             : }
    3189             : 
    3190             : static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
    3191             : {
    3192           0 :         struct kiocb *kiocb = &req->rw.kiocb;
    3193             : 
    3194           0 :         if (kiocb->ki_pos != -1)
    3195           0 :                 return &kiocb->ki_pos;
    3196             : 
    3197           0 :         if (!(req->file->f_mode & FMODE_STREAM)) {
    3198           0 :                 req->flags |= REQ_F_CUR_POS;
    3199           0 :                 kiocb->ki_pos = req->file->f_pos;
    3200           0 :                 return &kiocb->ki_pos;
    3201             :         }
    3202             : 
    3203           0 :         kiocb->ki_pos = 0;
    3204             :         return NULL;
    3205             : }
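
/*
 * Editor's note: the -1 check above is what gives userspace "use (and
 * update) the current file position" semantics, mirroring plain
 * read(2)/write(2). A hedged liburing fragment (sqe, fd, buf and len
 * assumed from context):
 *
 *	io_uring_prep_read(sqe, fd, buf, len, (__u64)-1);
 *
 * On a non-stream file this reads at file->f_pos and advances it via
 * REQ_F_CUR_POS; on a stream (pipe, socket) the position is ignored.
 */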
    3206             : 
    3207           0 : static void kiocb_done(struct io_kiocb *req, ssize_t ret,
    3208             :                        unsigned int issue_flags)
    3209             : {
    3210           0 :         struct io_async_rw *io = req->async_data;
    3211             : 
    3212             :         /* add previously done IO, if any */
    3213           0 :         if (req_has_async_data(req) && io->bytes_done > 0) {
    3214           0 :                 if (ret < 0)
    3215           0 :                         ret = io->bytes_done;
    3216             :                 else
    3217           0 :                         ret += io->bytes_done;
    3218             :         }
    3219             : 
    3220           0 :         if (req->flags & REQ_F_CUR_POS)
    3221           0 :                 req->file->f_pos = req->rw.kiocb.ki_pos;
    3222           0 :         if (ret >= 0 && (req->rw.kiocb.ki_complete == io_complete_rw))
    3223           0 :                 __io_complete_rw(req, ret, issue_flags);
    3224             :         else
    3225           0 :                 io_rw_done(&req->rw.kiocb, ret);
    3226             : 
    3227           0 :         if (req->flags & REQ_F_REISSUE) {
    3228           0 :                 req->flags &= ~REQ_F_REISSUE;
    3229           0 :                 if (io_resubmit_prep(req))
    3230             :                         io_req_task_queue_reissue(req);
    3231             :                 else
    3232           0 :                         io_req_task_queue_fail(req, ret);
    3233             :         }
    3234           0 : }
    3235             : 
    3236           0 : static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter,
    3237             :                              struct io_mapped_ubuf *imu)
    3238             : {
    3239           0 :         size_t len = req->rw.len;
    3240           0 :         u64 buf_end, buf_addr = req->rw.addr;
    3241             :         size_t offset;
    3242             : 
    3243           0 :         if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
    3244             :                 return -EFAULT;
    3245             :         /* not inside the mapped region */
    3246           0 :         if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
    3247             :                 return -EFAULT;
    3248             : 
    3249             :         /*
    3250             :          * May not be at the start of the buffer; set the size appropriately
    3251             :          * and advance to the beginning.
    3252             :          */
    3253           0 :         offset = buf_addr - imu->ubuf;
    3254           0 :         iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
    3255             : 
    3256           0 :         if (offset) {
    3257             :                 /*
    3258             :                  * Don't use iov_iter_advance() here, as it's really slow for
    3259             :                  * using the latter parts of a big fixed buffer - it iterates
    3260             :                  * over each segment manually. We can cheat a bit here, because
    3261             :                  * we know that:
    3262             :                  *
    3263             :                  * 1) it's a BVEC iter, we set it up
    3264             :                  * 2) all bvecs are PAGE_SIZE in size, except potentially the
    3265             :                  *    first and last bvec
    3266             :                  *
    3267             :                  * So just find our index, and adjust the iterator afterwards.
    3268             :                  * If the offset is within the first bvec (or the whole first
    3269             :                  * bvec), just use iov_iter_advance(). This makes it easier
    3270             :                  * since we can just skip the first segment, which may not
    3271             :                  * be PAGE_SIZE aligned.
    3272             :                  */
    3273           0 :                 const struct bio_vec *bvec = imu->bvec;
    3274             : 
    3275           0 :                 if (offset <= bvec->bv_len) {
    3276           0 :                         iov_iter_advance(iter, offset);
    3277             :                 } else {
    3278             :                         unsigned long seg_skip;
    3279             : 
    3280             :                         /* skip first vec */
    3281           0 :                         offset -= bvec->bv_len;
    3282           0 :                         seg_skip = 1 + (offset >> PAGE_SHIFT);
    3283             : 
    3284           0 :                         iter->bvec = bvec + seg_skip;
    3285           0 :                         iter->nr_segs -= seg_skip;
    3286           0 :                         iter->count -= bvec->bv_len + offset;
    3287           0 :                         iter->iov_offset = offset & ~PAGE_MASK;
    3288             :                 }
    3289             :         }
    3290             : 
    3291             :         return 0;
    3292             : }
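
/*
 * Editor's worked example for the fast-path arithmetic above, assuming
 * PAGE_SIZE == 4096: with a first bvec of bv_len == 1024 and a request
 * starting 10000 bytes into the mapped buffer, we take the else branch:
 *
 *	offset     = 10000 - 1024 = 8976	(skip the first vec)
 *	seg_skip   = 1 + (8976 >> 12) = 3	(land in bvec 3)
 *	iov_offset = 8976 & 4095 = 784		(byte 784 of bvec 3)
 *
 * and iter->count shrinks by 1024 + 8976 = 10000, i.e. by exactly the
 * original offset, leaving just the requested length.
 */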
    3293             : 
    3294           0 : static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter,
    3295             :                            unsigned int issue_flags)
    3296             : {
    3297           0 :         struct io_mapped_ubuf *imu = req->imu;
    3298           0 :         u16 index, buf_index = req->buf_index;
    3299             : 
    3300           0 :         if (likely(!imu)) {
    3301           0 :                 struct io_ring_ctx *ctx = req->ctx;
    3302             : 
    3303           0 :                 if (unlikely(buf_index >= ctx->nr_user_bufs))
    3304             :                         return -EFAULT;
    3305           0 :                 io_req_set_rsrc_node(req, ctx, issue_flags);
    3306           0 :                 index = array_index_nospec(buf_index, ctx->nr_user_bufs);
    3307           0 :                 imu = READ_ONCE(ctx->user_bufs[index]);
    3308           0 :                 req->imu = imu;
    3309             :         }
    3310           0 :         return __io_import_fixed(req, rw, iter, imu);
    3311             : }
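
/*
 * Editor's illustration: registering the buffers that io_import_fixed()
 * later resolves through ctx->user_bufs[]. A minimal liburing sketch
 * (error handling elided; 'iov' must remain valid while registered):
 */
#include <liburing.h>

static int read_fixed_example(struct io_uring *ring, struct iovec *iov,
			      unsigned nr, int fd)
{
	struct io_uring_sqe *sqe;
	int ret;

	ret = io_uring_register_buffers(ring, iov, nr);
	if (ret)
		return ret;

	sqe = io_uring_get_sqe(ring);
	/* buf_index 0 selects iov[0]; the pages were pinned at register
	 * time, so no per-IO page pinning is needed on the fixed path */
	io_uring_prep_read_fixed(sqe, fd, iov[0].iov_base,
				 iov[0].iov_len, 0, 0);
	return io_uring_submit(ring);
}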
    3312             : 
    3313             : static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
    3314             : {
    3315           0 :         if (needs_lock)
    3316           0 :                 mutex_unlock(&ctx->uring_lock);
    3317             : }
    3318             : 
    3319             : static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
    3320             : {
    3321             :         /*
    3322             :          * "Normal" inline submissions always hold the uring_lock, since we
    3323             :          * grab it from the system call. Same is true for the SQPOLL offload.
    3324             :          * The only exception is when we've detached the request and issue it
    3325             :          * from an async worker thread, grab the lock for that case.
    3326             :          */
    3327           0 :         if (needs_lock)
    3328           0 :                 mutex_lock(&ctx->uring_lock);
    3329             : }
    3330             : 
    3331             : static void io_buffer_add_list(struct io_ring_ctx *ctx,
    3332             :                                struct io_buffer_list *bl, unsigned int bgid)
    3333             : {
    3334             :         struct list_head *list;
    3335             : 
    3336           0 :         list = &ctx->io_buffers[hash_32(bgid, IO_BUFFERS_HASH_BITS)];
    3337           0 :         INIT_LIST_HEAD(&bl->buf_list);
    3338           0 :         bl->bgid = bgid;
    3339           0 :         list_add(&bl->list, list);
    3340             : }
    3341             : 
    3342           0 : static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
    3343             :                                           int bgid, unsigned int issue_flags)
    3344             : {
    3345           0 :         struct io_buffer *kbuf = req->kbuf;
    3346           0 :         bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
    3347           0 :         struct io_ring_ctx *ctx = req->ctx;
    3348             :         struct io_buffer_list *bl;
    3349             : 
    3350           0 :         if (req->flags & REQ_F_BUFFER_SELECTED)
    3351             :                 return kbuf;
    3352             : 
    3353           0 :         io_ring_submit_lock(ctx, needs_lock);
    3354             : 
    3355             :         lockdep_assert_held(&ctx->uring_lock);
    3356             : 
    3357           0 :         bl = io_buffer_get_list(ctx, bgid);
    3358           0 :         if (bl && !list_empty(&bl->buf_list)) {
    3359           0 :                 kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list);
    3360           0 :                 list_del(&kbuf->list);
    3361           0 :                 if (*len > kbuf->len)
    3362           0 :                         *len = kbuf->len;
    3363           0 :                 req->flags |= REQ_F_BUFFER_SELECTED;
    3364           0 :                 req->kbuf = kbuf;
    3365             :         } else {
    3366             :                 kbuf = ERR_PTR(-ENOBUFS);
    3367             :         }
    3368             : 
    3369           0 :         io_ring_submit_unlock(req->ctx, needs_lock);
    3370             :         return kbuf;
    3371             : }
    3372             : 
    3373             : static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
    3374             :                                         unsigned int issue_flags)
    3375             : {
    3376             :         struct io_buffer *kbuf;
    3377             :         u16 bgid;
    3378             : 
    3379           0 :         bgid = req->buf_index;
    3380           0 :         kbuf = io_buffer_select(req, len, bgid, issue_flags);
    3381           0 :         if (IS_ERR(kbuf))
    3382             :                 return kbuf;
    3383           0 :         return u64_to_user_ptr(kbuf->addr);
    3384             : }
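
/*
 * Editor's illustration: the userspace half of the buffer selection
 * done in io_buffer_select() above. A hedged liburing sketch: publish
 * nr_bufs buffers of buf_len bytes under group 'bgid', then issue a
 * read that lets the kernel pick one of them.
 */
#include <liburing.h>

static int read_provided_buf(struct io_uring *ring, int fd, void *base,
			     unsigned buf_len, unsigned nr_bufs,
			     unsigned short bgid)
{
	struct io_uring_sqe *sqe;

	/* publish nr_bufs buffers carved out of 'base' under group bgid */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_provide_buffers(sqe, base, buf_len, nr_bufs, bgid, 0);
	io_uring_submit(ring);

	/* read with no address: the kernel picks a buffer from the group;
	 * the chosen buffer id comes back in cqe->flags, shifted by
	 * IORING_CQE_BUFFER_SHIFT */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_read(sqe, fd, NULL, buf_len, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;	/* sets REQ_F_BUFFER_SELECT */
	sqe->buf_group = bgid;			/* read back as req->buf_index */
	return io_uring_submit(ring);
}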
    3385             : 
    3386             : #ifdef CONFIG_COMPAT
    3387             : static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
    3388             :                                 unsigned int issue_flags)
    3389             : {
    3390             :         struct compat_iovec __user *uiov;
    3391             :         compat_ssize_t clen;
    3392             :         void __user *buf;
    3393             :         ssize_t len;
    3394             : 
    3395             :         uiov = u64_to_user_ptr(req->rw.addr);
    3396             :         if (!access_ok(uiov, sizeof(*uiov)))
    3397             :                 return -EFAULT;
    3398             :         if (__get_user(clen, &uiov->iov_len))
    3399             :                 return -EFAULT;
    3400             :         if (clen < 0)
    3401             :                 return -EINVAL;
    3402             : 
    3403             :         len = clen;
    3404             :         buf = io_rw_buffer_select(req, &len, issue_flags);
    3405             :         if (IS_ERR(buf))
    3406             :                 return PTR_ERR(buf);
    3407             :         iov[0].iov_base = buf;
    3408             :         iov[0].iov_len = (compat_size_t) len;
    3409             :         return 0;
    3410             : }
    3411             : #endif
    3412             : 
    3413           0 : static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
    3414             :                                       unsigned int issue_flags)
    3415             : {
    3416           0 :         struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
    3417             :         void __user *buf;
    3418             :         ssize_t len;
    3419             : 
    3420           0 :         if (copy_from_user(iov, uiov, sizeof(*uiov)))
    3421             :                 return -EFAULT;
    3422             : 
    3423           0 :         len = iov[0].iov_len;
    3424           0 :         if (len < 0)
    3425             :                 return -EINVAL;
    3426           0 :         buf = io_rw_buffer_select(req, &len, issue_flags);
    3427           0 :         if (IS_ERR(buf))
    3428           0 :                 return PTR_ERR(buf);
    3429           0 :         iov[0].iov_base = buf;
    3430           0 :         iov[0].iov_len = len;
    3431           0 :         return 0;
    3432             : }
    3433             : 
    3434           0 : static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
    3435             :                                     unsigned int issue_flags)
    3436             : {
    3437           0 :         if (req->flags & REQ_F_BUFFER_SELECTED) {
    3438           0 :                 struct io_buffer *kbuf = req->kbuf;
    3439             : 
    3440           0 :                 iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
    3441           0 :                 iov[0].iov_len = kbuf->len;
    3442           0 :                 return 0;
    3443             :         }
    3444           0 :         if (req->rw.len != 1)
    3445             :                 return -EINVAL;
    3446             : 
    3447             : #ifdef CONFIG_COMPAT
    3448             :         if (req->ctx->compat)
    3449             :                 return io_compat_import(req, iov, issue_flags);
    3450             : #endif
    3451             : 
    3452           0 :         return __io_iov_buffer_select(req, iov, issue_flags);
    3453             : }
    3454             : 
    3455           0 : static struct iovec *__io_import_iovec(int rw, struct io_kiocb *req,
    3456             :                                        struct io_rw_state *s,
    3457             :                                        unsigned int issue_flags)
    3458             : {
    3459           0 :         struct iov_iter *iter = &s->iter;
    3460           0 :         u8 opcode = req->opcode;
    3461             :         struct iovec *iovec;
    3462             :         void __user *buf;
    3463             :         size_t sqe_len;
    3464             :         ssize_t ret;
    3465             : 
    3466           0 :         if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
    3467           0 :                 ret = io_import_fixed(req, rw, iter, issue_flags);
    3468           0 :                 if (ret)
    3469           0 :                         return ERR_PTR(ret);
    3470             :                 return NULL;
    3471             :         }
    3472             : 
    3473             :         /* buffer index is only valid with fixed read/write or buffer select */
    3474           0 :         if (unlikely(req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT)))
    3475             :                 return ERR_PTR(-EINVAL);
    3476             : 
    3477           0 :         buf = u64_to_user_ptr(req->rw.addr);
    3478           0 :         sqe_len = req->rw.len;
    3479             : 
    3480           0 :         if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
    3481           0 :                 if (req->flags & REQ_F_BUFFER_SELECT) {
    3482           0 :                         buf = io_rw_buffer_select(req, &sqe_len, issue_flags);
    3483           0 :                         if (IS_ERR(buf))
    3484             :                                 return ERR_CAST(buf);
    3485           0 :                         req->rw.len = sqe_len;
    3486             :                 }
    3487             : 
    3488           0 :                 ret = import_single_range(rw, buf, sqe_len, s->fast_iov, iter);
    3489           0 :                 if (ret)
    3490           0 :                         return ERR_PTR(ret);
    3491             :                 return NULL;
    3492             :         }
    3493             : 
    3494           0 :         iovec = s->fast_iov;
    3495           0 :         if (req->flags & REQ_F_BUFFER_SELECT) {
    3496           0 :                 ret = io_iov_buffer_select(req, iovec, issue_flags);
    3497           0 :                 if (ret)
    3498           0 :                         return ERR_PTR(ret);
    3499           0 :                 iov_iter_init(iter, rw, iovec, 1, iovec->iov_len);
    3500           0 :                 return NULL;
    3501             :         }
    3502             : 
    3503           0 :         ret = __import_iovec(rw, buf, sqe_len, UIO_FASTIOV, &iovec, iter,
    3504           0 :                               req->ctx->compat);
    3505           0 :         if (unlikely(ret < 0))
    3506           0 :                 return ERR_PTR(ret);
    3507           0 :         return iovec;
    3508             : }
    3509             : 
    3510             : static inline int io_import_iovec(int rw, struct io_kiocb *req,
    3511             :                                   struct iovec **iovec, struct io_rw_state *s,
    3512             :                                   unsigned int issue_flags)
    3513             : {
    3514           0 :         *iovec = __io_import_iovec(rw, req, s, issue_flags);
    3515           0 :         if (unlikely(IS_ERR(*iovec)))
    3516           0 :                 return PTR_ERR(*iovec);
    3517             : 
    3518           0 :         iov_iter_save_state(&s->iter, &s->iter_state);
    3519             :         return 0;
    3520             : }
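
/*
 * Editor's note: the iov_iter_save_state() above is what makes the
 * -EAGAIN retry paths in io_read() and io_write() work. A kernel-side
 * sketch of the pattern, using the same helpers this file relies on
 * ('file', 'kiocb' and 'iter' assumed set up by the caller):
 */
#include <linux/fs.h>
#include <linux/uio.h>

static ssize_t read_with_rewind(struct file *file, struct kiocb *kiocb,
				struct iov_iter *iter)
{
	struct iov_iter_state state;
	ssize_t ret;

	iov_iter_save_state(iter, &state);	/* snapshot before issue */
	ret = call_read_iter(file, kiocb, iter);
	if (ret == -EAGAIN)
		/* ->read_iter() may have consumed part of the iter even on
		 * failure; rewind so a later reissue sees it intact */
		iov_iter_restore(iter, &state);
	return ret;
}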
    3521             : 
    3522             : static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
    3523             : {
    3524           0 :         return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
    3525             : }
    3526             : 
    3527             : /*
    3528             :  * For files that don't have ->read_iter() and ->write_iter(), handle them
    3529             :  * by looping over ->read() or ->write() manually.
    3530             :  */
    3531           0 : static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
    3532             : {
    3533           0 :         struct kiocb *kiocb = &req->rw.kiocb;
    3534           0 :         struct file *file = req->file;
    3535           0 :         ssize_t ret = 0;
    3536             :         loff_t *ppos;
    3537             : 
    3538             :         /*
    3539             :          * We don't support polled IO through this interface, and we can't
    3540             :          * support non-blocking either. For the latter, this just causes
    3541             :          * the kiocb to be handled from an async context.
    3542             :          */
    3543           0 :         if (kiocb->ki_flags & IOCB_HIPRI)
    3544             :                 return -EOPNOTSUPP;
    3545           0 :         if ((kiocb->ki_flags & IOCB_NOWAIT) &&
    3546           0 :             !(kiocb->ki_filp->f_flags & O_NONBLOCK))
    3547             :                 return -EAGAIN;
    3548             : 
    3549           0 :         ppos = io_kiocb_ppos(kiocb);
    3550             : 
    3551           0 :         while (iov_iter_count(iter)) {
    3552             :                 struct iovec iovec;
    3553             :                 ssize_t nr;
    3554             : 
    3555           0 :                 if (!iov_iter_is_bvec(iter)) {
    3556             :                         iovec = iov_iter_iovec(iter);
    3557             :                 } else {
    3558           0 :                         iovec.iov_base = u64_to_user_ptr(req->rw.addr);
    3559           0 :                         iovec.iov_len = req->rw.len;
    3560             :                 }
    3561             : 
    3562           0 :                 if (rw == READ) {
    3563           0 :                         nr = file->f_op->read(file, iovec.iov_base,
    3564             :                                               iovec.iov_len, ppos);
    3565             :                 } else {
    3566           0 :                         nr = file->f_op->write(file, iovec.iov_base,
    3567             :                                                iovec.iov_len, ppos);
    3568             :                 }
    3569             : 
    3570           0 :                 if (nr < 0) {
    3571           0 :                         if (!ret)
    3572           0 :                                 ret = nr;
    3573             :                         break;
    3574             :                 }
    3575           0 :                 ret += nr;
    3576           0 :                 if (!iov_iter_is_bvec(iter)) {
    3577           0 :                         iov_iter_advance(iter, nr);
    3578             :                 } else {
    3579           0 :                         req->rw.addr += nr;
    3580           0 :                         req->rw.len -= nr;
    3581           0 :                         if (!req->rw.len)
    3582             :                                 break;
    3583             :                 }
    3584           0 :                 if (nr != iovec.iov_len)
    3585             :                         break;
    3586             :         }
    3587             : 
    3588             :         return ret;
    3589             : }
    3590             : 
    3591           0 : static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
    3592             :                           const struct iovec *fast_iov, struct iov_iter *iter)
    3593             : {
    3594           0 :         struct io_async_rw *rw = req->async_data;
    3595             : 
    3596           0 :         memcpy(&rw->s.iter, iter, sizeof(*iter));
    3597           0 :         rw->free_iovec = iovec;
    3598           0 :         rw->bytes_done = 0;
    3599             :         /* can only be fixed buffers, no need to do anything */
    3600           0 :         if (iov_iter_is_bvec(iter))
    3601             :                 return;
    3602           0 :         if (!iovec) {
    3603           0 :                 unsigned iov_off = 0;
    3604             : 
    3605           0 :                 rw->s.iter.iov = rw->s.fast_iov;
    3606           0 :                 if (iter->iov != fast_iov) {
    3607           0 :                         iov_off = iter->iov - fast_iov;
    3608           0 :                         rw->s.iter.iov += iov_off;
    3609             :                 }
    3610           0 :                 if (rw->s.fast_iov != fast_iov)
    3611           0 :                         memcpy(rw->s.fast_iov + iov_off, fast_iov + iov_off,
    3612           0 :                                sizeof(struct iovec) * iter->nr_segs);
    3613             :         } else {
    3614           0 :                 req->flags |= REQ_F_NEED_CLEANUP;
    3615             :         }
    3616             : }
    3617             : 
    3618           0 : static inline bool io_alloc_async_data(struct io_kiocb *req)
    3619             : {
    3620           0 :         WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
    3621           0 :         req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
    3622           0 :         if (req->async_data) {
    3623           0 :                 req->flags |= REQ_F_ASYNC_DATA;
    3624           0 :                 return false;
    3625             :         }
    3626             :         return true;
    3627             : }
    3628             : 
    3629           0 : static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
    3630             :                              struct io_rw_state *s, bool force)
    3631             : {
    3632           0 :         if (!force && !io_op_defs[req->opcode].needs_async_setup)
    3633             :                 return 0;
    3634           0 :         if (!req_has_async_data(req)) {
    3635             :                 struct io_async_rw *iorw;
    3636             : 
    3637           0 :                 if (io_alloc_async_data(req)) {
    3638           0 :                         kfree(iovec);
    3639           0 :                         return -ENOMEM;
    3640             :                 }
    3641             : 
    3642           0 :                 io_req_map_rw(req, iovec, s->fast_iov, &s->iter);
    3643           0 :                 iorw = req->async_data;
    3644             :                 /* we've copied and mapped the iter, ensure state is saved */
    3645           0 :                 iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state);
    3646             :         }
    3647             :         return 0;
    3648             : }
    3649             : 
    3650           0 : static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
    3651             : {
    3652           0 :         struct io_async_rw *iorw = req->async_data;
    3653             :         struct iovec *iov;
    3654             :         int ret;
    3655             : 
    3656             :         /* submission path, ->uring_lock should already be taken */
    3657           0 :         ret = io_import_iovec(rw, req, &iov, &iorw->s, 0);
    3658           0 :         if (unlikely(ret < 0))
    3659             :                 return ret;
    3660             : 
    3661           0 :         iorw->bytes_done = 0;
    3662           0 :         iorw->free_iovec = iov;
    3663           0 :         if (iov)
    3664           0 :                 req->flags |= REQ_F_NEED_CLEANUP;
    3665             :         return 0;
    3666             : }
    3667             : 
    3668             : /*
    3669             :  * This is our waitqueue callback handler, registered through __folio_lock_async()
    3670             :  * when we initially tried to do the IO with the iocb and armed our waitqueue.
    3671             :  * This gets called when the page is unlocked, and we generally expect that to
    3672             :  * happen when the page IO is completed and the page is now uptodate. This will
    3673             :  * queue a task_work based retry of the operation, attempting to copy the data
    3674             :  * again. If the latter fails because the page was NOT uptodate, then we will
    3675             :  * do a thread based blocking retry of the operation. That's the unexpected
    3676             :  * slow path.
    3677             :  */
    3678           0 : static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
    3679             :                              int sync, void *arg)
    3680             : {
    3681             :         struct wait_page_queue *wpq;
    3682           0 :         struct io_kiocb *req = wait->private;
    3683           0 :         struct wait_page_key *key = arg;
    3684             : 
    3685           0 :         wpq = container_of(wait, struct wait_page_queue, wait);
    3686             : 
    3687           0 :         if (!wake_page_match(wpq, key))
    3688             :                 return 0;
    3689             : 
    3690           0 :         req->rw.kiocb.ki_flags &= ~IOCB_WAITQ;
    3691           0 :         list_del_init(&wait->entry);
    3692           0 :         io_req_task_queue(req);
    3693           0 :         return 1;
    3694             : }
    3695             : 
    3696             : /*
    3697             :  * This controls whether a given IO request should be armed for async page
    3698             :  * based retry. If we return false here, the request is handed to the async
    3699             :  * worker threads for retry. If we're doing buffered reads on a regular file,
    3700             :  * we prepare a private wait_page_queue entry and retry the operation. This
    3701             :  * will either succeed because the page is now uptodate and unlocked, or it
    3702             :  * will register a callback when the page is unlocked at IO completion. Through
    3703             :  * that callback, io_uring uses task_work to setup a retry of the operation.
    3704             :  * That retry will attempt the buffered read again. The retry will generally
    3705             :  * succeed, or in rare cases where it fails, we then fall back to using the
    3706             :  * async worker threads for a blocking retry.
    3707             :  */
    3708           0 : static bool io_rw_should_retry(struct io_kiocb *req)
    3709             : {
    3710           0 :         struct io_async_rw *rw = req->async_data;
    3711           0 :         struct wait_page_queue *wait = &rw->wpq;
    3712           0 :         struct kiocb *kiocb = &req->rw.kiocb;
    3713             : 
    3714             :         /* never retry for NOWAIT, we just complete with -EAGAIN */
    3715           0 :         if (req->flags & REQ_F_NOWAIT)
    3716             :                 return false;
    3717             : 
    3718             :         /* Only for buffered IO */
    3719           0 :         if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
    3720             :                 return false;
    3721             : 
    3722             :         /*
    3723             :          * just use poll if we can, and don't attempt if the fs doesn't
    3724             :          * support callback based unlocks
    3725             :          */
    3726           0 :         if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
    3727             :                 return false;
    3728             : 
    3729           0 :         wait->wait.func = io_async_buf_func;
    3730           0 :         wait->wait.private = req;
    3731           0 :         wait->wait.flags = 0;
    3732           0 :         INIT_LIST_HEAD(&wait->wait.entry);
    3733           0 :         kiocb->ki_flags |= IOCB_WAITQ;
    3734           0 :         kiocb->ki_flags &= ~IOCB_NOWAIT;
    3735           0 :         kiocb->ki_waitq = wait;
    3736           0 :         return true;
    3737             : }
    3738             : 
    3739           0 : static inline int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
    3740             : {
    3741           0 :         if (likely(req->file->f_op->read_iter))
    3742           0 :                 return call_read_iter(req->file, &req->rw.kiocb, iter);
    3743           0 :         else if (req->file->f_op->read)
    3744           0 :                 return loop_rw_iter(READ, req, iter);
    3745             :         else
    3746             :                 return -EINVAL;
    3747             : }
    3748             : 
    3749             : static bool need_read_all(struct io_kiocb *req)
    3750             : {
    3751           0 :         return req->flags & REQ_F_ISREG ||
    3752           0 :                 S_ISBLK(file_inode(req->file)->i_mode);
    3753             : }
    3754             : 
    3755           0 : static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
    3756             : {
    3757           0 :         struct kiocb *kiocb = &req->rw.kiocb;
    3758           0 :         struct io_ring_ctx *ctx = req->ctx;
    3759           0 :         struct file *file = req->file;
    3760             :         int ret;
    3761             : 
    3762           0 :         if (unlikely(!file || !(file->f_mode & mode)))
    3763             :                 return -EBADF;
    3764             : 
    3765           0 :         if (!io_req_ffs_set(req))
    3766           0 :                 req->flags |= io_file_get_flags(file) << REQ_F_SUPPORT_NOWAIT_BIT;
    3767             : 
    3768           0 :         kiocb->ki_flags = iocb_flags(file);
    3769           0 :         ret = kiocb_set_rw_flags(kiocb, req->rw.flags);
    3770           0 :         if (unlikely(ret))
    3771             :                 return ret;
    3772             : 
    3773             :         /*
    3774             :          * If the file is marked O_NONBLOCK, still allow retry for it if it
    3775             :          * supports async. Otherwise it's impossible to use O_NONBLOCK files
    3776             :          * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
    3777             :          */
    3778           0 :         if ((kiocb->ki_flags & IOCB_NOWAIT) ||
    3779           0 :             ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
    3780           0 :                 req->flags |= REQ_F_NOWAIT;
    3781             : 
    3782           0 :         if (ctx->flags & IORING_SETUP_IOPOLL) {
    3783           0 :                 if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
    3784             :                         return -EOPNOTSUPP;
    3785             : 
    3786           0 :                 kiocb->private = NULL;
    3787           0 :                 kiocb->ki_flags |= IOCB_HIPRI | IOCB_ALLOC_CACHE;
    3788           0 :                 kiocb->ki_complete = io_complete_rw_iopoll;
    3789           0 :                 req->iopoll_completed = 0;
    3790             :         } else {
    3791           0 :                 if (kiocb->ki_flags & IOCB_HIPRI)
    3792             :                         return -EINVAL;
    3793           0 :                 kiocb->ki_complete = io_complete_rw;
    3794             :         }
    3795             : 
    3796             :         return 0;
    3797             : }
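
/*
 * Editor's note: kiocb_set_rw_flags() above translates per-IO RWF_*
 * flags supplied by userspace into IOCB_* kiocb flags. A hedged
 * fragment of the submitting side (sqe, fd, buf, len and off assumed
 * from context):
 *
 *	io_uring_prep_write(sqe, fd, buf, len, off);
 *	sqe->rw_flags = RWF_DSYNC | RWF_NOWAIT;
 *
 * RWF_DSYNC gives this one write O_DSYNC semantics, and RWF_NOWAIT
 * forces IOCB_NOWAIT, so the request completes with -EAGAIN instead
 * of blocking or being retried.
 */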
    3798             : 
    3799           0 : static int io_read(struct io_kiocb *req, unsigned int issue_flags)
    3800             : {
    3801           0 :         struct io_rw_state __s, *s = &__s;
    3802             :         struct iovec *iovec;
    3803           0 :         struct kiocb *kiocb = &req->rw.kiocb;
    3804           0 :         bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
    3805             :         struct io_async_rw *rw;
    3806             :         ssize_t ret, ret2;
    3807             :         loff_t *ppos;
    3808             : 
    3809           0 :         if (!req_has_async_data(req)) {
    3810           0 :                 ret = io_import_iovec(READ, req, &iovec, s, issue_flags);
    3811           0 :                 if (unlikely(ret < 0))
    3812             :                         return ret;
    3813             :         } else {
    3814             :                 /*
    3815             :                  * It's safe, and required, to re-import if we're using
    3816             :                  * provided buffers, as we dropped the selected one before retry.
    3817             :                  */
    3818           0 :                 if (req->flags & REQ_F_BUFFER_SELECT) {
    3819           0 :                         ret = io_import_iovec(READ, req, &iovec, s, issue_flags);
    3820           0 :                         if (unlikely(ret < 0))
    3821             :                                 return ret;
    3822             :                 }
    3823             : 
    3824           0 :                 rw = req->async_data;
    3825           0 :                 s = &rw->s;
    3826             :                 /*
    3827             :                  * We come here from an earlier attempt, restore our state to
    3828             :                  * match in case it doesn't. It's cheap enough that we don't
    3829             :                  * need to make this conditional.
    3830             :                  */
    3831           0 :                 iov_iter_restore(&s->iter, &s->iter_state);
    3832           0 :                 iovec = NULL;
    3833             :         }
    3834           0 :         ret = io_rw_init_file(req, FMODE_READ);
    3835           0 :         if (unlikely(ret)) {
    3836           0 :                 kfree(iovec);
    3837           0 :                 return ret;
    3838             :         }
    3839           0 :         req->result = iov_iter_count(&s->iter);
    3840             : 
    3841           0 :         if (force_nonblock) {
    3842             :                 /* If the file doesn't support async, just async punt */
    3843           0 :                 if (unlikely(!io_file_supports_nowait(req))) {
    3844           0 :                         ret = io_setup_async_rw(req, iovec, s, true);
    3845           0 :                         return ret ?: -EAGAIN;
    3846             :                 }
    3847           0 :                 kiocb->ki_flags |= IOCB_NOWAIT;
    3848             :         } else {
    3849             :                 /* Ensure we clear previously set non-block flag */
    3850           0 :                 kiocb->ki_flags &= ~IOCB_NOWAIT;
    3851             :         }
    3852             : 
    3853           0 :         ppos = io_kiocb_update_pos(req);
    3854             : 
    3855           0 :         ret = rw_verify_area(READ, req->file, ppos, req->result);
    3856           0 :         if (unlikely(ret)) {
    3857           0 :                 kfree(iovec);
    3858           0 :                 return ret;
    3859             :         }
    3860             : 
    3861           0 :         ret = io_iter_do_read(req, &s->iter);
    3862             : 
    3863           0 :         if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
    3864           0 :                 req->flags &= ~REQ_F_REISSUE;
    3865             :                 /* if we can poll, just do that */
    3866           0 :                 if (req->opcode == IORING_OP_READ && file_can_poll(req->file))
    3867             :                         return -EAGAIN;
    3868             :                 /* IOPOLL retry should happen for io-wq threads */
    3869           0 :                 if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
    3870             :                         goto done;
    3871             :                 /* no retry on NONBLOCK nor RWF_NOWAIT */
    3872           0 :                 if (req->flags & REQ_F_NOWAIT)
    3873             :                         goto done;
    3874             :                 ret = 0;
    3875           0 :         } else if (ret == -EIOCBQUEUED) {
    3876             :                 goto out_free;
    3877           0 :         } else if (ret == req->result || ret <= 0 || !force_nonblock ||
    3878           0 :                    (req->flags & REQ_F_NOWAIT) || !need_read_all(req)) {
    3879             :                 /* read all, failed, already did sync or don't want to retry */
    3880             :                 goto done;
    3881             :         }
    3882             : 
    3883             :         /*
    3884             :          * Don't depend on the iter state matching what was consumed, or being
    3885             :          * untouched in case of error. Restore it and we'll advance it
    3886             :          * manually if we need to.
    3887             :          */
    3888           0 :         iov_iter_restore(&s->iter, &s->iter_state);
    3889             : 
    3890           0 :         ret2 = io_setup_async_rw(req, iovec, s, true);
    3891           0 :         if (ret2)
    3892             :                 return ret2;
    3893             : 
    3894           0 :         iovec = NULL;
    3895           0 :         rw = req->async_data;
    3896           0 :         s = &rw->s;
    3897             :         /*
    3898             :          * Now use our persistent iterator and state, if we aren't already.
    3899             :          * We've restored and mapped the iter to match.
    3900             :          */
    3901             : 
    3902             :         do {
    3903             :                 /*
    3904             :                  * We end up here because of a partial read, either from
    3905             :                  * above or inside this loop. Advance the iter by the bytes
    3906             :                  * that were consumed.
    3907             :                  */
    3908           0 :                 iov_iter_advance(&s->iter, ret);
    3909           0 :                 if (!iov_iter_count(&s->iter))
    3910             :                         break;
    3911           0 :                 rw->bytes_done += ret;
    3912           0 :                 iov_iter_save_state(&s->iter, &s->iter_state);
    3913             : 
    3914             :                 /* if we can retry, do so with the callbacks armed */
    3915           0 :                 if (!io_rw_should_retry(req)) {
    3916           0 :                         kiocb->ki_flags &= ~IOCB_WAITQ;
    3917           0 :                         return -EAGAIN;
    3918             :                 }
    3919             : 
    3920             :                 /*
    3921             :                  * Now retry read with the IOCB_WAITQ parts set in the iocb. If
    3922             :                  * we get -EIOCBQUEUED, then we'll get a notification when the
    3923             :                  * desired page gets unlocked. We can also get a partial read
    3924             :                  * here, and if we do, then just retry at the new offset.
    3925             :                  */
    3926           0 :                 ret = io_iter_do_read(req, &s->iter);
    3927           0 :                 if (ret == -EIOCBQUEUED)
    3928             :                         return 0;
    3929             :                 /* we got some bytes, but not all; retry */
    3930           0 :                 kiocb->ki_flags &= ~IOCB_WAITQ;
    3931           0 :                 iov_iter_restore(&s->iter, &s->iter_state);
    3932           0 :         } while (ret > 0);
    3933             : done:
    3934           0 :         kiocb_done(req, ret, issue_flags);
    3935             : out_free:
    3936             :         /* it's faster to check here than to delegate to kfree */
    3937           0 :         if (iovec)
    3938           0 :                 kfree(iovec);
    3939             :         return 0;
    3940             : }
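
/*
 * Editor's worked example for the retry loop above: a 64KiB buffered
 * read whose first nonblocking attempt returns 16384 bytes enters the
 * loop, advances the iter by 16384 and records rw->bytes_done = 16384.
 * Assuming the file sets FMODE_BUF_RASYNC and isn't pollable,
 * io_rw_should_retry() arms IOCB_WAITQ and the read is reissued; if
 * the retry returns the remaining 49152 bytes, the loop exits and
 * kiocb_done() adds io->bytes_done back in, so the CQE reports the
 * full 65536.
 */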
    3941             : 
    3942           0 : static int io_write(struct io_kiocb *req, unsigned int issue_flags)
    3943             : {
    3944           0 :         struct io_rw_state __s, *s = &__s;
    3945             :         struct iovec *iovec;
    3946           0 :         struct kiocb *kiocb = &req->rw.kiocb;
    3947           0 :         bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
    3948             :         ssize_t ret, ret2;
    3949             :         loff_t *ppos;
    3950             : 
    3951           0 :         if (!req_has_async_data(req)) {
    3952           0 :                 ret = io_import_iovec(WRITE, req, &iovec, s, issue_flags);
    3953           0 :                 if (unlikely(ret < 0))
    3954             :                         return ret;
    3955             :         } else {
    3956           0 :                 struct io_async_rw *rw = req->async_data;
    3957             : 
    3958           0 :                 s = &rw->s;
    3959           0 :                 iov_iter_restore(&s->iter, &s->iter_state);
    3960           0 :                 iovec = NULL;
    3961             :         }
    3962           0 :         ret = io_rw_init_file(req, FMODE_WRITE);
    3963           0 :         if (unlikely(ret)) {
    3964           0 :                 kfree(iovec);
    3965           0 :                 return ret;
    3966             :         }
    3967           0 :         req->result = iov_iter_count(&s->iter);
    3968             : 
    3969           0 :         if (force_nonblock) {
    3970             :                 /* If the file doesn't support async, just async punt */
    3971           0 :                 if (unlikely(!io_file_supports_nowait(req)))
    3972             :                         goto copy_iov;
    3973             : 
    3974             :                 /* regular files don't support NOWAIT for buffered (non-direct) IO */
    3975           0 :                 if (!(kiocb->ki_flags & IOCB_DIRECT) &&
    3976           0 :                     (req->flags & REQ_F_ISREG))
    3977             :                         goto copy_iov;
    3978             : 
    3979           0 :                 kiocb->ki_flags |= IOCB_NOWAIT;
    3980             :         } else {
    3981             :                 /* Ensure we clear previously set non-block flag */
    3982           0 :                 kiocb->ki_flags &= ~IOCB_NOWAIT;
    3983             :         }
    3984             : 
    3985           0 :         ppos = io_kiocb_update_pos(req);
    3986             : 
    3987           0 :         ret = rw_verify_area(WRITE, req->file, ppos, req->result);
    3988           0 :         if (unlikely(ret))
    3989             :                 goto out_free;
    3990             : 
    3991             :         /*
    3992             :          * Open-code file_start_write here to grab freeze protection,
    3993             :          * which will be released by another thread in
    3994             :          * io_complete_rw().  Fool lockdep by telling it the lock got
    3995             :          * released so that it doesn't complain about the held lock when
    3996             :          * we return to userspace.
    3997             :          */
    3998           0 :         if (req->flags & REQ_F_ISREG) {
    3999           0 :                 sb_start_write(file_inode(req->file)->i_sb);
    4000             :                 __sb_writers_release(file_inode(req->file)->i_sb,
    4001             :                                         SB_FREEZE_WRITE);
    4002             :         }
    4003           0 :         kiocb->ki_flags |= IOCB_WRITE;
    4004             : 
    4005           0 :         if (likely(req->file->f_op->write_iter))
    4006           0 :                 ret2 = call_write_iter(req->file, kiocb, &s->iter);
    4007           0 :         else if (req->file->f_op->write)
    4008           0 :                 ret2 = loop_rw_iter(WRITE, req, &s->iter);
    4009             :         else
    4010             :                 ret2 = -EINVAL;
    4011             : 
    4012           0 :         if (req->flags & REQ_F_REISSUE) {
    4013           0 :                 req->flags &= ~REQ_F_REISSUE;
    4014           0 :                 ret2 = -EAGAIN;
    4015             :         }
    4016             : 
    4017             :         /*
    4018             :          * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
    4019             :          * retry them without IOCB_NOWAIT.
    4020             :          */
    4021           0 :         if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
    4022           0 :                 ret2 = -EAGAIN;
    4023             :         /* no retry on NONBLOCK or RWF_NOWAIT */
    4024           0 :         if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
    4025             :                 goto done;
    4026           0 :         if (!force_nonblock || ret2 != -EAGAIN) {
    4027             :                 /* IOPOLL retry should happen for io-wq threads */
    4028           0 :                 if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
    4029             :                         goto copy_iov;
    4030             : done:
    4031           0 :                 kiocb_done(req, ret2, issue_flags);
    4032             :         } else {
    4033             : copy_iov:
    4034           0 :                 iov_iter_restore(&s->iter, &s->iter_state);
    4035           0 :                 ret = io_setup_async_rw(req, iovec, s, false);
    4036           0 :                 return ret ?: -EAGAIN;
    4037             :         }
    4038             : out_free:
    4039             :         /* it's reportedly faster than delegating the null check to kfree() */
    4040           0 :         if (iovec)
    4041           0 :                 kfree(iovec);
    4042             :         return ret;
    4043             : }
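                     : 
                     : /*
                     :  * Illustrative userspace counterpart (not part of this file): a minimal
                     :  * liburing sketch that drives the write path above. If the file can't
                     :  * honor IOCB_NOWAIT, the request is punted to io-wq and the completion
                     :  * still arrives on the CQ ring. The path "/tmp/t" and the 4KiB buffer
                     :  * are arbitrary choices for the example.
                     :  *
                     :  *      #include <errno.h>
                     :  *      #include <fcntl.h>
                     :  *      #include <unistd.h>
                     :  *      #include <liburing.h>
                     :  *
                     :  *      int write_example(void)
                     :  *      {
                     :  *              struct io_uring ring;
                     :  *              struct io_uring_sqe *sqe;
                     :  *              struct io_uring_cqe *cqe;
                     :  *              static char buf[4096];
                     :  *              int fd, ret;
                     :  *
                     :  *              if (io_uring_queue_init(8, &ring, 0) < 0)
                     :  *                      return -1;
                     :  *              fd = open("/tmp/t", O_WRONLY | O_CREAT, 0644);
                     :  *              if (fd < 0)
                     :  *                      return -1;
                     :  *              sqe = io_uring_get_sqe(&ring);
                     :  *              io_uring_prep_write(sqe, fd, buf, sizeof(buf), 0);
                     :  *              io_uring_submit(&ring);
                     :  *              ret = io_uring_wait_cqe(&ring, &cqe);
                     :  *              if (!ret) {
                     :  *                      // cqe->res is bytes written, or -errno
                     :  *                      ret = cqe->res < 0 ? cqe->res : 0;
                     :  *                      io_uring_cqe_seen(&ring, cqe);
                     :  *              }
                     :  *              close(fd);
                     :  *              io_uring_queue_exit(&ring);
                     :  *              return ret;
                     :  *      }
                     :  */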
    4044             : 
    4045           0 : static int io_renameat_prep(struct io_kiocb *req,
    4046             :                             const struct io_uring_sqe *sqe)
    4047             : {
    4048           0 :         struct io_rename *ren = &req->rename;
    4049             :         const char __user *oldf, *newf;
    4050             : 
    4051           0 :         if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
    4052             :                 return -EINVAL;
    4053           0 :         if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
    4054             :                 return -EINVAL;
    4055           0 :         if (unlikely(req->flags & REQ_F_FIXED_FILE))
    4056             :                 return -EBADF;
    4057             : 
    4058           0 :         ren->old_dfd = READ_ONCE(sqe->fd);
    4059           0 :         oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
    4060           0 :         newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
    4061           0 :         ren->new_dfd = READ_ONCE(sqe->len);
    4062           0 :         ren->flags = READ_ONCE(sqe->rename_flags);
    4063             : 
    4064           0 :         ren->oldpath = getname(oldf);
    4065           0 :         if (IS_ERR(ren->oldpath))
    4066           0 :                 return PTR_ERR(ren->oldpath);
    4067             : 
    4068           0 :         ren->newpath = getname(newf);
    4069           0 :         if (IS_ERR(ren->newpath)) {
    4070           0 :                 putname(ren->oldpath);
    4071           0 :                 return PTR_ERR(ren->newpath);
    4072             :         }
    4073             : 
    4074           0 :         req->flags |= REQ_F_NEED_CLEANUP;
    4075           0 :         return 0;
    4076             : }
    4077             : 
    4078           0 : static int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
    4079             : {
    4080           0 :         struct io_rename *ren = &req->rename;
    4081             :         int ret;
    4082             : 
    4083           0 :         if (issue_flags & IO_URING_F_NONBLOCK)
    4084             :                 return -EAGAIN;
    4085             : 
    4086           0 :         ret = do_renameat2(ren->old_dfd, ren->oldpath, ren->new_dfd,
    4087           0 :                                 ren->newpath, ren->flags);
    4088             : 
    4089           0 :         req->flags &= ~REQ_F_NEED_CLEANUP;
    4090           0 :         if (ret < 0)
    4091           0 :                 req_set_fail(req);
    4092           0 :         io_req_complete(req, ret);
    4093           0 :         return 0;
    4094             : }
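                     : 
                     : /*
                     :  * Userspace sketch (assumes liburing and an sqe obtained from
                     :  * io_uring_get_sqe() on an initialized ring). The prep helper mirrors
                     :  * the SQE decoding above: fd -> old_dfd, addr -> oldpath,
                     :  * addr2 -> newpath, len -> new_dfd, rename_flags -> flags.
                     :  *
                     :  *      io_uring_prep_renameat(sqe, AT_FDCWD, "old.txt",
                     :  *                             AT_FDCWD, "new.txt", RENAME_NOREPLACE);
                     :  */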
    4095             : 
    4096           0 : static int io_unlinkat_prep(struct io_kiocb *req,
    4097             :                             const struct io_uring_sqe *sqe)
    4098             : {
    4099           0 :         struct io_unlink *un = &req->unlink;
    4100             :         const char __user *fname;
    4101             : 
    4102           0 :         if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
    4103             :                 return -EINVAL;
    4104           0 :         if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
    4105             :             sqe->splice_fd_in)
    4106             :                 return -EINVAL;
    4107           0 :         if (unlikely(req->flags & REQ_F_FIXED_FILE))
    4108             :                 return -EBADF;
    4109             : 
    4110           0 :         un->dfd = READ_ONCE(sqe->fd);
    4111             : 
    4112           0 :         un->flags = READ_ONCE(sqe->unlink_flags);
    4113           0 :         if (un->flags & ~AT_REMOVEDIR)
    4114             :                 return -EINVAL;
    4115             : 
    4116           0 :         fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
    4117           0 :         un->filename = getname(fname);
    4118           0 :         if (IS_ERR(un->filename))
    4119           0 :                 return PTR_ERR(un->filename);
    4120             : 
    4121           0 :         req->flags |= REQ_F_NEED_CLEANUP;
    4122           0 :         return 0;
    4123             : }
    4124             : 
    4125           0 : static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
    4126             : {
    4127           0 :         struct io_unlink *un = &req->unlink;
    4128             :         int ret;
    4129             : 
    4130           0 :         if (issue_flags & IO_URING_F_NONBLOCK)
    4131             :                 return -EAGAIN;
    4132             : 
    4133           0 :         if (un->flags & AT_REMOVEDIR)
    4134           0 :                 ret = do_rmdir(un->dfd, un->filename);
    4135             :         else
    4136           0 :                 ret = do_unlinkat(un->dfd, un->filename);
    4137             : 
    4138           0 :         req->flags &= ~REQ_F_NEED_CLEANUP;
    4139           0 :         if (ret < 0)
    4140           0 :                 req_set_fail(req);
    4141           0 :         io_req_complete(req, ret);
    4142           0 :         return 0;
    4143             : }
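                     : 
                     : /*
                     :  * Userspace sketch (liburing assumed; sqe/sqe2 are two SQEs from the
                     :  * same ring): AT_REMOVEDIR is the only flag accepted in unlink_flags,
                     :  * matching the prep check above, so an async rmdir is just unlinkat
                     :  * with that flag set.
                     :  *
                     :  *      io_uring_prep_unlinkat(sqe, AT_FDCWD, "stale.txt", 0);
                     :  *      io_uring_prep_unlinkat(sqe2, AT_FDCWD, "emptydir", AT_REMOVEDIR);
                     :  */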
    4144             : 
    4145           0 : static int io_mkdirat_prep(struct io_kiocb *req,
    4146             :                             const struct io_uring_sqe *sqe)
    4147             : {
    4148           0 :         struct io_mkdir *mkd = &req->mkdir;
    4149             :         const char __user *fname;
    4150             : 
    4151           0 :         if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
    4152             :                 return -EINVAL;
    4153           0 :         if (sqe->ioprio || sqe->off || sqe->rw_flags || sqe->buf_index ||
    4154             :             sqe->splice_fd_in)
    4155             :                 return -EINVAL;
    4156           0 :         if (unlikely(req->flags & REQ_F_FIXED_FILE))
    4157             :                 return -EBADF;
    4158             : 
    4159           0 :         mkd->dfd = READ_ONCE(sqe->fd);
    4160           0 :         mkd->mode = READ_ONCE(sqe->len);
    4161             : 
    4162           0 :         fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
    4163           0 :         mkd->filename = getname(fname);
    4164           0 :         if (IS_ERR(mkd->filename))
    4165           0 :                 return PTR_ERR(mkd->filename);
    4166             : 
    4167           0 :         req->flags |= REQ_F_NEED_CLEANUP;
    4168           0 :         return 0;
    4169             : }
    4170             : 
    4171           0 : static int io_mkdirat(struct io_kiocb *req, unsigned int issue_flags)
    4172             : {
    4173           0 :         struct io_mkdir *mkd = &req->mkdir;
    4174             :         int ret;
    4175             : 
    4176           0 :         if (issue_flags & IO_URING_F_NONBLOCK)
    4177             :                 return -EAGAIN;
    4178             : 
    4179           0 :         ret = do_mkdirat(mkd->dfd, mkd->filename, mkd->mode);
    4180             : 
    4181           0 :         req->flags &= ~REQ_F_NEED_CLEANUP;
    4182           0 :         if (ret < 0)
    4183           0 :                 req_set_fail(req);
    4184           0 :         io_req_complete(req, ret);
    4185           0 :         return 0;
    4186             : }
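                     : 
                     : /*
                     :  * Userspace sketch (liburing assumed); note that the mode travels in
                     :  * sqe->len, as read back by the prep handler above.
                     :  *
                     :  *      io_uring_prep_mkdirat(sqe, AT_FDCWD, "newdir", 0755);
                     :  */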
    4187             : 
    4188           0 : static int io_symlinkat_prep(struct io_kiocb *req,
    4189             :                             const struct io_uring_sqe *sqe)
    4190             : {
    4191           0 :         struct io_symlink *sl = &req->symlink;
    4192             :         const char __user *oldpath, *newpath;
    4193             : 
    4194           0 :         if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
    4195             :                 return -EINVAL;
    4196           0 :         if (sqe->ioprio || sqe->len || sqe->rw_flags || sqe->buf_index ||
    4197             :             sqe->splice_fd_in)
    4198             :                 return -EINVAL;
    4199           0 :         if (unlikely(req->flags & REQ_F_FIXED_FILE))
    4200             :                 return -EBADF;
    4201             : 
    4202           0 :         sl->new_dfd = READ_ONCE(sqe->fd);
    4203           0 :         oldpath = u64_to_user_ptr(READ_ONCE(sqe->addr));
    4204           0 :         newpath = u64_to_user_ptr(READ_ONCE(sqe->addr2));
    4205             : 
    4206           0 :         sl->oldpath = getname(oldpath);
    4207           0 :         if (IS_ERR(sl->oldpath))
    4208           0 :                 return PTR_ERR(sl->oldpath);
    4209             : 
    4210           0 :         sl->newpath = getname(newpath);
    4211           0 :         if (IS_ERR(sl->newpath)) {
    4212           0 :                 putname(sl->oldpath);
    4213           0 :                 return PTR_ERR(sl->newpath);
    4214             :         }
    4215             : 
    4216           0 :         req->flags |= REQ_F_NEED_CLEANUP;
    4217           0 :         return 0;
    4218             : }
    4219             : 
    4220           0 : static int io_symlinkat(struct io_kiocb *req, unsigned int issue_flags)
    4221             : {
    4222           0 :         struct io_symlink *sl = &req->symlink;
    4223             :         int ret;
    4224             : 
    4225           0 :         if (issue_flags & IO_URING_F_NONBLOCK)
    4226             :                 return -EAGAIN;
    4227             : 
    4228           0 :         ret = do_symlinkat(sl->oldpath, sl->new_dfd, sl->newpath);
    4229             : 
    4230           0 :         req->flags &= ~REQ_F_NEED_CLEANUP;
    4231           0 :         if (ret < 0)
    4232           0 :                 req_set_fail(req);
    4233           0 :         io_req_complete(req, ret);
    4234           0 :         return 0;
    4235             : }
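                     : 
                     : /*
                     :  * Userspace sketch (liburing assumed); as with symlinkat(2), only the
                     :  * new link takes a directory fd, the target is a plain string.
                     :  *
                     :  *      io_uring_prep_symlinkat(sqe, "target.txt", AT_FDCWD, "alias.txt");
                     :  */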
    4236             : 
    4237           0 : static int io_linkat_prep(struct io_kiocb *req,
    4238             :                             const struct io_uring_sqe *sqe)
    4239             : {
    4240           0 :         struct io_hardlink *lnk = &req->hardlink;
    4241             :         const char __user *oldf, *newf;
    4242             : 
    4243           0 :         if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
    4244             :                 return -EINVAL;
    4245           0 :         if (sqe->ioprio || sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
    4246             :                 return -EINVAL;
    4247           0 :         if (unlikely(req->flags & REQ_F_FIXED_FILE))
    4248             :                 return -EBADF;
    4249             : 
    4250           0 :         lnk->old_dfd = READ_ONCE(sqe->fd);
    4251           0 :         lnk->new_dfd = READ_ONCE(sqe->len);
    4252           0 :         oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
    4253           0 :         newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
    4254           0 :         lnk->flags = READ_ONCE(sqe->hardlink_flags);
    4255             : 
    4256           0 :         lnk->oldpath = getname(oldf);
    4257           0 :         if (IS_ERR(lnk->oldpath))
    4258           0 :                 return PTR_ERR(lnk->oldpath);
    4259             : 
    4260           0 :         lnk->newpath = getname(newf);
    4261           0 :         if (IS_ERR(lnk->newpath)) {
    4262           0 :                 putname(lnk->oldpath);
    4263           0 :                 return PTR_ERR(lnk->newpath);
    4264             :         }
    4265             : 
    4266           0 :         req->flags |= REQ_F_NEED_CLEANUP;
    4267           0 :         return 0;
    4268             : }
    4269             : 
    4270           0 : static int io_linkat(struct io_kiocb *req, unsigned int issue_flags)
    4271             : {
    4272           0 :         struct io_hardlink *lnk = &req->hardlink;
    4273             :         int ret;
    4274             : 
    4275           0 :         if (issue_flags & IO_URING_F_NONBLOCK)
    4276             :                 return -EAGAIN;
    4277             : 
    4278           0 :         ret = do_linkat(lnk->old_dfd, lnk->oldpath, lnk->new_dfd,
    4279             :                                 lnk->newpath, lnk->flags);
    4280             : 
    4281           0 :         req->flags &= ~REQ_F_NEED_CLEANUP;
    4282           0 :         if (ret < 0)
    4283           0 :                 req_set_fail(req);
    4284           0 :         io_req_complete(req, ret);
    4285           0 :         return 0;
    4286             : }
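                     : 
                     : /*
                     :  * Userspace sketch (liburing assumed); flags land in
                     :  * sqe->hardlink_flags and take the linkat(2) values such as
                     :  * AT_SYMLINK_FOLLOW.
                     :  *
                     :  *      io_uring_prep_linkat(sqe, AT_FDCWD, "file.txt",
                     :  *                           AT_FDCWD, "hardlink.txt", 0);
                     :  */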
    4287             : 
    4288             : static int io_shutdown_prep(struct io_kiocb *req,
    4289             :                             const struct io_uring_sqe *sqe)
    4290             : {
    4291             : #if defined(CONFIG_NET)
    4292             :         if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
    4293             :                 return -EINVAL;
    4294             :         if (unlikely(sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags ||
    4295             :                      sqe->buf_index || sqe->splice_fd_in))
    4296             :                 return -EINVAL;
    4297             : 
    4298             :         req->shutdown.how = READ_ONCE(sqe->len);
    4299             :         return 0;
    4300             : #else
    4301             :         return -EOPNOTSUPP;
    4302             : #endif
    4303             : }
    4304             : 
    4305             : static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
    4306             : {
    4307             : #if defined(CONFIG_NET)
    4308             :         struct socket *sock;
    4309             :         int ret;
    4310             : 
    4311             :         if (issue_flags & IO_URING_F_NONBLOCK)
    4312             :                 return -EAGAIN;
    4313             : 
    4314             :         sock = sock_from_file(req->file);
    4315             :         if (unlikely(!sock))
    4316             :                 return -ENOTSOCK;
    4317             : 
    4318             :         ret = __sys_shutdown_sock(sock, req->shutdown.how);
    4319             :         if (ret < 0)
    4320             :                 req_set_fail(req);
    4321             :         io_req_complete(req, ret);
    4322             :         return 0;
    4323             : #else
    4324             :         return -EOPNOTSUPP;
    4325             : #endif
    4326             : }
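                     : 
                     : /*
                     :  * Userspace sketch (liburing assumed; sockfd is a placeholder socket
                     :  * fd); "how" takes the shutdown(2) constants.
                     :  *
                     :  *      io_uring_prep_shutdown(sqe, sockfd, SHUT_WR);
                     :  */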
    4327             : 
    4328             : static int __io_splice_prep(struct io_kiocb *req,
    4329             :                             const struct io_uring_sqe *sqe)
    4330             : {
    4331           0 :         struct io_splice *sp = &req->splice;
    4332           0 :         unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
    4333             : 
    4334           0 :         if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
    4335             :                 return -EINVAL;
    4336             : 
    4337           0 :         sp->len = READ_ONCE(sqe->len);
    4338           0 :         sp->flags = READ_ONCE(sqe->splice_flags);
    4339           0 :         if (unlikely(sp->flags & ~valid_flags))
    4340             :                 return -EINVAL;
    4341           0 :         sp->splice_fd_in = READ_ONCE(sqe->splice_fd_in);
    4342             :         return 0;
    4343             : }
    4344             : 
    4345             : static int io_tee_prep(struct io_kiocb *req,
    4346             :                        const struct io_uring_sqe *sqe)
    4347             : {
    4348           0 :         if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
    4349             :                 return -EINVAL;
    4350             :         return __io_splice_prep(req, sqe);
    4351             : }
    4352             : 
    4353           0 : static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
    4354             : {
    4355           0 :         struct io_splice *sp = &req->splice;
    4356           0 :         struct file *out = sp->file_out;
    4357           0 :         unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
    4358             :         struct file *in;
    4359           0 :         long ret = 0;
    4360             : 
    4361           0 :         if (issue_flags & IO_URING_F_NONBLOCK)
    4362             :                 return -EAGAIN;
    4363             : 
    4364           0 :         if (sp->flags & SPLICE_F_FD_IN_FIXED)
    4365           0 :                 in = io_file_get_fixed(req, sp->splice_fd_in, issue_flags);
    4366             :         else
    4367           0 :                 in = io_file_get_normal(req, sp->splice_fd_in);
    4368           0 :         if (!in) {
    4369             :                 ret = -EBADF;
    4370             :                 goto done;
    4371             :         }
    4372             : 
    4373           0 :         if (sp->len)
    4374           0 :                 ret = do_tee(in, out, sp->len, flags);
    4375             : 
    4376           0 :         if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
    4377             :                 io_put_file(in);
    4378             : done:
    4379           0 :         if (ret != sp->len)
    4380           0 :                 req_set_fail(req);
    4381           0 :         io_req_complete(req, ret);
    4382           0 :         return 0;
    4383             : }
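                     : 
                     : /*
                     :  * Userspace sketch (liburing assumed; pipe_in_rd/pipe_out_wr are
                     :  * placeholder pipe fds); both fds must be pipes, and tee duplicates
                     :  * data without consuming the input.
                     :  *
                     :  *      io_uring_prep_tee(sqe, pipe_in_rd, pipe_out_wr, 4096, 0);
                     :  */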
    4384             : 
    4385             : static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
    4386             : {
    4387           0 :         struct io_splice *sp = &req->splice;
    4388             : 
    4389           0 :         sp->off_in = READ_ONCE(sqe->splice_off_in);
    4390           0 :         sp->off_out = READ_ONCE(sqe->off);
    4391             :         return __io_splice_prep(req, sqe);
    4392             : }
    4393             : 
    4394           0 : static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
    4395             : {
    4396           0 :         struct io_splice *sp = &req->splice;
    4397           0 :         struct file *out = sp->file_out;
    4398           0 :         unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
    4399             :         loff_t *poff_in, *poff_out;
    4400             :         struct file *in;
    4401           0 :         long ret = 0;
    4402             : 
    4403           0 :         if (issue_flags & IO_URING_F_NONBLOCK)
    4404             :                 return -EAGAIN;
    4405             : 
    4406           0 :         if (sp->flags & SPLICE_F_FD_IN_FIXED)
    4407           0 :                 in = io_file_get_fixed(req, sp->splice_fd_in, issue_flags);
    4408             :         else
    4409           0 :                 in = io_file_get_normal(req, sp->splice_fd_in);
    4410           0 :         if (!in) {
    4411             :                 ret = -EBADF;
    4412             :                 goto done;
    4413             :         }
    4414             : 
    4415           0 :         poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
    4416           0 :         poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
    4417             : 
    4418           0 :         if (sp->len)
    4419           0 :                 ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
    4420             : 
    4421           0 :         if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
    4422             :                 io_put_file(in);
    4423             : done:
    4424           0 :         if (ret != sp->len)
    4425           0 :                 req_set_fail(req);
    4426           0 :         io_req_complete(req, ret);
    4427           0 :         return 0;
    4428             : }
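                     : 
                     : /*
                     :  * Userspace sketch (liburing assumed; pipe_rd/file_fd are placeholder
                     :  * fds). An offset of -1 becomes a NULL offset pointer above, i.e.
                     :  * "use and advance the file position", which is the only valid choice
                     :  * for the pipe side.
                     :  *
                     :  *      io_uring_prep_splice(sqe, pipe_rd, -1, file_fd, 0, 4096, 0);
                     :  */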
    4429             : 
    4430             : /*
    4431             :  * IORING_OP_NOP just posts a completion event, nothing else.
    4432             :  */
    4433           0 : static int io_nop(struct io_kiocb *req, unsigned int issue_flags)
    4434             : {
    4435           0 :         struct io_ring_ctx *ctx = req->ctx;
    4436             : 
    4437           0 :         if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
    4438             :                 return -EINVAL;
    4439             : 
    4440             :         __io_req_complete(req, issue_flags, 0, 0);
    4441             :         return 0;
    4442             : }
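                     : 
                     : /*
                     :  * Userspace sketch (liburing assumed); handy for measuring raw ring
                     :  * round-trip overhead.
                     :  *
                     :  *      io_uring_prep_nop(sqe);
                     :  */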
    4443             : 
    4444           0 : static int io_msg_ring_prep(struct io_kiocb *req,
    4445             :                             const struct io_uring_sqe *sqe)
    4446             : {
    4447           0 :         if (unlikely(sqe->addr || sqe->ioprio || sqe->rw_flags ||
    4448             :                      sqe->splice_fd_in || sqe->buf_index || sqe->personality))
    4449             :                 return -EINVAL;
    4450             : 
    4451           0 :         req->msg.user_data = READ_ONCE(sqe->off);
    4452           0 :         req->msg.len = READ_ONCE(sqe->len);
    4453             :         return 0;
    4454             : }
    4455             : 
    4456           0 : static int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
    4457             : {
    4458             :         struct io_ring_ctx *target_ctx;
    4459           0 :         struct io_msg *msg = &req->msg;
    4460             :         bool filled;
    4461             :         int ret;
    4462             : 
    4463           0 :         ret = -EBADFD;
    4464           0 :         if (req->file->f_op != &io_uring_fops)
    4465             :                 goto done;
    4466             : 
    4467           0 :         ret = -EOVERFLOW;
    4468           0 :         target_ctx = req->file->private_data;
    4469             : 
    4470           0 :         spin_lock(&target_ctx->completion_lock);
    4471           0 :         filled = io_fill_cqe_aux(target_ctx, msg->user_data, msg->len, 0);
    4472           0 :         io_commit_cqring(target_ctx);
    4473           0 :         spin_unlock(&target_ctx->completion_lock);
    4474             : 
    4475           0 :         if (filled) {
    4476           0 :                 io_cqring_ev_posted(target_ctx);
    4477           0 :                 ret = 0;
    4478             :         }
    4479             : 
    4480             : done:
    4481           0 :         if (ret < 0)
    4482           0 :                 req_set_fail(req);
    4483           0 :         __io_req_complete(req, issue_flags, ret, 0);
    4484             :         /* put file to avoid an attempt to IOPOLL the req */
    4485           0 :         io_put_file(req->file);
    4486           0 :         req->file = NULL;
    4487           0 :         return 0;
    4488             : }
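                     : 
                     : /*
                     :  * Userspace sketch (assumes liburing 2.2+; peer_ring_fd is a
                     :  * placeholder for another io_uring instance's fd). The user_data and
                     :  * len values appear verbatim in the target ring's CQE, so one thread
                     :  * can wake or hand off work to a peer ring.
                     :  *
                     :  *      io_uring_prep_msg_ring(sqe, peer_ring_fd, 0, 0xcafe, 0);
                     :  */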
    4489             : 
    4490           0 : static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
    4491             : {
    4492           0 :         struct io_ring_ctx *ctx = req->ctx;
    4493             : 
    4494           0 :         if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
    4495             :                 return -EINVAL;
    4496           0 :         if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
    4497             :                      sqe->splice_fd_in))
    4498             :                 return -EINVAL;
    4499             : 
    4500           0 :         req->sync.flags = READ_ONCE(sqe->fsync_flags);
    4501           0 :         if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
    4502             :                 return -EINVAL;
    4503             : 
    4504           0 :         req->sync.off = READ_ONCE(sqe->off);
    4505           0 :         req->sync.len = READ_ONCE(sqe->len);
    4506           0 :         return 0;
    4507             : }
    4508             : 
    4509           0 : static int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
    4510             : {
    4511           0 :         loff_t end = req->sync.off + req->sync.len;
    4512             :         int ret;
    4513             : 
    4514             :         /* fsync always requires a blocking context */
    4515           0 :         if (issue_flags & IO_URING_F_NONBLOCK)
    4516             :                 return -EAGAIN;
    4517             : 
    4518           0 :         ret = vfs_fsync_range(req->file, req->sync.off,
    4519             :                                 end > 0 ? end : LLONG_MAX,
    4520           0 :                                 req->sync.flags & IORING_FSYNC_DATASYNC);
    4521           0 :         if (ret < 0)
    4522           0 :                 req_set_fail(req);
    4523           0 :         io_req_complete(req, ret);
    4524           0 :         return 0;
    4525             : }
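                     : 
                     : /*
                     :  * Userspace sketch (liburing assumed); IORING_FSYNC_DATASYNC gives
                     :  * fdatasync(2) semantics, and sqe->off/len can bound the range passed
                     :  * to vfs_fsync_range() above.
                     :  *
                     :  *      io_uring_prep_fsync(sqe, fd, IORING_FSYNC_DATASYNC);
                     :  */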
    4526             : 
    4527             : static int io_fallocate_prep(struct io_kiocb *req,
    4528             :                              const struct io_uring_sqe *sqe)
    4529             : {
    4530           0 :         if (sqe->ioprio || sqe->buf_index || sqe->rw_flags ||
    4531           0 :             sqe->splice_fd_in)
    4532             :                 return -EINVAL;
    4533           0 :         if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
    4534             :                 return -EINVAL;
    4535             : 
    4536           0 :         req->sync.off = READ_ONCE(sqe->off);
    4537           0 :         req->sync.len = READ_ONCE(sqe->addr);
    4538           0 :         req->sync.mode = READ_ONCE(sqe->len);
    4539             :         return 0;
    4540             : }
    4541             : 
    4542           0 : static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
    4543             : {
    4544             :         int ret;
    4545             : 
    4546             :         /* fallocate always requires a blocking context */
    4547           0 :         if (issue_flags & IO_URING_F_NONBLOCK)
    4548             :                 return -EAGAIN;
    4549           0 :         ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
    4550             :                                 req->sync.len);
    4551           0 :         if (ret < 0)
    4552           0 :                 req_set_fail(req);
    4553             :         else
    4554           0 :                 fsnotify_modify(req->file);
    4555           0 :         io_req_complete(req, ret);
    4556           0 :         return 0;
    4557             : }
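                     : 
                     : /*
                     :  * Userspace sketch (liburing assumed); mode takes the fallocate(2)
                     :  * flags, e.g. FALLOC_FL_KEEP_SIZE, and 0 means "allocate and extend".
                     :  *
                     :  *      io_uring_prep_fallocate(sqe, fd, 0, 0, 1 << 20);
                     :  */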
    4558             : 
    4559           0 : static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
    4560             : {
    4561             :         const char __user *fname;
    4562             :         int ret;
    4563             : 
    4564           0 :         if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
    4565             :                 return -EINVAL;
    4566           0 :         if (unlikely(sqe->ioprio || sqe->buf_index))
    4567             :                 return -EINVAL;
    4568           0 :         if (unlikely(req->flags & REQ_F_FIXED_FILE))
    4569             :                 return -EBADF;
    4570             : 
    4571             :         /* open.how should already be initialised */
    4572           0 :         if (!(req->open.how.flags & O_PATH) && force_o_largefile())
    4573           0 :                 req->open.how.flags |= O_LARGEFILE;
    4574             : 
    4575           0 :         req->open.dfd = READ_ONCE(sqe->fd);
    4576           0 :         fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
    4577           0 :         req->open.filename = getname(fname);
    4578           0 :         if (IS_ERR(req->open.filename)) {
    4579           0 :                 ret = PTR_ERR(req->open.filename);
    4580           0 :                 req->open.filename = NULL;
    4581           0 :                 return ret;
    4582             :         }
    4583             : 
    4584           0 :         req->open.file_slot = READ_ONCE(sqe->file_index);
    4585           0 :         if (req->open.file_slot && (req->open.how.flags & O_CLOEXEC))
    4586             :                 return -EINVAL;
    4587             : 
    4588           0 :         req->open.nofile = rlimit(RLIMIT_NOFILE);
    4589           0 :         req->flags |= REQ_F_NEED_CLEANUP;
    4590           0 :         return 0;
    4591             : }
    4592             : 
    4593           0 : static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
    4594             : {
    4595           0 :         u64 mode = READ_ONCE(sqe->len);
    4596           0 :         u64 flags = READ_ONCE(sqe->open_flags);
    4597             : 
    4598           0 :         req->open.how = build_open_how(flags, mode);
    4599           0 :         return __io_openat_prep(req, sqe);
    4600             : }
    4601             : 
    4602           0 : static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
    4603             : {
    4604             :         struct open_how __user *how;
    4605             :         size_t len;
    4606             :         int ret;
    4607             : 
    4608           0 :         how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
    4609           0 :         len = READ_ONCE(sqe->len);
    4610           0 :         if (len < OPEN_HOW_SIZE_VER0)
    4611             :                 return -EINVAL;
    4612             : 
    4613           0 :         ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
    4614             :                                         len);
    4615           0 :         if (ret)
    4616             :                 return ret;
    4617             : 
    4618           0 :         return __io_openat_prep(req, sqe);
    4619             : }
    4620             : 
    4621           0 : static int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
    4622             : {
    4623             :         struct open_flags op;
    4624             :         struct file *file;
    4625             :         bool resolve_nonblock, nonblock_set;
    4626           0 :         bool fixed = !!req->open.file_slot;
    4627             :         int ret;
    4628             : 
    4629           0 :         ret = build_open_flags(&req->open.how, &op);
    4630           0 :         if (ret)
    4631             :                 goto err;
    4632           0 :         nonblock_set = op.open_flag & O_NONBLOCK;
    4633           0 :         resolve_nonblock = req->open.how.resolve & RESOLVE_CACHED;
    4634           0 :         if (issue_flags & IO_URING_F_NONBLOCK) {
    4635             :                 /*
    4636             :                  * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
    4637             :                  * it'll always return -EAGAIN.
    4638             :                  */
    4639           0 :                 if (req->open.how.flags & (O_TRUNC | O_CREAT | O_TMPFILE))
    4640             :                         return -EAGAIN;
    4641           0 :                 op.lookup_flags |= LOOKUP_CACHED;
    4642           0 :                 op.open_flag |= O_NONBLOCK;
    4643             :         }
    4644             : 
    4645           0 :         if (!fixed) {
    4646           0 :                 ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
    4647           0 :                 if (ret < 0)
    4648             :                         goto err;
    4649             :         }
    4650             : 
    4651           0 :         file = do_filp_open(req->open.dfd, req->open.filename, &op);
    4652           0 :         if (IS_ERR(file)) {
    4653             :                 /*
    4654             :                  * We could hang on to this 'fd' on retrying, but seems like
    4655             :                  * marginal gain for something that is now known to be a slower
    4656             :                  * path. So just put it, and we'll get a new one when we retry.
    4657             :                  */
    4658           0 :                 if (!fixed)
    4659           0 :                         put_unused_fd(ret);
    4660             : 
    4661           0 :                 ret = PTR_ERR(file);
    4662             :                 /* only retry if RESOLVE_CACHED wasn't already set by application */
    4663           0 :                 if (ret == -EAGAIN &&
    4664           0 :                     (!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)))
    4665             :                         return -EAGAIN;
    4666             :                 goto err;
    4667             :         }
    4668             : 
    4669           0 :         if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
    4670           0 :                 file->f_flags &= ~O_NONBLOCK;
    4671           0 :         fsnotify_open(file);
    4672             : 
    4673           0 :         if (!fixed)
    4674           0 :                 fd_install(ret, file);
    4675             :         else
    4676           0 :                 ret = io_install_fixed_file(req, file, issue_flags,
    4677           0 :                                             req->open.file_slot - 1);
    4678             : err:
    4679           0 :         putname(req->open.filename);
    4680           0 :         req->flags &= ~REQ_F_NEED_CLEANUP;
    4681           0 :         if (ret < 0)
    4682           0 :                 req_set_fail(req);
    4683             :         __io_req_complete(req, issue_flags, ret, 0);
    4684             :         return 0;
    4685             : }
    4686             : 
    4687             : static int io_openat(struct io_kiocb *req, unsigned int issue_flags)
    4688             : {
    4689           0 :         return io_openat2(req, issue_flags);
    4690             : }
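                     : 
                     : /*
                     :  * Userspace sketch (liburing assumed). If the application itself sets
                     :  * RESOLVE_CACHED, a dcache miss completes with -EAGAIN instead of
                     :  * being retried from a blocking context, per the resolve_nonblock
                     :  * handling above.
                     :  *
                     :  *      struct open_how how = { .flags = O_RDONLY,
                     :  *                              .resolve = RESOLVE_CACHED };
                     :  *      io_uring_prep_openat2(sqe, AT_FDCWD, "data.bin", &how);
                     :  */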
    4691             : 
    4692           0 : static int io_remove_buffers_prep(struct io_kiocb *req,
    4693             :                                   const struct io_uring_sqe *sqe)
    4694             : {
    4695           0 :         struct io_provide_buf *p = &req->pbuf;
    4696             :         u64 tmp;
    4697             : 
    4698           0 :         if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
    4699           0 :             sqe->splice_fd_in)
    4700             :                 return -EINVAL;
    4701             : 
    4702           0 :         tmp = READ_ONCE(sqe->fd);
    4703           0 :         if (!tmp || tmp > USHRT_MAX)
    4704             :                 return -EINVAL;
    4705             : 
    4706           0 :         memset(p, 0, sizeof(*p));
    4707           0 :         p->nbufs = tmp;
    4708           0 :         p->bgid = READ_ONCE(sqe->buf_group);
    4709           0 :         return 0;
    4710             : }
    4711             : 
    4712           0 : static int __io_remove_buffers(struct io_ring_ctx *ctx,
    4713             :                                struct io_buffer_list *bl, unsigned nbufs)
    4714             : {
    4715           0 :         unsigned i = 0;
    4716             : 
    4717             :         /* shouldn't happen */
    4718           0 :         if (!nbufs)
    4719             :                 return 0;
    4720             : 
    4721             :         /* the head kbuf is the list itself */
    4722           0 :         while (!list_empty(&bl->buf_list)) {
    4723             :                 struct io_buffer *nxt;
    4724             : 
    4725           0 :                 nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
    4726           0 :                 list_del(&nxt->list);
    4727           0 :                 if (++i == nbufs)
    4728           0 :                         return i;
    4729           0 :                 cond_resched();
    4730             :         }
    4731           0 :         i++;
    4732             : 
    4733           0 :         return i;
    4734             : }
    4735             : 
    4736           0 : static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
    4737             : {
    4738           0 :         struct io_provide_buf *p = &req->pbuf;
    4739           0 :         struct io_ring_ctx *ctx = req->ctx;
    4740             :         struct io_buffer_list *bl;
    4741           0 :         int ret = 0;
    4742           0 :         bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
    4743             : 
    4744           0 :         io_ring_submit_lock(ctx, needs_lock);
    4745             : 
    4746             :         lockdep_assert_held(&ctx->uring_lock);
    4747             : 
    4748           0 :         ret = -ENOENT;
    4749           0 :         bl = io_buffer_get_list(ctx, p->bgid);
    4750           0 :         if (bl)
    4751           0 :                 ret = __io_remove_buffers(ctx, bl, p->nbufs);
    4752           0 :         if (ret < 0)
    4753           0 :                 req_set_fail(req);
    4754             : 
    4755             :         /* complete before unlock, IOPOLL may need the lock */
    4756           0 :         __io_req_complete(req, issue_flags, ret, 0);
    4757           0 :         io_ring_submit_unlock(ctx, needs_lock);
    4758           0 :         return 0;
    4759             : }
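                     : 
                     : /*
                     :  * Userspace sketch (liburing assumed): pull 8 buffers back out of
                     :  * group 7; groups are populated with io_provide_buffers() (see the
                     :  * example further below).
                     :  *
                     :  *      io_uring_prep_remove_buffers(sqe, 8, 7);
                     :  */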
    4760             : 
    4761           0 : static int io_provide_buffers_prep(struct io_kiocb *req,
    4762             :                                    const struct io_uring_sqe *sqe)
    4763             : {
    4764             :         unsigned long size, tmp_check;
    4765           0 :         struct io_provide_buf *p = &req->pbuf;
    4766             :         u64 tmp;
    4767             : 
    4768           0 :         if (sqe->ioprio || sqe->rw_flags || sqe->splice_fd_in)
    4769             :                 return -EINVAL;
    4770             : 
    4771           0 :         tmp = READ_ONCE(sqe->fd);
    4772           0 :         if (!tmp || tmp > USHRT_MAX)
    4773             :                 return -E2BIG;
    4774           0 :         p->nbufs = tmp;
    4775           0 :         p->addr = READ_ONCE(sqe->addr);
    4776           0 :         p->len = READ_ONCE(sqe->len);
    4777             : 
    4778           0 :         if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
    4779             :                                 &size))
    4780             :                 return -EOVERFLOW;
    4781           0 :         if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
    4782             :                 return -EOVERFLOW;
    4783             : 
    4784           0 :         size = (unsigned long)p->len * p->nbufs;
    4785           0 :         if (!access_ok(u64_to_user_ptr(p->addr), size))
    4786             :                 return -EFAULT;
    4787             : 
    4788           0 :         p->bgid = READ_ONCE(sqe->buf_group);
    4789           0 :         tmp = READ_ONCE(sqe->off);
    4790           0 :         if (tmp > USHRT_MAX)
    4791             :                 return -E2BIG;
    4792           0 :         p->bid = tmp;
    4793           0 :         return 0;
    4794             : }
    4795             : 
    4796           0 : static int io_refill_buffer_cache(struct io_ring_ctx *ctx)
    4797             : {
    4798             :         struct io_buffer *buf;
    4799             :         struct page *page;
    4800             :         int bufs_in_page;
    4801             : 
    4802             :         /*
    4803             :          * Completions that don't happen inline (eg not under uring_lock) will
    4804             :          * add to ->io_buffers_comp. If we don't have any free buffers, check
    4805             :          * the completion list and splice those entries first.
    4806             :          */
    4807           0 :         if (!list_empty_careful(&ctx->io_buffers_comp)) {
    4808           0 :                 spin_lock(&ctx->completion_lock);
    4809           0 :                 if (!list_empty(&ctx->io_buffers_comp)) {
    4810           0 :                         list_splice_init(&ctx->io_buffers_comp,
    4811             :                                                 &ctx->io_buffers_cache);
    4812           0 :                         spin_unlock(&ctx->completion_lock);
    4813           0 :                         return 0;
    4814             :                 }
    4815           0 :                 spin_unlock(&ctx->completion_lock);
    4816             :         }
    4817             : 
    4818             :         /*
    4819             :          * No free buffers and no completion entries either. Allocate a new
    4820             :          * page worth of buffer entries and add those to our freelist.
    4821             :          */
    4822           0 :         page = alloc_page(GFP_KERNEL_ACCOUNT);
    4823           0 :         if (!page)
    4824             :                 return -ENOMEM;
    4825             : 
    4826           0 :         list_add(&page->lru, &ctx->io_buffers_pages);
    4827             : 
    4828           0 :         buf = page_address(page);
    4829           0 :         bufs_in_page = PAGE_SIZE / sizeof(*buf);
    4830           0 :         while (bufs_in_page) {
    4831           0 :                 list_add_tail(&buf->list, &ctx->io_buffers_cache);
    4832           0 :                 buf++;
    4833           0 :                 bufs_in_page--;
    4834             :         }
    4835             : 
    4836             :         return 0;
    4837             : }
    4838             : 
    4839           0 : static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
    4840             :                           struct io_buffer_list *bl)
    4841             : {
    4842             :         struct io_buffer *buf;
    4843           0 :         u64 addr = pbuf->addr;
    4844           0 :         int i, bid = pbuf->bid;
    4845             : 
    4846           0 :         for (i = 0; i < pbuf->nbufs; i++) {
    4847           0 :                 if (list_empty(&ctx->io_buffers_cache) &&
    4848           0 :                     io_refill_buffer_cache(ctx))
    4849             :                         break;
    4850           0 :                 buf = list_first_entry(&ctx->io_buffers_cache, struct io_buffer,
    4851             :                                         list);
    4852           0 :                 list_move_tail(&buf->list, &bl->buf_list);
    4853           0 :                 buf->addr = addr;
    4854           0 :                 buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
    4855           0 :                 buf->bid = bid;
    4856           0 :                 buf->bgid = pbuf->bgid;
    4857           0 :                 addr += pbuf->len;
    4858           0 :                 bid++;
    4859           0 :                 cond_resched();
    4860             :         }
    4861             : 
    4862           0 :         return i ? 0 : -ENOMEM;
    4863             : }
    4864             : 
    4865           0 : static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
    4866             : {
    4867           0 :         struct io_provide_buf *p = &req->pbuf;
    4868           0 :         struct io_ring_ctx *ctx = req->ctx;
    4869             :         struct io_buffer_list *bl;
    4870           0 :         int ret = 0;
    4871           0 :         bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
    4872             : 
    4873           0 :         io_ring_submit_lock(ctx, needs_lock);
    4874             : 
    4875             :         lockdep_assert_held(&ctx->uring_lock);
    4876             : 
    4877           0 :         bl = io_buffer_get_list(ctx, p->bgid);
    4878           0 :         if (unlikely(!bl)) {
    4879           0 :                 bl = kmalloc(sizeof(*bl), GFP_KERNEL);
    4880           0 :                 if (!bl) {
    4881             :                         ret = -ENOMEM;
    4882             :                         goto err;
    4883             :                 }
    4884           0 :                 io_buffer_add_list(ctx, bl, p->bgid);
    4885             :         }
    4886             : 
    4887           0 :         ret = io_add_buffers(ctx, p, bl);
    4888             : err:
    4889           0 :         if (ret < 0)
    4890           0 :                 req_set_fail(req);
    4891             :         /* complete before unlock, IOPOLL may need the lock */
    4892           0 :         __io_req_complete(req, issue_flags, ret, 0);
    4893           0 :         io_ring_submit_unlock(ctx, needs_lock);
    4894           0 :         return 0;
    4895             : }
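                     : 
                     : /*
                     :  * Userspace sketch (liburing assumed; "base" stands for any
                     :  * application-owned allocation): publish 8 buffers of 4KiB as group 7,
                     :  * then let a read pick one via IOSQE_BUFFER_SELECT. The chosen buffer
                     :  * id is reported in cqe->flags >> IORING_CQE_BUFFER_SHIFT.
                     :  *
                     :  *      io_uring_prep_provide_buffers(sqe, base, 4096, 8, 7, 0);
                     :  *      // submit and reap that SQE, then:
                     :  *      io_uring_prep_read(sqe, fd, NULL, 4096, 0);
                     :  *      sqe->buf_group = 7;
                     :  *      io_uring_sqe_set_flags(sqe, IOSQE_BUFFER_SELECT);
                     :  */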
    4896             : 
    4897           0 : static int io_epoll_ctl_prep(struct io_kiocb *req,
    4898             :                              const struct io_uring_sqe *sqe)
    4899             : {
    4900             : #if defined(CONFIG_EPOLL)
    4901           0 :         if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
    4902             :                 return -EINVAL;
    4903           0 :         if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
    4904             :                 return -EINVAL;
    4905             : 
    4906           0 :         req->epoll.epfd = READ_ONCE(sqe->fd);
    4907           0 :         req->epoll.op = READ_ONCE(sqe->len);
    4908           0 :         req->epoll.fd = READ_ONCE(sqe->off);
    4909             : 
    4910           0 :         if (ep_op_has_event(req->epoll.op)) {
    4911             :                 struct epoll_event __user *ev;
    4912             : 
    4913           0 :                 ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
    4914           0 :                 if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
    4915             :                         return -EFAULT;
    4916             :         }
    4917             : 
    4918             :         return 0;
    4919             : #else
    4920             :         return -EOPNOTSUPP;
    4921             : #endif
    4922             : }
    4923             : 
    4924           0 : static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
    4925             : {
    4926             : #if defined(CONFIG_EPOLL)
    4927           0 :         struct io_epoll *ie = &req->epoll;
    4928             :         int ret;
    4929           0 :         bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
    4930             : 
    4931           0 :         ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
    4932           0 :         if (force_nonblock && ret == -EAGAIN)
    4933             :                 return -EAGAIN;
    4934             : 
    4935           0 :         if (ret < 0)
    4936           0 :                 req_set_fail(req);
    4937             :         __io_req_complete(req, issue_flags, ret, 0);
    4938             :         return 0;
    4939             : #else
    4940             :         return -EOPNOTSUPP;
    4941             : #endif
    4942             : }
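                     : 
                     : /*
                     :  * Userspace sketch (liburing assumed); the arguments match
                     :  * epoll_ctl(2), just issued asynchronously.
                     :  *
                     :  *      struct epoll_event ev = { .events = EPOLLIN, .data.fd = fd };
                     :  *      io_uring_prep_epoll_ctl(sqe, epfd, fd, EPOLL_CTL_ADD, &ev);
                     :  */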
    4943             : 
    4944             : static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
    4945             : {
    4946             : #if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
    4947           0 :         if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->splice_fd_in)
    4948             :                 return -EINVAL;
    4949           0 :         if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
    4950             :                 return -EINVAL;
    4951             : 
    4952           0 :         req->madvise.addr = READ_ONCE(sqe->addr);
    4953           0 :         req->madvise.len = READ_ONCE(sqe->len);
    4954           0 :         req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
    4955             :         return 0;
    4956             : #else
    4957             :         return -EOPNOTSUPP;
    4958             : #endif
    4959             : }
    4960             : 
    4961           0 : static int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
    4962             : {
    4963             : #if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
    4964           0 :         struct io_madvise *ma = &req->madvise;
    4965             :         int ret;
    4966             : 
    4967           0 :         if (issue_flags & IO_URING_F_NONBLOCK)
    4968             :                 return -EAGAIN;
    4969             : 
    4970           0 :         ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);
    4971           0 :         if (ret < 0)
    4972           0 :                 req_set_fail(req);
    4973           0 :         io_req_complete(req, ret);
    4974           0 :         return 0;
    4975             : #else
    4976             :         return -EOPNOTSUPP;
    4977             : #endif
    4978             : }
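                     : 
                     : /*
                     :  * Userspace sketch (liburing assumed; region/region_len are
                     :  * placeholders): same effect as madvise(2) on the caller's address
                     :  * space, executed from a worker thread.
                     :  *
                     :  *      io_uring_prep_madvise(sqe, region, region_len, MADV_WILLNEED);
                     :  */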
    4979             : 
    4980             : static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
    4981             : {
    4982           0 :         if (sqe->ioprio || sqe->buf_index || sqe->addr || sqe->splice_fd_in)
    4983             :                 return -EINVAL;
    4984           0 :         if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
    4985             :                 return -EINVAL;
    4986             : 
    4987           0 :         req->fadvise.offset = READ_ONCE(sqe->off);
    4988           0 :         req->fadvise.len = READ_ONCE(sqe->len);
    4989           0 :         req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
    4990             :         return 0;
    4991             : }
    4992             : 
    4993           0 : static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
    4994             : {
    4995           0 :         struct io_fadvise *fa = &req->fadvise;
    4996             :         int ret;
    4997             : 
    4998           0 :         if (issue_flags & IO_URING_F_NONBLOCK) {
    4999           0 :                 switch (fa->advice) {
    5000             :                 case POSIX_FADV_NORMAL:
    5001             :                 case POSIX_FADV_RANDOM:
    5002             :                 case POSIX_FADV_SEQUENTIAL:
    5003             :                         break;
    5004             :                 default:
    5005             :                         return -EAGAIN;
    5006             :                 }
    5007             :         }
    5008             : 
    5009           0 :         ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
    5010           0 :         if (ret < 0)
    5011           0 :                 req_set_fail(req);
    5012             :         __io_req_complete(req, issue_flags, ret, 0);
    5013             :         return 0;
    5014             : }
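
/*
 * Illustrative note on the switch above: POSIX_FADV_NORMAL, _RANDOM and
 * _SEQUENTIAL only adjust the file's readahead behaviour, so they are
 * safe to service inline even in a non-blocking context. Other advice,
 * such as POSIX_FADV_WILLNEED or _DONTNEED, may touch the page cache
 * and block, so it is punted to the async path via -EAGAIN.
 */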
    5015             : 
    5016           0 : static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
    5017             : {
    5018             :         const char __user *path;
    5019             : 
    5020           0 :         if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
    5021             :                 return -EINVAL;
    5022           0 :         if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
    5023             :                 return -EINVAL;
    5024           0 :         if (req->flags & REQ_F_FIXED_FILE)
    5025             :                 return -EBADF;
    5026             : 
    5027           0 :         req->statx.dfd = READ_ONCE(sqe->fd);
    5028           0 :         req->statx.mask = READ_ONCE(sqe->len);
    5029           0 :         path = u64_to_user_ptr(READ_ONCE(sqe->addr));
    5030           0 :         req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
    5031           0 :         req->statx.flags = READ_ONCE(sqe->statx_flags);
    5032             : 
    5033           0 :         req->statx.filename = getname_flags(path,
    5034             :                                         getname_statx_lookup_flags(req->statx.flags),
    5035             :                                         NULL);
    5036             : 
    5037           0 :         if (IS_ERR(req->statx.filename)) {
    5038           0 :                 int ret = PTR_ERR(req->statx.filename);
    5039             : 
    5040           0 :                 req->statx.filename = NULL;
    5041           0 :                 return ret;
    5042             :         }
    5043             : 
    5044           0 :         req->flags |= REQ_F_NEED_CLEANUP;
    5045           0 :         return 0;
    5046             : }
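
/*
 * Illustrative note: getname_flags() above copies the user-supplied
 * path into a kernel-managed struct filename, so REQ_F_NEED_CLEANUP is
 * set to ensure the generic cleanup path releases it (via putname())
 * even if the request is cancelled before io_statx() ever runs.
 */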
    5047             : 
    5048           0 : static int io_statx(struct io_kiocb *req, unsigned int issue_flags)
    5049             : {
    5050           0 :         struct io_statx *ctx = &req->statx;
    5051             :         int ret;
    5052             : 
    5053           0 :         if (issue_flags & IO_URING_F_NONBLOCK)
    5054             :                 return -EAGAIN;
    5055             : 
    5056           0 :         ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
    5057             :                        ctx->buffer);
    5058             : 
    5059           0 :         if (ret < 0)
    5060           0 :                 req_set_fail(req);
    5061           0 :         io_req_complete(req, ret);
    5062           0 :         return 0;
    5063             : }
    5064             : 
    5065           0 : static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
    5066             : {
    5067           0 :         if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
    5068             :                 return -EINVAL;
    5069           0 :         if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
    5070           0 :             sqe->rw_flags || sqe->buf_index)
    5071             :                 return -EINVAL;
    5072           0 :         if (req->flags & REQ_F_FIXED_FILE)
    5073             :                 return -EBADF;
    5074             : 
    5075           0 :         req->close.fd = READ_ONCE(sqe->fd);
    5076           0 :         req->close.file_slot = READ_ONCE(sqe->file_index);
    5077           0 :         if (req->close.file_slot && req->close.fd)
    5078             :                 return -EINVAL;
    5079             : 
    5080           0 :         return 0;
    5081             : }
    5082             : 
    5083           0 : static int io_close(struct io_kiocb *req, unsigned int issue_flags)
    5084             : {
    5085           0 :         struct files_struct *files = current->files;
    5086           0 :         struct io_close *close = &req->close;
    5087             :         struct fdtable *fdt;
    5088           0 :         struct file *file = NULL;
    5089           0 :         int ret = -EBADF;
    5090             : 
    5091           0 :         if (req->close.file_slot) {
    5092           0 :                 ret = io_close_fixed(req, issue_flags);
    5093           0 :                 goto err;
    5094             :         }
    5095             : 
    5096           0 :         spin_lock(&files->file_lock);
    5097           0 :         fdt = files_fdtable(files);
    5098           0 :         if (close->fd >= fdt->max_fds) {
    5099           0 :                 spin_unlock(&files->file_lock);
    5100             :                 goto err;
    5101             :         }
    5102           0 :         file = fdt->fd[close->fd];
    5103           0 :         if (!file || file->f_op == &io_uring_fops) {
    5104           0 :                 spin_unlock(&files->file_lock);
    5105           0 :                 file = NULL;
    5106           0 :                 goto err;
    5107             :         }
    5108             : 
    5109             :         /* if the file has a flush method, be safe and punt to async */
    5110           0 :         if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
    5111           0 :                 spin_unlock(&files->file_lock);
    5112           0 :                 return -EAGAIN;
    5113             :         }
    5114             : 
    5115           0 :         ret = __close_fd_get_file(close->fd, &file);
    5116           0 :         spin_unlock(&files->file_lock);
    5117           0 :         if (ret < 0) {
    5118           0 :                 if (ret == -ENOENT)
    5119           0 :                         ret = -EBADF;
    5120             :                 goto err;
    5121             :         }
    5122             : 
    5123             :         /* No ->flush() or already async, safely close from here */
    5124           0 :         ret = filp_close(file, current->files);
    5125             : err:
    5126           0 :         if (ret < 0)
    5127           0 :                 req_set_fail(req);
    5128           0 :         if (file)
    5129           0 :                 fput(file);
    5130             :         __io_req_complete(req, issue_flags, ret, 0);
    5131             :         return 0;
    5132             : }
    5133             : 
    5134             : static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
    5135             : {
    5136           0 :         struct io_ring_ctx *ctx = req->ctx;
    5137             : 
    5138           0 :         if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
    5139             :                 return -EINVAL;
    5140           0 :         if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
    5141             :                      sqe->splice_fd_in))
    5142             :                 return -EINVAL;
    5143             : 
    5144           0 :         req->sync.off = READ_ONCE(sqe->off);
    5145           0 :         req->sync.len = READ_ONCE(sqe->len);
    5146           0 :         req->sync.flags = READ_ONCE(sqe->sync_range_flags);
    5147             :         return 0;
    5148             : }
    5149             : 
    5150           0 : static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
    5151             : {
    5152             :         int ret;
    5153             : 
    5154             :         /* sync_file_range always requires a blocking context */
    5155           0 :         if (issue_flags & IO_URING_F_NONBLOCK)
    5156             :                 return -EAGAIN;
    5157             : 
    5158           0 :         ret = sync_file_range(req->file, req->sync.off, req->sync.len,
    5159           0 :                                 req->sync.flags);
    5160           0 :         if (ret < 0)
    5161           0 :                 req_set_fail(req);
    5162           0 :         io_req_complete(req, ret);
    5163           0 :         return 0;
    5164             : }
    5165             : 
    5166             : #if defined(CONFIG_NET)
    5167             : static int io_setup_async_msg(struct io_kiocb *req,
    5168             :                               struct io_async_msghdr *kmsg)
    5169             : {
    5170             :         struct io_async_msghdr *async_msg = req->async_data;
    5171             : 
    5172             :         if (async_msg)
    5173             :                 return -EAGAIN;
    5174             :         if (io_alloc_async_data(req)) {
    5175             :                 kfree(kmsg->free_iov);
    5176             :                 return -ENOMEM;
    5177             :         }
    5178             :         async_msg = req->async_data;
    5179             :         req->flags |= REQ_F_NEED_CLEANUP;
    5180             :         memcpy(async_msg, kmsg, sizeof(*kmsg));
    5181             :         async_msg->msg.msg_name = &async_msg->addr;
    5182             :         /* if we're using fast_iov, set it to the new one */
    5183             :         if (!async_msg->free_iov)
    5184             :                 async_msg->msg.msg_iter.iov = async_msg->fast_iov;
    5185             : 
    5186             :         return -EAGAIN;
    5187             : }
    5188             : 
    5189             : static int io_sendmsg_copy_hdr(struct io_kiocb *req,
    5190             :                                struct io_async_msghdr *iomsg)
    5191             : {
    5192             :         iomsg->msg.msg_name = &iomsg->addr;
    5193             :         iomsg->free_iov = iomsg->fast_iov;
    5194             :         return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
    5195             :                                    req->sr_msg.msg_flags, &iomsg->free_iov);
    5196             : }
    5197             : 
    5198             : static int io_sendmsg_prep_async(struct io_kiocb *req)
    5199             : {
    5200             :         int ret;
    5201             : 
    5202             :         ret = io_sendmsg_copy_hdr(req, req->async_data);
    5203             :         if (!ret)
    5204             :                 req->flags |= REQ_F_NEED_CLEANUP;
    5205             :         return ret;
    5206             : }
    5207             : 
    5208             : static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
    5209             : {
    5210             :         struct io_sr_msg *sr = &req->sr_msg;
    5211             : 
    5212             :         if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
    5213             :                 return -EINVAL;
    5214             :         if (unlikely(sqe->addr2 || sqe->file_index))
    5215             :                 return -EINVAL;
    5216             : 
    5217             :         sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
    5218             :         sr->len = READ_ONCE(sqe->len);
    5219             :         sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
    5220             :         if (sr->msg_flags & MSG_DONTWAIT)
    5221             :                 req->flags |= REQ_F_NOWAIT;
    5222             : 
    5223             : #ifdef CONFIG_COMPAT
    5224             :         if (req->ctx->compat)
    5225             :                 sr->msg_flags |= MSG_CMSG_COMPAT;
    5226             : #endif
    5227             :         return 0;
    5228             : }
    5229             : 
    5230             : static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
    5231             : {
    5232             :         struct io_async_msghdr iomsg, *kmsg;
    5233             :         struct socket *sock;
    5234             :         unsigned flags;
    5235             :         int min_ret = 0;
    5236             :         int ret;
    5237             : 
    5238             :         sock = sock_from_file(req->file);
    5239             :         if (unlikely(!sock))
    5240             :                 return -ENOTSOCK;
    5241             : 
    5242             :         if (req_has_async_data(req)) {
    5243             :                 kmsg = req->async_data;
    5244             :         } else {
    5245             :                 ret = io_sendmsg_copy_hdr(req, &iomsg);
    5246             :                 if (ret)
    5247             :                         return ret;
    5248             :                 kmsg = &iomsg;
    5249             :         }
    5250             : 
    5251             :         flags = req->sr_msg.msg_flags;
    5252             :         if (issue_flags & IO_URING_F_NONBLOCK)
    5253             :                 flags |= MSG_DONTWAIT;
    5254             :         if (flags & MSG_WAITALL)
    5255             :                 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
    5256             : 
    5257             :         ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
    5258             : 
    5259             :         if (ret < min_ret) {
    5260             :                 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
    5261             :                         return io_setup_async_msg(req, kmsg);
    5262             :                 if (ret == -ERESTARTSYS)
    5263             :                         ret = -EINTR;
    5264             :                 req_set_fail(req);
    5265             :         }
    5266             :         /* fast path, check for non-NULL to avoid function call */
    5267             :         if (kmsg->free_iov)
    5268             :                 kfree(kmsg->free_iov);
    5269             :         req->flags &= ~REQ_F_NEED_CLEANUP;
    5270             :         __io_req_complete(req, issue_flags, ret, 0);
    5271             :         return 0;
    5272             : }
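
/*
 * Note on min_ret above: without MSG_WAITALL any non-negative return
 * completes the request, so min_ret stays 0. With MSG_WAITALL the send
 * only counts as complete once the whole iterator has been consumed,
 * so any shorter result takes the retry/error path. io_send() below
 * applies the same rule to the single-range variant.
 */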
    5273             : 
    5274             : static int io_send(struct io_kiocb *req, unsigned int issue_flags)
    5275             : {
    5276             :         struct io_sr_msg *sr = &req->sr_msg;
    5277             :         struct msghdr msg;
    5278             :         struct iovec iov;
    5279             :         struct socket *sock;
    5280             :         unsigned flags;
    5281             :         int min_ret = 0;
    5282             :         int ret;
    5283             : 
    5284             :         sock = sock_from_file(req->file);
    5285             :         if (unlikely(!sock))
    5286             :                 return -ENOTSOCK;
    5287             : 
    5288             :         ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
    5289             :         if (unlikely(ret))
    5290             :                 return ret;
    5291             : 
    5292             :         msg.msg_name = NULL;
    5293             :         msg.msg_control = NULL;
    5294             :         msg.msg_controllen = 0;
    5295             :         msg.msg_namelen = 0;
    5296             : 
    5297             :         flags = req->sr_msg.msg_flags;
    5298             :         if (issue_flags & IO_URING_F_NONBLOCK)
    5299             :                 flags |= MSG_DONTWAIT;
    5300             :         if (flags & MSG_WAITALL)
    5301             :                 min_ret = iov_iter_count(&msg.msg_iter);
    5302             : 
    5303             :         msg.msg_flags = flags;
    5304             :         ret = sock_sendmsg(sock, &msg);
    5305             :         if (ret < min_ret) {
    5306             :                 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
    5307             :                         return -EAGAIN;
    5308             :                 if (ret == -ERESTARTSYS)
    5309             :                         ret = -EINTR;
    5310             :                 req_set_fail(req);
    5311             :         }
    5312             :         __io_req_complete(req, issue_flags, ret, 0);
    5313             :         return 0;
    5314             : }
    5315             : 
    5316             : static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
    5317             :                                  struct io_async_msghdr *iomsg)
    5318             : {
    5319             :         struct io_sr_msg *sr = &req->sr_msg;
    5320             :         struct iovec __user *uiov;
    5321             :         size_t iov_len;
    5322             :         int ret;
    5323             : 
    5324             :         ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg,
    5325             :                                         &iomsg->uaddr, &uiov, &iov_len);
    5326             :         if (ret)
    5327             :                 return ret;
    5328             : 
    5329             :         if (req->flags & REQ_F_BUFFER_SELECT) {
    5330             :                 if (iov_len > 1)
    5331             :                         return -EINVAL;
    5332             :                 if (copy_from_user(iomsg->fast_iov, uiov, sizeof(*uiov)))
    5333             :                         return -EFAULT;
    5334             :                 sr->len = iomsg->fast_iov[0].iov_len;
    5335             :                 iomsg->free_iov = NULL;
    5336             :         } else {
    5337             :                 iomsg->free_iov = iomsg->fast_iov;
    5338             :                 ret = __import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
    5339             :                                      &iomsg->free_iov, &iomsg->msg.msg_iter,
    5340             :                                      false);
    5341             :                 if (ret > 0)
    5342             :                         ret = 0;
    5343             :         }
    5344             : 
    5345             :         return ret;
    5346             : }
    5347             : 
    5348             : #ifdef CONFIG_COMPAT
    5349             : static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
    5350             :                                         struct io_async_msghdr *iomsg)
    5351             : {
    5352             :         struct io_sr_msg *sr = &req->sr_msg;
    5353             :         struct compat_iovec __user *uiov;
    5354             :         compat_uptr_t ptr;
    5355             :         compat_size_t len;
    5356             :         int ret;
    5357             : 
    5358             :         ret = __get_compat_msghdr(&iomsg->msg, sr->umsg_compat, &iomsg->uaddr,
    5359             :                                   &ptr, &len);
    5360             :         if (ret)
    5361             :                 return ret;
    5362             : 
    5363             :         uiov = compat_ptr(ptr);
    5364             :         if (req->flags & REQ_F_BUFFER_SELECT) {
    5365             :                 compat_ssize_t clen;
    5366             : 
    5367             :                 if (len > 1)
    5368             :                         return -EINVAL;
    5369             :                 if (!access_ok(uiov, sizeof(*uiov)))
    5370             :                         return -EFAULT;
    5371             :                 if (__get_user(clen, &uiov->iov_len))
    5372             :                         return -EFAULT;
    5373             :                 if (clen < 0)
    5374             :                         return -EINVAL;
    5375             :                 sr->len = clen;
    5376             :                 iomsg->free_iov = NULL;
    5377             :         } else {
    5378             :                 iomsg->free_iov = iomsg->fast_iov;
    5379             :                 ret = __import_iovec(READ, (struct iovec __user *)uiov, len,
    5380             :                                    UIO_FASTIOV, &iomsg->free_iov,
    5381             :                                    &iomsg->msg.msg_iter, true);
    5382             :                 if (ret < 0)
    5383             :                         return ret;
    5384             :         }
    5385             : 
    5386             :         return 0;
    5387             : }
    5388             : #endif
    5389             : 
    5390             : static int io_recvmsg_copy_hdr(struct io_kiocb *req,
    5391             :                                struct io_async_msghdr *iomsg)
    5392             : {
    5393             :         iomsg->msg.msg_name = &iomsg->addr;
    5394             : 
    5395             : #ifdef CONFIG_COMPAT
    5396             :         if (req->ctx->compat)
    5397             :                 return __io_compat_recvmsg_copy_hdr(req, iomsg);
    5398             : #endif
    5399             : 
    5400             :         return __io_recvmsg_copy_hdr(req, iomsg);
    5401             : }
    5402             : 
    5403             : static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
    5404             :                                                unsigned int issue_flags)
    5405             : {
    5406             :         struct io_sr_msg *sr = &req->sr_msg;
    5407             : 
    5408             :         return io_buffer_select(req, &sr->len, sr->bgid, issue_flags);
    5409             : }
    5410             : 
    5411             : static int io_recvmsg_prep_async(struct io_kiocb *req)
    5412             : {
    5413             :         int ret;
    5414             : 
    5415             :         ret = io_recvmsg_copy_hdr(req, req->async_data);
    5416             :         if (!ret)
    5417             :                 req->flags |= REQ_F_NEED_CLEANUP;
    5418             :         return ret;
    5419             : }
    5420             : 
    5421             : static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
    5422             : {
    5423             :         struct io_sr_msg *sr = &req->sr_msg;
    5424             : 
    5425             :         if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
    5426             :                 return -EINVAL;
    5427             :         if (unlikely(sqe->addr2 || sqe->file_index))
    5428             :                 return -EINVAL;
    5429             : 
    5430             :         sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
    5431             :         sr->len = READ_ONCE(sqe->len);
    5432             :         sr->bgid = READ_ONCE(sqe->buf_group);
    5433             :         sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
    5434             :         if (sr->msg_flags & MSG_DONTWAIT)
    5435             :                 req->flags |= REQ_F_NOWAIT;
    5436             : 
    5437             : #ifdef CONFIG_COMPAT
    5438             :         if (req->ctx->compat)
    5439             :                 sr->msg_flags |= MSG_CMSG_COMPAT;
    5440             : #endif
    5441             :         sr->done_io = 0;
    5442             :         return 0;
    5443             : }
    5444             : 
    5445             : static bool io_net_retry(struct socket *sock, int flags)
    5446             : {
    5447             :         if (!(flags & MSG_WAITALL))
    5448             :                 return false;
    5449             :         return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
    5450             : }
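
/*
 * Illustrative note: retries are only allowed for MSG_WAITALL on
 * stream or seqpacket sockets, where resuming a short transfer cannot
 * corrupt message boundaries. io_recv() below shows the pattern: a
 * partial result advances sr->buf and sr->len, accumulates into
 * sr->done_io, and returns -EAGAIN to retry, so the final CQE result
 * is the total byte count across all attempts.
 */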
    5451             : 
    5452             : static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
    5453             : {
    5454             :         struct io_async_msghdr iomsg, *kmsg;
    5455             :         struct io_sr_msg *sr = &req->sr_msg;
    5456             :         struct socket *sock;
    5457             :         struct io_buffer *kbuf;
    5458             :         unsigned flags;
    5459             :         int ret, min_ret = 0;
    5460             :         bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
    5461             : 
    5462             :         sock = sock_from_file(req->file);
    5463             :         if (unlikely(!sock))
    5464             :                 return -ENOTSOCK;
    5465             : 
    5466             :         if (req_has_async_data(req)) {
    5467             :                 kmsg = req->async_data;
    5468             :         } else {
    5469             :                 ret = io_recvmsg_copy_hdr(req, &iomsg);
    5470             :                 if (ret)
    5471             :                         return ret;
    5472             :                 kmsg = &iomsg;
    5473             :         }
    5474             : 
    5475             :         if (req->flags & REQ_F_BUFFER_SELECT) {
    5476             :                 kbuf = io_recv_buffer_select(req, issue_flags);
    5477             :                 if (IS_ERR(kbuf))
    5478             :                         return PTR_ERR(kbuf);
    5479             :                 kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
    5480             :                 kmsg->fast_iov[0].iov_len = req->sr_msg.len;
    5481             :                 iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov,
    5482             :                                 1, req->sr_msg.len);
    5483             :         }
    5484             : 
    5485             :         flags = req->sr_msg.msg_flags;
    5486             :         if (force_nonblock)
    5487             :                 flags |= MSG_DONTWAIT;
    5488             :         if (flags & MSG_WAITALL)
    5489             :                 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
    5490             : 
    5491             :         ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
    5492             :                                         kmsg->uaddr, flags);
    5493             :         if (ret < min_ret) {
    5494             :                 if (ret == -EAGAIN && force_nonblock)
    5495             :                         return io_setup_async_msg(req, kmsg);
    5496             :                 if (ret == -ERESTARTSYS)
    5497             :                         ret = -EINTR;
    5498             :                 if (ret > 0 && io_net_retry(sock, flags)) {
    5499             :                         sr->done_io += ret;
    5500             :                         req->flags |= REQ_F_PARTIAL_IO;
    5501             :                         return io_setup_async_msg(req, kmsg);
    5502             :                 }
    5503             :                 req_set_fail(req);
    5504             :         } else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
    5505             :                 req_set_fail(req);
    5506             :         }
    5507             : 
    5508             :         /* fast path, check for non-NULL to avoid function call */
    5509             :         if (kmsg->free_iov)
    5510             :                 kfree(kmsg->free_iov);
    5511             :         req->flags &= ~REQ_F_NEED_CLEANUP;
    5512             :         if (ret >= 0)
    5513             :                 ret += sr->done_io;
    5514             :         else if (sr->done_io)
    5515             :                 ret = sr->done_io;
    5516             :         __io_req_complete(req, issue_flags, ret, io_put_kbuf(req, issue_flags));
    5517             :         return 0;
    5518             : }
    5519             : 
    5520             : static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
    5521             : {
    5522             :         struct io_buffer *kbuf;
    5523             :         struct io_sr_msg *sr = &req->sr_msg;
    5524             :         struct msghdr msg;
    5525             :         void __user *buf = sr->buf;
    5526             :         struct socket *sock;
    5527             :         struct iovec iov;
    5528             :         unsigned flags;
    5529             :         int ret, min_ret = 0;
    5530             :         bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
    5531             : 
    5532             :         sock = sock_from_file(req->file);
    5533             :         if (unlikely(!sock))
    5534             :                 return -ENOTSOCK;
    5535             : 
    5536             :         if (req->flags & REQ_F_BUFFER_SELECT) {
    5537             :                 kbuf = io_recv_buffer_select(req, issue_flags);
    5538             :                 if (IS_ERR(kbuf))
    5539             :                         return PTR_ERR(kbuf);
    5540             :                 buf = u64_to_user_ptr(kbuf->addr);
    5541             :         }
    5542             : 
    5543             :         ret = import_single_range(READ, buf, sr->len, &iov, &msg.msg_iter);
    5544             :         if (unlikely(ret))
    5545             :                 goto out_free;
    5546             : 
    5547             :         msg.msg_name = NULL;
    5548             :         msg.msg_control = NULL;
    5549             :         msg.msg_controllen = 0;
    5550             :         msg.msg_namelen = 0;
    5551             :         msg.msg_iocb = NULL;
    5552             :         msg.msg_flags = 0;
    5553             : 
    5554             :         flags = req->sr_msg.msg_flags;
    5555             :         if (force_nonblock)
    5556             :                 flags |= MSG_DONTWAIT;
    5557             :         if (flags & MSG_WAITALL)
    5558             :                 min_ret = iov_iter_count(&msg.msg_iter);
    5559             : 
    5560             :         ret = sock_recvmsg(sock, &msg, flags);
    5561             :         if (ret < min_ret) {
    5562             :                 if (ret == -EAGAIN && force_nonblock)
    5563             :                         return -EAGAIN;
    5564             :                 if (ret == -ERESTARTSYS)
    5565             :                         ret = -EINTR;
    5566             :                 if (ret > 0 && io_net_retry(sock, flags)) {
    5567             :                         sr->len -= ret;
    5568             :                         sr->buf += ret;
    5569             :                         sr->done_io += ret;
    5570             :                         req->flags |= REQ_F_PARTIAL_IO;
    5571             :                         return -EAGAIN;
    5572             :                 }
    5573             :                 req_set_fail(req);
    5574             :         } else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
    5575             : out_free:
    5576             :                 req_set_fail(req);
    5577             :         }
    5578             : 
    5579             :         if (ret >= 0)
    5580             :                 ret += sr->done_io;
    5581             :         else if (sr->done_io)
    5582             :                 ret = sr->done_io;
    5583             :         __io_req_complete(req, issue_flags, ret, io_put_kbuf(req, issue_flags));
    5584             :         return 0;
    5585             : }
    5586             : 
    5587             : static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
    5588             : {
    5589             :         struct io_accept *accept = &req->accept;
    5590             : 
    5591             :         if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
    5592             :                 return -EINVAL;
    5593             :         if (sqe->ioprio || sqe->len || sqe->buf_index)
    5594             :                 return -EINVAL;
    5595             : 
    5596             :         accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
    5597             :         accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
    5598             :         accept->flags = READ_ONCE(sqe->accept_flags);
    5599             :         accept->nofile = rlimit(RLIMIT_NOFILE);
    5600             : 
    5601             :         accept->file_slot = READ_ONCE(sqe->file_index);
    5602             :         if (accept->file_slot && (accept->flags & SOCK_CLOEXEC))
    5603             :                 return -EINVAL;
    5604             :         if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
    5605             :                 return -EINVAL;
    5606             :         if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
    5607             :                 accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
    5608             :         return 0;
    5609             : }
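
/*
 * Note on the last fixup above: on most architectures SOCK_NONBLOCK is
 * defined as O_NONBLOCK and the translation compiles away entirely. A
 * few architectures define the two differently, and since the accept
 * path works in terms of O_NONBLOCK file flags, the SOCK_NONBLOCK bit
 * is rewritten here; the accept4(2) syscall performs the same fixup.
 */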
    5610             : 
    5611             : static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
    5612             : {
    5613             :         struct io_accept *accept = &req->accept;
    5614             :         bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
    5615             :         unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
    5616             :         bool fixed = !!accept->file_slot;
    5617             :         struct file *file;
    5618             :         int ret, fd;
    5619             : 
    5620             :         if (!fixed) {
    5621             :                 fd = __get_unused_fd_flags(accept->flags, accept->nofile);
    5622             :                 if (unlikely(fd < 0))
    5623             :                         return fd;
    5624             :         }
    5625             :         file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
    5626             :                          accept->flags);
    5627             :         if (IS_ERR(file)) {
    5628             :                 if (!fixed)
    5629             :                         put_unused_fd(fd);
    5630             :                 ret = PTR_ERR(file);
    5631             :                 if (ret == -EAGAIN && force_nonblock)
    5632             :                         return -EAGAIN;
    5633             :                 if (ret == -ERESTARTSYS)
    5634             :                         ret = -EINTR;
    5635             :                 req_set_fail(req);
    5636             :         } else if (!fixed) {
    5637             :                 fd_install(fd, file);
    5638             :                 ret = fd;
    5639             :         } else {
    5640             :                 ret = io_install_fixed_file(req, file, issue_flags,
    5641             :                                             accept->file_slot - 1);
    5642             :         }
    5643             :         __io_req_complete(req, issue_flags, ret, 0);
    5644             :         return 0;
    5645             : }
    5646             : 
    5647             : static int io_connect_prep_async(struct io_kiocb *req)
    5648             : {
    5649             :         struct io_async_connect *io = req->async_data;
    5650             :         struct io_connect *conn = &req->connect;
    5651             : 
    5652             :         return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
    5653             : }
    5654             : 
    5655             : static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
    5656             : {
    5657             :         struct io_connect *conn = &req->connect;
    5658             : 
    5659             :         if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
    5660             :                 return -EINVAL;
    5661             :         if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags ||
    5662             :             sqe->splice_fd_in)
    5663             :                 return -EINVAL;
    5664             : 
    5665             :         conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
    5666             :         conn->addr_len = READ_ONCE(sqe->addr2);
    5667             :         return 0;
    5668             : }
    5669             : 
    5670             : static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
    5671             : {
    5672             :         struct io_async_connect __io, *io;
    5673             :         unsigned file_flags;
    5674             :         int ret;
    5675             :         bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
    5676             : 
    5677             :         if (req_has_async_data(req)) {
    5678             :                 io = req->async_data;
    5679             :         } else {
    5680             :                 ret = move_addr_to_kernel(req->connect.addr,
    5681             :                                                 req->connect.addr_len,
    5682             :                                                 &__io.address);
    5683             :                 if (ret)
    5684             :                         goto out;
    5685             :                 io = &__io;
    5686             :         }
    5687             : 
    5688             :         file_flags = force_nonblock ? O_NONBLOCK : 0;
    5689             : 
    5690             :         ret = __sys_connect_file(req->file, &io->address,
    5691             :                                         req->connect.addr_len, file_flags);
    5692             :         if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
    5693             :                 if (req_has_async_data(req))
    5694             :                         return -EAGAIN;
    5695             :                 if (io_alloc_async_data(req)) {
    5696             :                         ret = -ENOMEM;
    5697             :                         goto out;
    5698             :                 }
    5699             :                 memcpy(req->async_data, &__io, sizeof(__io));
    5700             :                 return -EAGAIN;
    5701             :         }
    5702             :         if (ret == -ERESTARTSYS)
    5703             :                 ret = -EINTR;
    5704             : out:
    5705             :         if (ret < 0)
    5706             :                 req_set_fail(req);
    5707             :         __io_req_complete(req, issue_flags, ret, 0);
    5708             :         return 0;
    5709             : }
    5710             : #else /* !CONFIG_NET */
    5711             : #define IO_NETOP_FN(op)                                                 \
    5712             : static int io_##op(struct io_kiocb *req, unsigned int issue_flags)      \
    5713             : {                                                                       \
    5714             :         return -EOPNOTSUPP;                                             \
    5715             : }
    5716             : 
    5717             : #define IO_NETOP_PREP(op)                                               \
    5718             : IO_NETOP_FN(op)                                                         \
    5719             : static int io_##op##_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) \
    5720             : {                                                                       \
    5721             :         return -EOPNOTSUPP;                                             \
    5722             : }                                                                       \
    5723             : 
    5724             : #define IO_NETOP_PREP_ASYNC(op)                                         \
    5725             : IO_NETOP_PREP(op)                                                       \
    5726             : static int io_##op##_prep_async(struct io_kiocb *req)                   \
    5727             : {                                                                       \
    5728             :         return -EOPNOTSUPP;                                             \
    5729             : }
    5730             : 
    5731             : IO_NETOP_PREP_ASYNC(sendmsg);
    5732             : IO_NETOP_PREP_ASYNC(recvmsg);
    5733             : IO_NETOP_PREP_ASYNC(connect);
    5734             : IO_NETOP_PREP(accept);
    5735             : IO_NETOP_FN(send);
    5736             : IO_NETOP_FN(recv);
    5737             : #endif /* CONFIG_NET */
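
/*
 * For reference, IO_NETOP_PREP(accept) above stamps out the following
 * pair of stubs when CONFIG_NET is not set:
 *
 *      static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
 *      {
 *              return -EOPNOTSUPP;
 *      }
 *      static int io_accept_prep(struct io_kiocb *req,
 *                                const struct io_uring_sqe *sqe)
 *      {
 *              return -EOPNOTSUPP;
 *      }
 */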
    5738             : 
    5739             : struct io_poll_table {
    5740             :         struct poll_table_struct pt;
    5741             :         struct io_kiocb *req;
    5742             :         int nr_entries;
    5743             :         int error;
    5744             : };
    5745             : 
    5746             : #define IO_POLL_CANCEL_FLAG     BIT(31)
    5747             : #define IO_POLL_REF_MASK        GENMASK(30, 0)
    5748             : 
    5749             : /*
    5750             :  * If the refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, the request is
    5751             :  * free and we can bump it to acquire ownership. Modifying a request while not
    5752             :  * owning it is disallowed; this prevents races when enqueueing task_work and
    5753             :  * between arming poll and wakeups.
    5754             :  */
    5755             : static inline bool io_poll_get_ownership(struct io_kiocb *req)
    5756             : {
    5757           0 :         return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
    5758             : }
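
/*
 * A minimal userspace sketch of the same single-owner scheme, using
 * C11 atomics instead of the kernel's atomic_t; all names below are
 * illustrative and not part of io_uring.
 */
#include <stdatomic.h>
#include <stdbool.h>

#define SKETCH_CANCEL_FLAG      (1u << 31)
#define SKETCH_REF_MASK         (SKETCH_CANCEL_FLAG - 1)

struct sketch_req { _Atomic unsigned int poll_refs; };

/* Whoever moves the ref count off zero becomes the owner; later
 * callers merely record that more work arrived while it was owned. */
static bool sketch_get_ownership(struct sketch_req *r)
{
        return !(atomic_fetch_add(&r->poll_refs, 1) & SKETCH_REF_MASK);
}

static void sketch_mark_cancelled(struct sketch_req *r)
{
        atomic_fetch_or(&r->poll_refs, SKETCH_CANCEL_FLAG);
}

/* The owner drops the refs it observed; a non-zero remainder (new
 * references, or the cancel flag) means it must loop again, mirroring
 * the atomic_sub_return() loop in io_poll_check_events(). */
static bool sketch_put_refs(struct sketch_req *r, unsigned int seen)
{
        unsigned int refs = seen & SKETCH_REF_MASK;

        return (atomic_fetch_sub(&r->poll_refs, refs) - refs) != 0;
}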
    5759             : 
    5760             : static void io_poll_mark_cancelled(struct io_kiocb *req)
    5761             : {
    5762           0 :         atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
    5763             : }
    5764             : 
    5765             : static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
    5766             : {
    5767             :         /* pure poll stashes this in ->async_data, poll-driven retry elsewhere */
    5768           0 :         if (req->opcode == IORING_OP_POLL_ADD)
    5769           0 :                 return req->async_data;
    5770           0 :         return req->apoll->double_poll;
    5771             : }
    5772             : 
    5773             : static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
    5774             : {
    5775           0 :         if (req->opcode == IORING_OP_POLL_ADD)
    5776           0 :                 return &req->poll;
    5777           0 :         return &req->apoll->poll;
    5778             : }
    5779             : 
    5780             : static void io_poll_req_insert(struct io_kiocb *req)
    5781             : {
    5782           0 :         struct io_ring_ctx *ctx = req->ctx;
    5783             :         struct hlist_head *list;
    5784             : 
    5785           0 :         list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
    5786           0 :         hlist_add_head(&req->hash_node, list);
    5787             : }
    5788             : 
    5789             : static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
    5790             :                               wait_queue_func_t wake_func)
    5791             : {
    5792           0 :         poll->head = NULL;
    5793             : #define IO_POLL_UNMASK  (EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
    5794             :         /* mask in events that we always want/need */
    5795           0 :         poll->events = events | IO_POLL_UNMASK;
    5796           0 :         INIT_LIST_HEAD(&poll->wait.entry);
    5797           0 :         init_waitqueue_func_entry(&poll->wait, wake_func);
    5798             : }
    5799             : 
    5800           0 : static inline void io_poll_remove_entry(struct io_poll_iocb *poll)
    5801             : {
    5802           0 :         struct wait_queue_head *head = smp_load_acquire(&poll->head);
    5803             : 
    5804           0 :         if (head) {
    5805           0 :                 spin_lock_irq(&head->lock);
    5806           0 :                 list_del_init(&poll->wait.entry);
    5807           0 :                 poll->head = NULL;
    5808           0 :                 spin_unlock_irq(&head->lock);
    5809             :         }
    5810           0 : }
    5811             : 
    5812           0 : static void io_poll_remove_entries(struct io_kiocb *req)
    5813             : {
    5814             :         /*
    5815             :          * Nothing to do if neither of those flags is set. Avoid dipping
    5816             :          * into the poll/apoll/double cachelines if we can.
    5817             :          */
    5818           0 :         if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL)))
    5819             :                 return;
    5820             : 
    5821             :         /*
    5822             :          * While we hold the waitqueue lock and the waitqueue is nonempty,
    5823             :          * wake_up_pollfree() will wait for us.  However, taking the waitqueue
    5824             :          * lock in the first place can race with the waitqueue being freed.
    5825             :          *
    5826             :          * We solve this as eventpoll does: by taking advantage of the fact that
    5827             :          * all users of wake_up_pollfree() will RCU-delay the actual free.  If
    5828             :          * we enter rcu_read_lock() and see that the pointer to the queue is
    5829             :          * non-NULL, we can then lock it without the memory being freed out from
    5830             :          * under us.
    5831             :          *
    5832             :          * Keep holding rcu_read_lock() as long as we hold the queue lock, in
    5833             :          * case the caller deletes the entry from the queue, leaving it empty.
    5834             :          * In that case, only RCU prevents the queue memory from being freed.
    5835             :          */
    5836             :         rcu_read_lock();
    5837           0 :         if (req->flags & REQ_F_SINGLE_POLL)
    5838           0 :                 io_poll_remove_entry(io_poll_get_single(req));
    5839           0 :         if (req->flags & REQ_F_DOUBLE_POLL)
    5840           0 :                 io_poll_remove_entry(io_poll_get_double(req));
    5841             :         rcu_read_unlock();
    5842             : }
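
/*
 * The same idiom in isolation (a sketch of the pattern used above, not
 * additional io_uring code): re-load the RCU-protected pointer inside
 * rcu_read_lock() and only then take its lock, so a concurrent
 * wake_up_pollfree() cannot free the waitqueue out from under us:
 *
 *      rcu_read_lock();
 *      head = smp_load_acquire(&poll->head);
 *      if (head) {
 *              spin_lock_irq(&head->lock);
 *              list_del_init(&poll->wait.entry);
 *              poll->head = NULL;
 *              spin_unlock_irq(&head->lock);
 *      }
 *      rcu_read_unlock();
 */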
    5843             : 
    5844             : /*
    5845             :  * All poll tw should go through this. Checks for poll events, manages
    5846             :  * references, does rewait, etc.
    5847             :  *
    5848             :  * Returns a negative error on failure. Returns >0 when no action is required
    5849             :  * (either a spurious wakeup or a served multishot CQE). Returns 0 when it's
    5850             :  * done with the request, with the resulting mask stored in req->result.
    5851             :  */
    5852           0 : static int io_poll_check_events(struct io_kiocb *req, bool locked)
    5853             : {
    5854           0 :         struct io_ring_ctx *ctx = req->ctx;
    5855             :         int v;
    5856             : 
    5857             :         /* req->task == current here, checking PF_EXITING is safe */
    5858           0 :         if (unlikely(req->task->flags & PF_EXITING))
    5859             :                 io_poll_mark_cancelled(req);
    5860             : 
    5861             :         do {
    5862           0 :                 v = atomic_read(&req->poll_refs);
    5863             : 
    5864             :                 /* tw handler should be the owner, and so have some references */
    5865           0 :                 if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
    5866             :                         return 0;
    5867           0 :                 if (v & IO_POLL_CANCEL_FLAG)
    5868             :                         return -ECANCELED;
    5869             : 
    5870           0 :                 if (!req->result) {
    5871           0 :                         struct poll_table_struct pt = { ._key = req->apoll_events };
    5872           0 :                         unsigned flags = locked ? 0 : IO_URING_F_UNLOCKED;
    5873             : 
    5874           0 :                         if (unlikely(!io_assign_file(req, flags)))
    5875           0 :                                 return -EBADF;
    5876           0 :                         req->result = vfs_poll(req->file, &pt) & req->apoll_events;
    5877             :                 }
    5878             : 
    5879             :                 /* multishot, just fill a CQE and proceed */
    5880           0 :                 if (req->result && !(req->apoll_events & EPOLLONESHOT)) {
    5881           0 :                         __poll_t mask = mangle_poll(req->result & req->apoll_events);
    5882             :                         bool filled;
    5883             : 
    5884           0 :                         spin_lock(&ctx->completion_lock);
    5885           0 :                         filled = io_fill_cqe_aux(ctx, req->user_data, mask,
    5886             :                                                  IORING_CQE_F_MORE);
    5887           0 :                         io_commit_cqring(ctx);
    5888           0 :                         spin_unlock(&ctx->completion_lock);
    5889           0 :                         if (unlikely(!filled))
    5890             :                                 return -ECANCELED;
    5891           0 :                         io_cqring_ev_posted(ctx);
    5892           0 :                 } else if (req->result) {
    5893             :                         return 0;
    5894             :                 }
    5895             : 
    5896             :                 /*
    5897             :                  * Release all references, retry if someone tried to restart
    5898             :                  * task_work while we were executing it.
    5899             :                  */
    5900           0 :         } while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs));
    5901             : 
    5902             :         return 1;
    5903             : }
    5904             : 
    5905           0 : static void io_poll_task_func(struct io_kiocb *req, bool *locked)
    5906             : {
    5907           0 :         struct io_ring_ctx *ctx = req->ctx;
    5908             :         int ret;
    5909             : 
    5910           0 :         ret = io_poll_check_events(req, *locked);
    5911           0 :         if (ret > 0)
    5912             :                 return;
    5913             : 
    5914           0 :         if (!ret) {
    5915           0 :                 req->result = mangle_poll(req->result & req->poll.events);
    5916             :         } else {
    5917           0 :                 req->result = ret;
    5918           0 :                 req_set_fail(req);
    5919             :         }
    5920             : 
    5921           0 :         io_poll_remove_entries(req);
    5922           0 :         spin_lock(&ctx->completion_lock);
    5923           0 :         hash_del(&req->hash_node);
    5924           0 :         __io_req_complete_post(req, req->result, 0);
    5925           0 :         io_commit_cqring(ctx);
    5926           0 :         spin_unlock(&ctx->completion_lock);
    5927           0 :         io_cqring_ev_posted(ctx);
    5928             : }
    5929             : 
    5930           0 : static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
    5931             : {
    5932           0 :         struct io_ring_ctx *ctx = req->ctx;
    5933             :         int ret;
    5934             : 
    5935           0 :         ret = io_poll_check_events(req, *locked);
    5936           0 :         if (ret > 0)
    5937             :                 return;
    5938             : 
    5939           0 :         io_poll_remove_entries(req);
    5940           0 :         spin_lock(&ctx->completion_lock);
    5941           0 :         hash_del(&req->hash_node);
    5942           0 :         spin_unlock(&ctx->completion_lock);
    5943             : 
    5944           0 :         if (!ret)
    5945           0 :                 io_req_task_submit(req, locked);
    5946             :         else
    5947           0 :                 io_req_complete_failed(req, ret);
    5948             : }
    5949             : 
    5950             : static void __io_poll_execute(struct io_kiocb *req, int mask, int events)
    5951             : {
    5952           0 :         req->result = mask;
    5953             :         /*
    5954             :          * This is useful for poll that is armed on behalf of another
    5955             :          * request, and where the wakeup path could be on a different
    5956             :          * CPU. We want to avoid pulling in req->apoll->events for that
    5957             :          * case.
    5958             :          */
    5959           0 :         req->apoll_events = events;
    5960           0 :         if (req->opcode == IORING_OP_POLL_ADD)
    5961           0 :                 req->io_task_work.func = io_poll_task_func;
    5962             :         else
    5963           0 :                 req->io_task_work.func = io_apoll_task_func;
    5964             : 
    5965           0 :         trace_io_uring_task_add(req->ctx, req, req->user_data, req->opcode, mask);
    5966           0 :         io_req_task_work_add(req, false);
    5967             : }
    5968             : 
    5969           0 : static inline void io_poll_execute(struct io_kiocb *req, int res, int events)
    5970             : {
    5971           0 :         if (io_poll_get_ownership(req))
    5972             :                 __io_poll_execute(req, res, events);
    5973           0 : }
    5974             : 
    5975             : static void io_poll_cancel_req(struct io_kiocb *req)
    5976             : {
    5977           0 :         io_poll_mark_cancelled(req);
    5978             :         /* kick tw, which should complete the request */
    5979           0 :         io_poll_execute(req, 0, 0);
    5980             : }
    5981             : 
    5982             : #define wqe_to_req(wait)        ((void *)((unsigned long) (wait)->private & ~1))
    5983             : #define wqe_is_double(wait)     ((unsigned long) (wait)->private & 1)
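
/*
 * Sketch of the tagged-pointer trick behind the two macros above
 * (illustrative helpers, not io_uring code): the wait entry's
 * ->private field holds the request pointer, and since that pointer is
 * at least 2-byte aligned, bit 0 is free to flag whether this wait
 * entry belongs to the second (double) poll.
 */
static inline void *sketch_tag_req(void *req, bool is_double)
{
        return (void *)((unsigned long)req | (unsigned long)is_double);
}

static inline void *sketch_untag_req(void *private)
{
        return (void *)((unsigned long)private & ~1UL);
}

static inline bool sketch_is_double(void *private)
{
        return (unsigned long)private & 1;
}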
    5984             : 
    5985           0 : static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
    5986             :                         void *key)
    5987             : {
    5988           0 :         struct io_kiocb *req = wqe_to_req(wait);
    5989           0 :         struct io_poll_iocb *poll = container_of(wait, struct io_poll_iocb,
    5990             :                                                  wait);
    5991           0 :         __poll_t mask = key_to_poll(key);
    5992             : 
    5993           0 :         if (unlikely(mask & POLLFREE)) {
    5994           0 :                 io_poll_mark_cancelled(req);
    5995             :                 /* we have to kick tw in case it's not already */
    5996           0 :                 io_poll_execute(req, 0, poll->events);
    5997             : 
    5998             :                 /*
    5999             :                  * If the waitqueue is being freed early but someone already
    6000             :                  * holds ownership over it, we have to tear down the request as
    6001             :                  * best we can. That means immediately removing the request from
    6002             :                  * its waitqueue and preventing all further accesses to the
    6003             :                  * waitqueue via the request.
    6004             :                  */
    6005           0 :                 list_del_init(&poll->wait.entry);
    6006             : 
    6007             :                 /*
    6008             :                  * Careful: this *must* be the last step, since as soon
    6009             :                  * as poll->head is NULL'ed out, the request can be
    6010             :                  * completed and freed; the completion side no longer
    6011             :                  * needs to take the waitqueue lock.
    6012             :                  */
    6013           0 :                 smp_store_release(&poll->head, NULL);
    6014           0 :                 return 1;
    6015             :         }
    6016             : 
    6017             :         /* for instances that support it, check for an event match first */
    6018           0 :         if (mask && !(mask & poll->events))
    6019             :                 return 0;
    6020             : 
    6021           0 :         if (io_poll_get_ownership(req)) {
    6022             :                 /* optional, saves extra locking for removal in tw handler */
    6023           0 :                 if (mask && poll->events & EPOLLONESHOT) {
    6024           0 :                         list_del_init(&poll->wait.entry);
    6025           0 :                         poll->head = NULL;
    6026           0 :                         if (wqe_is_double(wait))
    6027           0 :                                 req->flags &= ~REQ_F_DOUBLE_POLL;
    6028             :                         else
    6029           0 :                                 req->flags &= ~REQ_F_SINGLE_POLL;
    6030             :                 }
    6031           0 :                 __io_poll_execute(req, mask, poll->events);
    6032             :         }
    6033             :         return 1;
    6034             : }
    6035             : 
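
The smp_store_release() in the POLLFREE branch pairs with an acquire load on the entry-removal side (io_poll_remove_entries(), defined earlier in this file): a reader that observes a non-NULL head is also guaranteed to observe the entry still linked. A hedged userspace analogue of that publish/observe protocol using C11 atomics; the types and function names here are illustrative only, not the kernel's:

        #include <stdatomic.h>
        #include <stdio.h>

        /* illustrative stand-in; not the kernel's type */
        struct waitqueue { int unused; };

        static _Atomic(struct waitqueue *) head_ptr;

        /* POLLFREE side: publish the teardown with release semantics,
         * as the last step; after this the other side may free it */
        static void waker_teardown(void)
        {
                atomic_store_explicit(&head_ptr, NULL, memory_order_release);
        }

        /* removal side: an acquire load; seeing non-NULL here also
         * guarantees seeing the entry still linked on the waitqueue */
        static void remover(void)
        {
                struct waitqueue *head =
                        atomic_load_explicit(&head_ptr, memory_order_acquire);

                if (head)
                        printf("head live: safe to unlink under its lock\n");
                else
                        printf("POLLFREE already tore the head down\n");
        }

        int main(void)
        {
                static struct waitqueue wq;

                atomic_store(&head_ptr, &wq);
                remover();
                waker_teardown();
                remover();
                return 0;
        }
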
    6036           0 : static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
    6037             :                             struct wait_queue_head *head,
    6038             :                             struct io_poll_iocb **poll_ptr)
    6039             : {
    6040           0 :         struct io_kiocb *req = pt->req;
    6041           0 :         unsigned long wqe_private = (unsigned long) req;
    6042             : 
    6043             :         /*
    6044             :          * The file being polled uses multiple waitqueues for poll handling
    6045             :          * (e.g. one for read, one for write). Set up a separate io_poll_iocb
    6046             :          * if this happens (a driver-side sketch follows this function).
    6047             :          */
    6048           0 :         if (unlikely(pt->nr_entries)) {
    6049           0 :                 struct io_poll_iocb *first = poll;
    6050             : 
    6051             :                 /* double add on the same waitqueue head, ignore */
    6052           0 :                 if (first->head == head)
    6053             :                         return;
    6054             :                 /* already have a 2nd entry, fail a third attempt */
    6055           0 :                 if (*poll_ptr) {
    6056           0 :                         if ((*poll_ptr)->head == head)
    6057             :                                 return;
    6058           0 :                         pt->error = -EINVAL;
    6059           0 :                         return;
    6060             :                 }
    6061             : 
    6062           0 :                 poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
    6063           0 :                 if (!poll) {
    6064           0 :                         pt->error = -ENOMEM;
    6065           0 :                         return;
    6066             :                 }
    6067             :                 /* mark as double wq entry */
    6068           0 :                 wqe_private |= 1;
    6069           0 :                 req->flags |= REQ_F_DOUBLE_POLL;
    6070           0 :                 io_init_poll_iocb(poll, first->events, first->wait.func);
    6071           0 :                 *poll_ptr = poll;
    6072           0 :                 if (req->opcode == IORING_OP_POLL_ADD)
    6073           0 :                         req->flags |= REQ_F_ASYNC_DATA;
    6074             :         }
    6075             : 
    6076           0 :         req->flags |= REQ_F_SINGLE_POLL;
    6077           0 :         pt->nr_entries++;
    6078           0 :         poll->head = head;
    6079           0 :         poll->wait.private = (void *) wqe_private;
    6080             : 
    6081           0 :         if (poll->events & EPOLLEXCLUSIVE)
    6082           0 :                 add_wait_queue_exclusive(head, &poll->wait);
    6083             :         else
    6084           0 :                 add_wait_queue(head, &poll->wait);
    6085             : }
    6086             : 
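
The double-entry machinery above only comes into play for files whose ->poll() method registers on more than one waitqueue. A hedged kernel-style sketch of such a method; example_dev, its fields, and the two readiness helpers are hypothetical:

        #include <linux/poll.h>

        /* hypothetical device with split read/write readiness */
        struct example_dev {
                wait_queue_head_t read_wq;
                wait_queue_head_t write_wq;
        };

        static bool example_can_read(struct example_dev *dev);   /* hypothetical */
        static bool example_can_write(struct example_dev *dev);  /* hypothetical */

        static __poll_t example_poll(struct file *file, poll_table *wait)
        {
                struct example_dev *dev = file->private_data;
                __poll_t mask = 0;

                /* each poll_wait() invokes the registered queue proc; the
                 * second call is what allocates the extra io_poll_iocb above */
                poll_wait(file, &dev->read_wq, wait);
                poll_wait(file, &dev->write_wq, wait);

                if (example_can_read(dev))
                        mask |= EPOLLIN | EPOLLRDNORM;
                if (example_can_write(dev))
                        mask |= EPOLLOUT | EPOLLWRNORM;
                return mask;
        }
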
    6087           0 : static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
    6088             :                                struct poll_table_struct *p)
    6089             : {
    6090           0 :         struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
    6091             : 
    6092           0 :         __io_queue_proc(&pt->req->poll, pt, head,
    6093           0 :                         (struct io_poll_iocb **) &pt->req->async_data);
    6094           0 : }
    6095             : 
    6096           0 : static int __io_arm_poll_handler(struct io_kiocb *req,
    6097             :                                  struct io_poll_iocb *poll,
    6098             :                                  struct io_poll_table *ipt, __poll_t mask)
    6099             : {
    6100           0 :         struct io_ring_ctx *ctx = req->ctx;
    6101             :         int v;
    6102             : 
    6103           0 :         INIT_HLIST_NODE(&req->hash_node);
    6104           0 :         io_init_poll_iocb(poll, mask, io_poll_wake);
    6105           0 :         poll->file = req->file;
    6106             : 
    6107           0 :         ipt->pt._key = mask;
    6108           0 :         ipt->req = req;
    6109           0 :         ipt->error = 0;
    6110           0 :         ipt->nr_entries = 0;
    6111             : 
    6112             :         /*
    6113             :          * Take ownership to delay any tw execution until we're done with
    6114             :          * poll arming; see io_poll_get_ownership().
    6115             :          */
    6116           0 :         atomic_set(&req->poll_refs, 1);
    6117           0 :         mask = vfs_poll(req->file, &ipt->pt) & poll->events;
    6118             : 
    6119           0 :         if (mask && (poll->events & EPOLLONESHOT)) {
    6120           0 :                 io_poll_remove_entries(req);
    6121             :                 /* no one else has access to the req, forget about the ref */
    6122           0 :                 return mask;
    6123             :         }
    6124           0 :         if (!mask && unlikely(ipt->error || !ipt->nr_entries)) {
    6125           0 :                 io_poll_remove_entries(req);
    6126           0 :                 if (!ipt->error)
    6127           0 :                         ipt->error = -EINVAL;
    6128             :                 return 0;
    6129             :         }
    6130             : 
    6131           0 :         spin_lock(&ctx->completion_lock);
    6132           0 :         io_poll_req_insert(req);
    6133           0 :         spin_unlock(&ctx->completion_lock);
    6134             : 
    6135           0 :         if (mask) {
    6136             :                 /* can't multishot if failed, just queue the event we've got */
    6137           0 :                 if (unlikely(ipt->error || !ipt->nr_entries))
    6138           0 :                         poll->events |= EPOLLONESHOT;
    6139           0 :                 __io_poll_execute(req, mask, poll->events);
    6140           0 :                 return 0;
    6141             :         }
    6142             : 
    6143             :         /*
    6144             :          * Release ownership. If someone tried to queue a tw while it was
    6145             :          * locked, kick it off for them.
    6146             :          */
    6147           0 :         v = atomic_dec_return(&req->poll_refs);
    6148           0 :         if (unlikely(v & IO_POLL_REF_MASK))
    6149           0 :                 __io_poll_execute(req, 0, poll->events);
    6150             :         return 0;
    6151             : }
    6152             : 
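
The arming protocol hinges on req->poll_refs: seeding it to 1 keeps ownership with the submitter while vfs_poll() runs, and the final atomic_dec_return() both drops that ownership and reports whether a waker queued behind it. A hedged userspace reduction of the handshake; the real counter packs extra flag bits above IO_POLL_REF_MASK, which this sketch omits:

        #include <stdatomic.h>
        #include <stdbool.h>
        #include <stdio.h>

        static atomic_int poll_refs;

        /* io_poll_get_ownership() analogue: whoever bumps the count
         * from zero owns the request and must run the work */
        static bool get_ownership(void)
        {
                return atomic_fetch_add(&poll_refs, 1) == 0;
        }

        /* __io_arm_poll_handler() analogue */
        static void arm_and_release(bool concurrent_wake)
        {
                atomic_store(&poll_refs, 1);    /* own it while arming */

                if (concurrent_wake && !get_ownership())
                        printf("waker lost the race, parked behind owner\n");

                /* release: a non-zero remainder means someone queued
                 * behind us and we must kick the task_work for them */
                if (atomic_fetch_sub(&poll_refs, 1) != 1)
                        printf("pending waker found: run poll handling\n");
                else
                        printf("clean release\n");
        }

        int main(void)
        {
                arm_and_release(false);
                arm_and_release(true);
                return 0;
        }
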
    6153           0 : static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
    6154             :                                struct poll_table_struct *p)
    6155             : {
    6156           0 :         struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
    6157           0 :         struct async_poll *apoll = pt->req->apoll;
    6158             : 
    6159           0 :         __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
    6160           0 : }
    6161             : 
    6162             : enum {
    6163             :         IO_APOLL_OK,
    6164             :         IO_APOLL_ABORTED,
    6165             :         IO_APOLL_READY
    6166             : };
    6167             : 
    6168           0 : static int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
    6169             : {
    6170           0 :         const struct io_op_def *def = &io_op_defs[req->opcode];
    6171           0 :         struct io_ring_ctx *ctx = req->ctx;
    6172             :         struct async_poll *apoll;
    6173             :         struct io_poll_table ipt;
    6174           0 :         __poll_t mask = EPOLLONESHOT | POLLERR | POLLPRI;
    6175             :         int ret;
    6176             : 
    6177           0 :         if (!def->pollin && !def->pollout)
    6178             :                 return IO_APOLL_ABORTED;
    6179           0 :         if (!file_can_poll(req->file) || (req->flags & REQ_F_POLLED))
    6180             :                 return IO_APOLL_ABORTED;
    6181             : 
    6182           0 :         if (def->pollin) {
    6183           0 :                 mask |= POLLIN | POLLRDNORM;
    6184             : 
    6185             :                 /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
    6186           0 :                 if ((req->opcode == IORING_OP_RECVMSG) &&
    6187           0 :                     (req->sr_msg.msg_flags & MSG_ERRQUEUE))
    6188           0 :                         mask &= ~POLLIN;
    6189             :         } else {
    6190             :                 mask |= POLLOUT | POLLWRNORM;
    6191             :         }
    6192           0 :         if (def->poll_exclusive)
    6193           0 :                 mask |= EPOLLEXCLUSIVE;
    6194           0 :         if (!(issue_flags & IO_URING_F_UNLOCKED) &&
    6195           0 :             !list_empty(&ctx->apoll_cache)) {
    6196           0 :                 apoll = list_first_entry(&ctx->apoll_cache, struct async_poll,
    6197             :                                                 poll.wait.entry);
    6198           0 :                 list_del_init(&apoll->poll.wait.entry);
    6199             :         } else {
    6200           0 :                 apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
    6201           0 :                 if (unlikely(!apoll))
    6202             :                         return IO_APOLL_ABORTED;
    6203             :         }
    6204           0 :         apoll->double_poll = NULL;
    6205           0 :         req->apoll = apoll;
    6206           0 :         req->flags |= REQ_F_POLLED;
    6207           0 :         ipt.pt._qproc = io_async_queue_proc;
    6208             : 
    6209           0 :         io_kbuf_recycle(req, issue_flags);
    6210             : 
    6211           0 :         ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask);
    6212           0 :         if (ret || ipt.error)
    6213           0 :                 return ret ? IO_APOLL_READY : IO_APOLL_ABORTED;
    6214             : 
    6215             :         trace_io_uring_poll_arm(ctx, req, req->user_data, req->opcode,
    6216             :                                 mask, apoll->poll.events);
    6217             :         return IO_APOLL_OK;
    6218             : }
    6219             : 
    6220             : /*
    6221             :  * Returns true if we found and killed one or more poll requests
    6222             :  */
    6223           0 : static __cold bool io_poll_remove_all(struct io_ring_ctx *ctx,
    6224             :                                       struct task_struct *tsk, bool cancel_all)
    6225             : {
    6226             :         struct hlist_node *tmp;
    6227             :         struct io_kiocb *req;
    6228           0 :         bool found = false;
    6229             :         int i;
    6230             : 
    6231           0 :         spin_lock(&ctx->completion_lock);
    6232           0 :         for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
    6233             :                 struct hlist_head *list;
    6234             : 
    6235           0 :                 list = &ctx->cancel_hash[i];
    6236           0 :                 hlist_for_each_entry_safe(req, tmp, list, hash_node) {
    6237           0 :                         if (io_match_task_safe(req, tsk, cancel_all)) {
    6238           0 :                                 hlist_del_init(&req->hash_node);
    6239           0 :                                 io_poll_cancel_req(req);
    6240           0 :                                 found = true;
    6241             :                         }
    6242             :                 }
    6243             :         }
    6244           0 :         spin_unlock(&ctx->completion_lock);
    6245           0 :         return found;
    6246             : }
    6247             : 
    6248           0 : static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, __u64 sqe_addr,
    6249             :                                      bool poll_only)
    6250             :         __must_hold(&ctx->completion_lock)
    6251             : {
    6252             :         struct hlist_head *list;
    6253             :         struct io_kiocb *req;
    6254             : 
    6255           0 :         list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
    6256           0 :         hlist_for_each_entry(req, list, hash_node) {
    6257           0 :                 if (sqe_addr != req->user_data)
    6258           0 :                         continue;
    6259           0 :                 if (poll_only && req->opcode != IORING_OP_POLL_ADD)
    6260           0 :                         continue;
    6261             :                 return req;
    6262             :         }
    6263             :         return NULL;
    6264             : }
    6265             : 
    6266           0 : static bool io_poll_disarm(struct io_kiocb *req)
    6267             :         __must_hold(&ctx->completion_lock)
    6268             : {
    6269           0 :         if (!io_poll_get_ownership(req))
    6270             :                 return false;
    6271           0 :         io_poll_remove_entries(req);
    6272           0 :         hash_del(&req->hash_node);
    6273             :         return true;
    6274             : }
    6275             : 
    6276           0 : static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr,
    6277             :                           bool poll_only)
    6278             :         __must_hold(&ctx->completion_lock)
    6279             : {
    6280           0 :         struct io_kiocb *req = io_poll_find(ctx, sqe_addr, poll_only);
    6281             : 
    6282           0 :         if (!req)
    6283             :                 return -ENOENT;
    6284           0 :         io_poll_cancel_req(req);
    6285           0 :         return 0;
    6286             : }
    6287             : 
    6288             : static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
    6289             :                                      unsigned int flags)
    6290             : {
    6291             :         u32 events;
    6292             : 
    6293           0 :         events = READ_ONCE(sqe->poll32_events);
    6294             : #ifdef __BIG_ENDIAN
    6295             :         events = swahw32(events);
    6296             : #endif
    6297           0 :         if (!(flags & IORING_POLL_ADD_MULTI))
    6298           0 :                 events |= EPOLLONESHOT;
    6299           0 :         return demangle_poll(events) | (events & (EPOLLEXCLUSIVE|EPOLLONESHOT));
    6300             : }
    6301             : 
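
io_poll_parse_events() above is the kernel half of a userspace contract: without IORING_POLL_ADD_MULTI in sqe->len, EPOLLONESHOT is OR'ed in and the poll fires exactly once. A hedged liburing sketch arming the multishot form on stdin; io_uring_prep_poll_multishot() is the liburing helper that sets the flag, multishot poll needs a 5.13+ kernel, and error handling is trimmed:

        #include <liburing.h>
        #include <poll.h>
        #include <stdio.h>

        int main(void)
        {
                struct io_uring ring;
                struct io_uring_sqe *sqe;
                struct io_uring_cqe *cqe;

                if (io_uring_queue_init(8, &ring, 0) < 0)
                        return 1;

                sqe = io_uring_get_sqe(&ring);
                /* sets IORING_POLL_ADD_MULTI, so EPOLLONESHOT is not implied */
                io_uring_prep_poll_multishot(sqe, 0, POLLIN);
                sqe->user_data = 0xcafe;
                io_uring_submit(&ring);

                /* one CQE per readiness event; IORING_CQE_F_MORE says
                 * the poll request is still armed */
                while (io_uring_wait_cqe(&ring, &cqe) == 0) {
                        unsigned flags = cqe->flags;

                        printf("mask=0x%x more=%d\n", cqe->res,
                               !!(flags & IORING_CQE_F_MORE));
                        io_uring_cqe_seen(&ring, cqe);
                        if (!(flags & IORING_CQE_F_MORE))
                                break;
                }
                io_uring_queue_exit(&ring);
                return 0;
        }
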
    6302           0 : static int io_poll_update_prep(struct io_kiocb *req,
    6303             :                                const struct io_uring_sqe *sqe)
    6304             : {
    6305           0 :         struct io_poll_update *upd = &req->poll_update;
    6306             :         u32 flags;
    6307             : 
    6308           0 :         if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
    6309             :                 return -EINVAL;
    6310           0 :         if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
    6311             :                 return -EINVAL;
    6312           0 :         flags = READ_ONCE(sqe->len);
    6313           0 :         if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
    6314             :                       IORING_POLL_ADD_MULTI))
    6315             :                 return -EINVAL;
    6316             :         /* meaningless without update */
    6317           0 :         if (flags == IORING_POLL_ADD_MULTI)
    6318             :                 return -EINVAL;
    6319             : 
    6320           0 :         upd->old_user_data = READ_ONCE(sqe->addr);
    6321           0 :         upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
    6322           0 :         upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;
    6323             : 
    6324           0 :         upd->new_user_data = READ_ONCE(sqe->off);
    6325           0 :         if (!upd->update_user_data && upd->new_user_data)
    6326             :                 return -EINVAL;
    6327           0 :         if (upd->update_events)
    6328           0 :                 upd->events = io_poll_parse_events(sqe, flags);
    6329           0 :         else if (sqe->poll32_events)
    6330             :                 return -EINVAL;
    6331             : 
    6332             :         return 0;
    6333             : }
    6334             : 
    6335           0 : static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
    6336             : {
    6337           0 :         struct io_poll_iocb *poll = &req->poll;
    6338             :         u32 flags;
    6339             : 
    6340           0 :         if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
    6341             :                 return -EINVAL;
    6342           0 :         if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->addr)
    6343             :                 return -EINVAL;
    6344           0 :         flags = READ_ONCE(sqe->len);
    6345           0 :         if (flags & ~IORING_POLL_ADD_MULTI)
    6346             :                 return -EINVAL;
    6347           0 :         if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
    6348             :                 return -EINVAL;
    6349             : 
    6350           0 :         io_req_set_refcount(req);
    6351           0 :         req->apoll_events = poll->events = io_poll_parse_events(sqe, flags);
    6352           0 :         return 0;
    6353             : }
    6354             : 
    6355           0 : static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
    6356             : {
    6357           0 :         struct io_poll_iocb *poll = &req->poll;
    6358             :         struct io_poll_table ipt;
    6359             :         int ret;
    6360             : 
    6361           0 :         ipt.pt._qproc = io_poll_queue_proc;
    6362             : 
    6363           0 :         ret = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events);
    6364           0 :         ret = ret ?: ipt.error;
    6365           0 :         if (ret)
    6366             :                 __io_req_complete(req, issue_flags, ret, 0);
    6367           0 :         return 0;
    6368             : }
    6369             : 
    6370           0 : static int io_poll_update(struct io_kiocb *req, unsigned int issue_flags)
    6371             : {
    6372           0 :         struct io_ring_ctx *ctx = req->ctx;
    6373             :         struct io_kiocb *preq;
    6374           0 :         int ret2, ret = 0;
    6375             :         bool locked;
    6376             : 
    6377           0 :         spin_lock(&ctx->completion_lock);
    6378           0 :         preq = io_poll_find(ctx, req->poll_update.old_user_data, true);
    6379           0 :         if (!preq || !io_poll_disarm(preq)) {
    6380           0 :                 spin_unlock(&ctx->completion_lock);
    6381           0 :                 ret = preq ? -EALREADY : -ENOENT;
    6382             :                 goto out;
    6383             :         }
    6384           0 :         spin_unlock(&ctx->completion_lock);
    6385             : 
    6386           0 :         if (req->poll_update.update_events || req->poll_update.update_user_data) {
    6387             :                 /* only update the event mask, keep the behavior flags */
    6388           0 :                 if (req->poll_update.update_events) {
    6389           0 :                         preq->poll.events &= ~0xffff;
    6390           0 :                         preq->poll.events |= req->poll_update.events & 0xffff;
    6391           0 :                         preq->poll.events |= IO_POLL_UNMASK;
    6392             :                 }
    6393           0 :                 if (req->poll_update.update_user_data)
    6394           0 :                         preq->user_data = req->poll_update.new_user_data;
    6395             : 
    6396           0 :                 ret2 = io_poll_add(preq, issue_flags);
    6397             :                 /* successfully updated, don't complete poll request */
    6398           0 :                 if (!ret2)
    6399             :                         goto out;
    6400             :         }
    6401             : 
    6402           0 :         req_set_fail(preq);
    6403           0 :         preq->result = -ECANCELED;
    6404           0 :         locked = !(issue_flags & IO_URING_F_UNLOCKED);
    6405           0 :         io_req_task_complete(preq, &locked);
    6406             : out:
    6407           0 :         if (ret < 0)
    6408           0 :                 req_set_fail(req);
    6409             :         /* complete update request, we're done with it */
    6410           0 :         __io_req_complete(req, issue_flags, ret, 0);
    6411           0 :         return 0;
    6412             : }
    6413             : 
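
A hedged sketch of the userspace side of this update path, retargeting an armed poll's mask and user_data in one SQE. The io_uring_prep_poll_update() signature shown is the liburing 2.2+ one taking __u64 tags; earlier releases passed them as void *. The 0xcafe/0xbeef/0xfeed tags are arbitrary:

        #include <liburing.h>
        #include <poll.h>

        /* retarget the poll armed with user_data 0xcafe: new mask, new tag */
        static void queue_poll_update(struct io_uring *ring)
        {
                struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

                io_uring_prep_poll_update(sqe, 0xcafe, 0xbeef, POLLOUT,
                                          IORING_POLL_UPDATE_EVENTS |
                                          IORING_POLL_UPDATE_USER_DATA);
                /* tag for the update request's own CQE: 0 on success,
                 * -ENOENT if 0xcafe wasn't found, -EALREADY if it was
                 * already running its completion */
                sqe->user_data = 0xfeed;
        }
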
    6414           0 : static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
    6415             : {
    6416           0 :         struct io_timeout_data *data = container_of(timer,
    6417             :                                                 struct io_timeout_data, timer);
    6418           0 :         struct io_kiocb *req = data->req;
    6419           0 :         struct io_ring_ctx *ctx = req->ctx;
    6420             :         unsigned long flags;
    6421             : 
    6422           0 :         spin_lock_irqsave(&ctx->timeout_lock, flags);
    6423           0 :         list_del_init(&req->timeout.list);
    6424           0 :         atomic_set(&req->ctx->cq_timeouts,
    6425           0 :                 atomic_read(&req->ctx->cq_timeouts) + 1);
    6426           0 :         spin_unlock_irqrestore(&ctx->timeout_lock, flags);
    6427             : 
    6428           0 :         if (!(data->flags & IORING_TIMEOUT_ETIME_SUCCESS))
    6429           0 :                 req_set_fail(req);
    6430             : 
    6431           0 :         req->result = -ETIME;
    6432           0 :         req->io_task_work.func = io_req_task_complete;
    6433           0 :         io_req_task_work_add(req, false);
    6434           0 :         return HRTIMER_NORESTART;
    6435             : }
    6436             : 
    6437           0 : static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
    6438             :                                            __u64 user_data)
    6439             :         __must_hold(&ctx->timeout_lock)
    6440             : {
    6441             :         struct io_timeout_data *io;
    6442             :         struct io_kiocb *req;
    6443           0 :         bool found = false;
    6444             : 
    6445           0 :         list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
    6446           0 :                 found = user_data == req->user_data;
    6447           0 :                 if (found)
    6448             :                         break;
    6449             :         }
    6450           0 :         if (!found)
    6451             :                 return ERR_PTR(-ENOENT);
    6452             : 
    6453           0 :         io = req->async_data;
    6454           0 :         if (hrtimer_try_to_cancel(&io->timer) == -1)
    6455             :                 return ERR_PTR(-EALREADY);
    6456           0 :         list_del_init(&req->timeout.list);
    6457           0 :         return req;
    6458             : }
    6459             : 
    6460           0 : static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
    6461             :         __must_hold(&ctx->completion_lock)
    6462             :         __must_hold(&ctx->timeout_lock)
    6463             : {
    6464           0 :         struct io_kiocb *req = io_timeout_extract(ctx, user_data);
    6465             : 
    6466           0 :         if (IS_ERR(req))
    6467           0 :                 return PTR_ERR(req);
    6468           0 :         io_req_task_queue_fail(req, -ECANCELED);
    6469           0 :         return 0;
    6470             : }
    6471             : 
    6472           0 : static clockid_t io_timeout_get_clock(struct io_timeout_data *data)
    6473             : {
    6474           0 :         switch (data->flags & IORING_TIMEOUT_CLOCK_MASK) {
    6475             :         case IORING_TIMEOUT_BOOTTIME:
    6476             :                 return CLOCK_BOOTTIME;
    6477             :         case IORING_TIMEOUT_REALTIME:
    6478             :                 return CLOCK_REALTIME;
    6479             :         default:
    6480             :                 /* can't happen, vetted at prep time */
    6481           0 :                 WARN_ON_ONCE(1);
    6482             :                 fallthrough;
    6483             :         case 0:
    6484             :                 return CLOCK_MONOTONIC;
    6485             :         }
    6486             : }
    6487             : 
    6488           0 : static int io_linked_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
    6489             :                                     struct timespec64 *ts, enum hrtimer_mode mode)
    6490             :         __must_hold(&ctx->timeout_lock)
    6491             : {
    6492             :         struct io_timeout_data *io;
    6493             :         struct io_kiocb *req;
    6494           0 :         bool found = false;
    6495             : 
    6496           0 :         list_for_each_entry(req, &ctx->ltimeout_list, timeout.list) {
    6497           0 :                 found = user_data == req->user_data;
    6498           0 :                 if (found)
    6499             :                         break;
    6500             :         }
    6501           0 :         if (!found)
    6502             :                 return -ENOENT;
    6503             : 
    6504           0 :         io = req->async_data;
    6505           0 :         if (hrtimer_try_to_cancel(&io->timer) == -1)
    6506             :                 return -EALREADY;
    6507           0 :         hrtimer_init(&io->timer, io_timeout_get_clock(io), mode);
    6508           0 :         io->timer.function = io_link_timeout_fn;
    6509           0 :         hrtimer_start(&io->timer, timespec64_to_ktime(*ts), mode);
    6510           0 :         return 0;
    6511             : }
    6512             : 
    6513           0 : static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
    6514             :                              struct timespec64 *ts, enum hrtimer_mode mode)
    6515             :         __must_hold(&ctx->timeout_lock)
    6516             : {
    6517           0 :         struct io_kiocb *req = io_timeout_extract(ctx, user_data);
    6518             :         struct io_timeout_data *data;
    6519             : 
    6520           0 :         if (IS_ERR(req))
    6521           0 :                 return PTR_ERR(req);
    6522             : 
    6523           0 :         req->timeout.off = 0; /* noseq */
    6524           0 :         data = req->async_data;
    6525           0 :         list_add_tail(&req->timeout.list, &ctx->timeout_list);
    6526           0 :         hrtimer_init(&data->timer, io_timeout_get_clock(data), mode);
    6527           0 :         data->timer.function = io_timeout_fn;
    6528           0 :         hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
    6529           0 :         return 0;
    6530             : }
    6531             : 
    6532           0 : static int io_timeout_remove_prep(struct io_kiocb *req,
    6533             :                                   const struct io_uring_sqe *sqe)
    6534             : {
    6535           0 :         struct io_timeout_rem *tr = &req->timeout_rem;
    6536             : 
    6537           0 :         if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
    6538             :                 return -EINVAL;
    6539           0 :         if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
    6540             :                 return -EINVAL;
    6541           0 :         if (sqe->ioprio || sqe->buf_index || sqe->len || sqe->splice_fd_in)
    6542             :                 return -EINVAL;
    6543             : 
    6544           0 :         tr->ltimeout = false;
    6545           0 :         tr->addr = READ_ONCE(sqe->addr);
    6546           0 :         tr->flags = READ_ONCE(sqe->timeout_flags);
    6547           0 :         if (tr->flags & IORING_TIMEOUT_UPDATE_MASK) {
    6548           0 :                 if (hweight32(tr->flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
    6549             :                         return -EINVAL;
    6550           0 :                 if (tr->flags & IORING_LINK_TIMEOUT_UPDATE)
    6551           0 :                         tr->ltimeout = true;
    6552           0 :                 if (tr->flags & ~(IORING_TIMEOUT_UPDATE_MASK|IORING_TIMEOUT_ABS))
    6553             :                         return -EINVAL;
    6554           0 :                 if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
    6555             :                         return -EFAULT;
    6556           0 :                 if (tr->ts.tv_sec < 0 || tr->ts.tv_nsec < 0)
    6557             :                         return -EINVAL;
    6558           0 :         } else if (tr->flags) {
    6559             :                 /* timeout removal doesn't support flags */
    6560             :                 return -EINVAL;
    6561             :         }
    6562             : 
    6563             :         return 0;
    6564             : }
    6565             : 
    6566             : static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
    6567             : {
    6568           0 :         return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
    6569           0 :                                             : HRTIMER_MODE_REL;
    6570             : }
    6571             : 
    6572             : /*
    6573             :  * Remove or update an existing timeout command
    6574             :  */
    6575           0 : static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
    6576             : {
    6577           0 :         struct io_timeout_rem *tr = &req->timeout_rem;
    6578           0 :         struct io_ring_ctx *ctx = req->ctx;
    6579             :         int ret;
    6580             : 
    6581           0 :         if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE)) {
    6582           0 :                 spin_lock(&ctx->completion_lock);
    6583           0 :                 spin_lock_irq(&ctx->timeout_lock);
    6584           0 :                 ret = io_timeout_cancel(ctx, tr->addr);
    6585           0 :                 spin_unlock_irq(&ctx->timeout_lock);
    6586           0 :                 spin_unlock(&ctx->completion_lock);
    6587             :         } else {
    6588           0 :                 enum hrtimer_mode mode = io_translate_timeout_mode(tr->flags);
    6589             : 
    6590           0 :                 spin_lock_irq(&ctx->timeout_lock);
    6591           0 :                 if (tr->ltimeout)
    6592           0 :                         ret = io_linked_timeout_update(ctx, tr->addr, &tr->ts, mode);
    6593             :                 else
    6594           0 :                         ret = io_timeout_update(ctx, tr->addr, &tr->ts, mode);
    6595           0 :                 spin_unlock_irq(&ctx->timeout_lock);
    6596             :         }
    6597             : 
    6598           0 :         if (ret < 0)
    6599           0 :                 req_set_fail(req);
    6600           0 :         io_req_complete_post(req, ret, 0);
    6601           0 :         return 0;
    6602             : }
    6603             : 
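
A hedged liburing sketch of the two forms this handler serves: plain removal, and IORING_TIMEOUT_UPDATE re-arming with a fresh timespec. The timespec is read from userspace when the SQE is submitted, so it must stay live until io_uring_submit(); the 0x11/0x22 tags are arbitrary:

        #include <liburing.h>

        /* must outlive io_uring_submit(): the kernel copies it at prep */
        static struct __kernel_timespec new_ts = { .tv_sec = 5 };

        static void queue_timeout_ops(struct io_uring *ring)
        {
                struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

                /* cancel the timeout tagged 0x11:
                 * CQE res is 0, -ENOENT or -EALREADY */
                io_uring_prep_timeout_remove(sqe, 0x11, 0);

                /* or re-arm the timeout tagged 0x22 to 5s relative */
                sqe = io_uring_get_sqe(ring);
                io_uring_prep_timeout_update(sqe, &new_ts, 0x22, 0);
        }
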
    6604           0 : static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
    6605             :                            bool is_timeout_link)
    6606             : {
    6607             :         struct io_timeout_data *data;
    6608             :         unsigned flags;
    6609           0 :         u32 off = READ_ONCE(sqe->off);
    6610             : 
    6611           0 :         if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
    6612             :                 return -EINVAL;
    6613           0 :         if (sqe->ioprio || sqe->buf_index || sqe->len != 1 ||
    6614           0 :             sqe->splice_fd_in)
    6615             :                 return -EINVAL;
    6616           0 :         if (off && is_timeout_link)
    6617             :                 return -EINVAL;
    6618           0 :         flags = READ_ONCE(sqe->timeout_flags);
    6619           0 :         if (flags & ~(IORING_TIMEOUT_ABS | IORING_TIMEOUT_CLOCK_MASK |
    6620             :                       IORING_TIMEOUT_ETIME_SUCCESS))
    6621             :                 return -EINVAL;
    6622             :         /* more than one clock specified is invalid, obviously */
    6623           0 :         if (hweight32(flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
    6624             :                 return -EINVAL;
    6625             : 
    6626           0 :         INIT_LIST_HEAD(&req->timeout.list);
    6627           0 :         req->timeout.off = off;
    6628           0 :         if (unlikely(off && !req->ctx->off_timeout_used))
    6629           0 :                 req->ctx->off_timeout_used = true;
    6630             : 
    6631           0 :         if (WARN_ON_ONCE(req_has_async_data(req)))
    6632             :                 return -EFAULT;
    6633           0 :         if (io_alloc_async_data(req))
    6634             :                 return -ENOMEM;
    6635             : 
    6636           0 :         data = req->async_data;
    6637           0 :         data->req = req;
    6638           0 :         data->flags = flags;
    6639             : 
    6640           0 :         if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
    6641             :                 return -EFAULT;
    6642             : 
    6643           0 :         if (data->ts.tv_sec < 0 || data->ts.tv_nsec < 0)
    6644             :                 return -EINVAL;
    6645             : 
    6647           0 :         data->mode = io_translate_timeout_mode(flags);
    6648           0 :         hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode);
    6649             : 
    6650           0 :         if (is_timeout_link) {
    6651           0 :                 struct io_submit_link *link = &req->ctx->submit_state.link;
    6652             : 
    6653           0 :                 if (!link->head)
    6654             :                         return -EINVAL;
    6655           0 :                 if (link->last->opcode == IORING_OP_LINK_TIMEOUT)
    6656             :                         return -EINVAL;
    6657           0 :                 req->timeout.head = link->last;
    6658           0 :                 link->last->flags |= REQ_F_ARM_LTIMEOUT;
    6659             :         }
    6660             :         return 0;
    6661             : }
    6662             : 
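
A hedged liburing sketch of the linked-timeout case validated above: the timeout SQE must directly follow a request carrying IOSQE_IO_LINK, which is what leaves link->head non-NULL when this prep runs:

        #include <liburing.h>

        /* must outlive io_uring_submit(): the kernel copies the
         * timespec when it preps the SQE during submission */
        static struct __kernel_timespec one_sec = { .tv_sec = 1 };

        static void queue_read_with_deadline(struct io_uring *ring, int fd,
                                             char *buf, unsigned len)
        {
                struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

                io_uring_prep_read(sqe, fd, buf, len, 0);
                /* IOSQE_IO_LINK makes link->head non-NULL for the next
                 * SQE, which is exactly what io_timeout_prep() checks */
                sqe->flags |= IOSQE_IO_LINK;
                sqe->user_data = 1;

                sqe = io_uring_get_sqe(ring);
                /* relative 1s timeout on CLOCK_MONOTONIC; cancels the
                 * read with -ECANCELED if it hasn't completed in time */
                io_uring_prep_link_timeout(sqe, &one_sec, 0);
                sqe->user_data = 2;
        }
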
    6663           0 : static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
    6664             : {
    6665           0 :         struct io_ring_ctx *ctx = req->ctx;
    6666           0 :         struct io_timeout_data *data = req->async_data;
    6667             :         struct list_head *entry;
    6668           0 :         u32 tail, off = req->timeout.off;
    6669             : 
    6670           0 :         spin_lock_irq(&ctx->timeout_lock);
    6671             : 
    6672             :         /*
    6673             :          * sqe->off holds how many events need to occur for this
    6674             :          * timeout event to be satisfied. If it isn't set, then this is
    6675             :          * a pure timeout request and the sequence isn't used.
    6676             :          */
    6677           0 :         if (io_is_timeout_noseq(req)) {
    6678           0 :                 entry = ctx->timeout_list.prev;
    6679             :                 goto add;
    6680             :         }
    6681             : 
    6682           0 :         tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
    6683           0 :         req->timeout.target_seq = tail + off;
    6684             : 
    6685             :         /* Update the last seq here in case io_flush_timeouts() hasn't.
    6686             :          * This is safe because ->timeout_lock is held, and submissions
    6687             :          * and completions are never mixed in the same lock section.
    6688             :          */
    6689           0 :         ctx->cq_last_tm_flush = tail;
    6690             : 
    6691             :         /*
    6692             :          * Insertion sort, ensuring the first entry in the list is always
    6693             :          * the one we need first.
    6694             :          */
    6695           0 :         list_for_each_prev(entry, &ctx->timeout_list) {
    6696           0 :                 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb,
    6697             :                                                   timeout.list);
    6698             : 
    6699           0 :                 if (io_is_timeout_noseq(nxt))
    6700           0 :                         continue;
    6701             :                 /* nxt.seq is behind @tail, otherwise it would've been completed */
    6702           0 :                 if (off >= nxt->timeout.target_seq - tail)
    6703             :                         break;
    6704             :         }
    6705             : add:
    6706           0 :         list_add(&req->timeout.list, entry);
    6707           0 :         data->timer.function = io_timeout_fn;
    6708           0 :         hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
    6709           0 :         spin_unlock_irq(&ctx->timeout_lock);
    6710           0 :         return 0;
    6711             : }
    6712             : 
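
The list walk above compares distances from @tail rather than raw sequence numbers, so u32 wraparound cancels out. A small worked check of that arithmetic with the tail sitting just below UINT32_MAX:

        #include <assert.h>
        #include <stdint.h>

        int main(void)
        {
                /* CQ tail (minus timed-out CQEs) just before wrap */
                uint32_t tail = UINT32_MAX - 1;
                /* a queued timeout due 4 completions from now; its
                 * stored target_seq has wrapped around to 2 */
                uint32_t existing_seq = tail + 4;
                /* the new timeout wants to fire after 10 completions */
                uint32_t off_new = 10;

                /* comparing offsets from tail makes the wrap vanish:
                 * 10 >= 4, so the new entry sorts after the old one */
                assert(existing_seq == 2);
                assert(off_new >= existing_seq - tail);
                return 0;
        }
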
    6713             : struct io_cancel_data {
    6714             :         struct io_ring_ctx *ctx;
    6715             :         u64 user_data;
    6716             : };
    6717             : 
    6718           0 : static bool io_cancel_cb(struct io_wq_work *work, void *data)
    6719             : {
    6720           0 :         struct io_kiocb *req = container_of(work, struct io_kiocb, work);
    6721           0 :         struct io_cancel_data *cd = data;
    6722             : 
    6723           0 :         return req->ctx == cd->ctx && req->user_data == cd->user_data;
    6724             : }
    6725             : 
    6726           0 : static int io_async_cancel_one(struct io_uring_task *tctx, u64 user_data,
    6727             :                                struct io_ring_ctx *ctx)
    6728             : {
    6729           0 :         struct io_cancel_data data = { .ctx = ctx, .user_data = user_data, };
    6730             :         enum io_wq_cancel cancel_ret;
    6731           0 :         int ret = 0;
    6732             : 
    6733           0 :         if (!tctx || !tctx->io_wq)
    6734             :                 return -ENOENT;
    6735             : 
    6736           0 :         cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, &data, false);
    6737           0 :         switch (cancel_ret) {
    6738             :         case IO_WQ_CANCEL_OK:
    6739             :                 ret = 0;
    6740             :                 break;
    6741             :         case IO_WQ_CANCEL_RUNNING:
    6742           0 :                 ret = -EALREADY;
    6743           0 :                 break;
    6744             :         case IO_WQ_CANCEL_NOTFOUND:
    6745           0 :                 ret = -ENOENT;
    6746           0 :                 break;
    6747             :         }
    6748             : 
    6749             :         return ret;
    6750             : }
    6751             : 
    6752           0 : static int io_try_cancel_userdata(struct io_kiocb *req, u64 sqe_addr)
    6753             : {
    6754           0 :         struct io_ring_ctx *ctx = req->ctx;
    6755             :         int ret;
    6756             : 
    6757           0 :         WARN_ON_ONCE(!io_wq_current_is_worker() && req->task != current);
    6758             : 
    6759           0 :         ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
    6760             :         /*
    6761             :          * Fall-through even for -EALREADY, as we may have a poll armed
    6762             :          * that needs unarming.
    6763             :          */
    6764           0 :         if (!ret)
    6765             :                 return 0;
    6766             : 
    6767           0 :         spin_lock(&ctx->completion_lock);
    6768           0 :         ret = io_poll_cancel(ctx, sqe_addr, false);
    6769           0 :         if (ret != -ENOENT)
    6770             :                 goto out;
    6771             : 
    6772           0 :         spin_lock_irq(&ctx->timeout_lock);
    6773           0 :         ret = io_timeout_cancel(ctx, sqe_addr);
    6774           0 :         spin_unlock_irq(&ctx->timeout_lock);
    6775             : out:
    6776           0 :         spin_unlock(&ctx->completion_lock);
    6777             :         return ret;
    6778             : }
    6779             : 
    6780           0 : static int io_async_cancel_prep(struct io_kiocb *req,
    6781             :                                 const struct io_uring_sqe *sqe)
    6782             : {
    6783           0 :         if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
    6784             :                 return -EINVAL;
    6785           0 :         if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
    6786             :                 return -EINVAL;
    6787           0 :         if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags ||
    6788           0 :             sqe->splice_fd_in)
    6789             :                 return -EINVAL;
    6790             : 
    6791           0 :         req->cancel.addr = READ_ONCE(sqe->addr);
    6792           0 :         return 0;
    6793             : }
    6794             : 
    6795           0 : static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
    6796             : {
    6797           0 :         struct io_ring_ctx *ctx = req->ctx;
    6798           0 :         u64 sqe_addr = req->cancel.addr;
    6799           0 :         bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
    6800             :         struct io_tctx_node *node;
    6801             :         int ret;
    6802             : 
    6803           0 :         ret = io_try_cancel_userdata(req, sqe_addr);
    6804           0 :         if (ret != -ENOENT)
    6805             :                 goto done;
    6806             : 
    6807             :         /* slow path, try all io-wq's */
    6808           0 :         io_ring_submit_lock(ctx, needs_lock);
    6809           0 :         ret = -ENOENT;
    6810           0 :         list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
    6811           0 :                 struct io_uring_task *tctx = node->task->io_uring;
    6812             : 
    6813           0 :                 ret = io_async_cancel_one(tctx, req->cancel.addr, ctx);
    6814           0 :                 if (ret != -ENOENT)
    6815             :                         break;
    6816             :         }
    6817           0 :         io_ring_submit_unlock(ctx, needs_lock);
    6818             : done:
    6819           0 :         if (ret < 0)
    6820           0 :                 req_set_fail(req);
    6821           0 :         io_req_complete_post(req, ret, 0);
    6822           0 :         return 0;
    6823             : }
    6824             : 
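
A hedged liburing sketch of the userspace side of IORING_OP_ASYNC_CANCEL. io_uring_prep_cancel() shown here is the classic form taking the target tag as void *; liburing 2.2+ also offers io_uring_prep_cancel64() for plain __u64 tags. The 0xcafe/0xdead tags are arbitrary:

        #include <liburing.h>

        /* cancel whatever request was tagged 0xcafe */
        static void queue_cancel(struct io_uring *ring)
        {
                struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

                io_uring_prep_cancel(sqe, (void *)(unsigned long)0xcafe, 0);
                sqe->user_data = 0xdead;
                /* the cancel op's own CQE: 0 on success, -ENOENT if
                 * nothing matched, -EALREADY if the target was already
                 * running and may still complete on its own */
        }
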
    6825             : static int io_rsrc_update_prep(struct io_kiocb *req,
    6826             :                                 const struct io_uring_sqe *sqe)
    6827             : {
    6828           0 :         if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
    6829             :                 return -EINVAL;
    6830           0 :         if (sqe->ioprio || sqe->rw_flags || sqe->splice_fd_in)
    6831             :                 return -EINVAL;
    6832             : 
    6833           0 :         req->rsrc_update.offset = READ_ONCE(sqe->off);
    6834           0 :         req->rsrc_update.nr_args = READ_ONCE(sqe->len);
    6835           0 :         if (!req->rsrc_update.nr_args)
    6836             :                 return -EINVAL;
    6837           0 :         req->rsrc_update.arg = READ_ONCE(sqe->addr);
    6838             :         return 0;
    6839             : }
    6840             : 
    6841           0 : static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
    6842             : {
    6843           0 :         struct io_ring_ctx *ctx = req->ctx;
    6844           0 :         bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
    6845             :         struct io_uring_rsrc_update2 up;
    6846             :         int ret;
    6847             : 
    6848           0 :         up.offset = req->rsrc_update.offset;
    6849           0 :         up.data = req->rsrc_update.arg;
    6850           0 :         up.nr = 0;
    6851           0 :         up.tags = 0;
    6852           0 :         up.resv = 0;
    6853           0 :         up.resv2 = 0;
    6854             : 
    6855           0 :         io_ring_submit_lock(ctx, needs_lock);
    6856           0 :         ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
    6857             :                                         &up, req->rsrc_update.nr_args);
    6858           0 :         io_ring_submit_unlock(ctx, needs_lock);
    6859             : 
    6860           0 :         if (ret < 0)
    6861           0 :                 req_set_fail(req);
    6862           0 :         __io_req_complete(req, issue_flags, ret, 0);
    6863           0 :         return 0;
    6864             : }
    6865             : 
    6866           0 : static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
    6867             : {
    6868           0 :         switch (req->opcode) {
    6869             :         case IORING_OP_NOP:
    6870             :                 return 0;
    6871             :         case IORING_OP_READV:
    6872             :         case IORING_OP_READ_FIXED:
    6873             :         case IORING_OP_READ:
    6874             :         case IORING_OP_WRITEV:
    6875             :         case IORING_OP_WRITE_FIXED:
    6876             :         case IORING_OP_WRITE:
    6877           0 :                 return io_prep_rw(req, sqe);
    6878             :         case IORING_OP_POLL_ADD:
    6879           0 :                 return io_poll_add_prep(req, sqe);
    6880             :         case IORING_OP_POLL_REMOVE:
    6881           0 :                 return io_poll_update_prep(req, sqe);
    6882             :         case IORING_OP_FSYNC:
    6883           0 :                 return io_fsync_prep(req, sqe);
    6884             :         case IORING_OP_SYNC_FILE_RANGE:
    6885             :                 return io_sfr_prep(req, sqe);
    6886             :         case IORING_OP_SENDMSG:
    6887             :         case IORING_OP_SEND:
    6888           0 :                 return io_sendmsg_prep(req, sqe);
    6889             :         case IORING_OP_RECVMSG:
    6890             :         case IORING_OP_RECV:
    6891           0 :                 return io_recvmsg_prep(req, sqe);
    6892             :         case IORING_OP_CONNECT:
    6893           0 :                 return io_connect_prep(req, sqe);
    6894             :         case IORING_OP_TIMEOUT:
    6895           0 :                 return io_timeout_prep(req, sqe, false);
    6896             :         case IORING_OP_TIMEOUT_REMOVE:
    6897           0 :                 return io_timeout_remove_prep(req, sqe);
    6898             :         case IORING_OP_ASYNC_CANCEL:
    6899           0 :                 return io_async_cancel_prep(req, sqe);
    6900             :         case IORING_OP_LINK_TIMEOUT:
    6901           0 :                 return io_timeout_prep(req, sqe, true);
    6902             :         case IORING_OP_ACCEPT:
    6903           0 :                 return io_accept_prep(req, sqe);
    6904             :         case IORING_OP_FALLOCATE:
    6905             :                 return io_fallocate_prep(req, sqe);
    6906             :         case IORING_OP_OPENAT:
    6907           0 :                 return io_openat_prep(req, sqe);
    6908             :         case IORING_OP_CLOSE:
    6909           0 :                 return io_close_prep(req, sqe);
    6910             :         case IORING_OP_FILES_UPDATE:
    6911             :                 return io_rsrc_update_prep(req, sqe);
    6912             :         case IORING_OP_STATX:
    6913           0 :                 return io_statx_prep(req, sqe);
    6914             :         case IORING_OP_FADVISE:
    6915             :                 return io_fadvise_prep(req, sqe);
    6916             :         case IORING_OP_MADVISE:
    6917             :                 return io_madvise_prep(req, sqe);
    6918             :         case IORING_OP_OPENAT2:
    6919           0 :                 return io_openat2_prep(req, sqe);
    6920             :         case IORING_OP_EPOLL_CTL:
    6921           0 :                 return io_epoll_ctl_prep(req, sqe);
    6922             :         case IORING_OP_SPLICE:
    6923             :                 return io_splice_prep(req, sqe);
    6924             :         case IORING_OP_PROVIDE_BUFFERS:
    6925           0 :                 return io_provide_buffers_prep(req, sqe);
    6926             :         case IORING_OP_REMOVE_BUFFERS:
    6927           0 :                 return io_remove_buffers_prep(req, sqe);
    6928             :         case IORING_OP_TEE:
    6929             :                 return io_tee_prep(req, sqe);
    6930             :         case IORING_OP_SHUTDOWN:
    6931           0 :                 return io_shutdown_prep(req, sqe);
    6932             :         case IORING_OP_RENAMEAT:
    6933           0 :                 return io_renameat_prep(req, sqe);
    6934             :         case IORING_OP_UNLINKAT:
    6935           0 :                 return io_unlinkat_prep(req, sqe);
    6936             :         case IORING_OP_MKDIRAT:
    6937           0 :                 return io_mkdirat_prep(req, sqe);
    6938             :         case IORING_OP_SYMLINKAT:
    6939           0 :                 return io_symlinkat_prep(req, sqe);
    6940             :         case IORING_OP_LINKAT:
    6941           0 :                 return io_linkat_prep(req, sqe);
    6942             :         case IORING_OP_MSG_RING:
    6943           0 :                 return io_msg_ring_prep(req, sqe);
    6944             :         }
    6945             : 
    6946           0 :         printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
    6947             :                         req->opcode);
    6948             :         return -EINVAL;
    6949             : }
    6950             : 
    6951           0 : static int io_req_prep_async(struct io_kiocb *req)
    6952             : {
    6953           0 :         const struct io_op_def *def = &io_op_defs[req->opcode];
    6954             : 
    6955             :         /* assign early for deferred execution with a non-fixed file */
    6956           0 :         if (def->needs_file && !(req->flags & REQ_F_FIXED_FILE))
    6957           0 :                 req->file = io_file_get_normal(req, req->fd);
    6958           0 :         if (!def->needs_async_setup)
    6959             :                 return 0;
    6960           0 :         if (WARN_ON_ONCE(req_has_async_data(req)))
    6961             :                 return -EFAULT;
    6962           0 :         if (io_alloc_async_data(req))
    6963             :                 return -EAGAIN;
    6964             : 
    6965           0 :         switch (req->opcode) {
    6966             :         case IORING_OP_READV:
    6967           0 :                 return io_rw_prep_async(req, READ);
    6968             :         case IORING_OP_WRITEV:
    6969           0 :                 return io_rw_prep_async(req, WRITE);
    6970             :         case IORING_OP_SENDMSG:
    6971             :                 return io_sendmsg_prep_async(req);
    6972             :         case IORING_OP_RECVMSG:
    6973             :                 return io_recvmsg_prep_async(req);
    6974             :         case IORING_OP_CONNECT:
    6975             :                 return io_connect_prep_async(req);
    6976             :         }
    6977           0 :         printk_once(KERN_WARNING "io_uring: prep_async() bad opcode %d\n",
    6978             :                     req->opcode);
    6979             :         return -EFAULT;
    6980             : }
    6981             : 
    6982             : static u32 io_get_sequence(struct io_kiocb *req)
    6983             : {
    6984           0 :         u32 seq = req->ctx->cached_sq_head;
    6985             : 
    6986             :         /* need original cached_sq_head, but it was increased for each req */
    6987           0 :         io_for_each_link(req, req)
    6988           0 :                 seq--;
    6989             :         return seq;
    6990             : }
    6991             : 
    6992           0 : static __cold void io_drain_req(struct io_kiocb *req)
    6993             : {
    6994           0 :         struct io_ring_ctx *ctx = req->ctx;
    6995             :         struct io_defer_entry *de;
    6996             :         int ret;
    6997           0 :         u32 seq = io_get_sequence(req);
    6998             : 
    6999             :         /* Still need to defer if there are pending reqs in the defer list. */
    7000           0 :         spin_lock(&ctx->completion_lock);
    7001           0 :         if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) {
    7002           0 :                 spin_unlock(&ctx->completion_lock);
    7003             : queue:
    7004           0 :                 ctx->drain_active = false;
    7005             :                 io_req_task_queue(req);
    7006             :                 return;
    7007             :         }
    7008           0 :         spin_unlock(&ctx->completion_lock);
    7009             : 
    7010           0 :         ret = io_req_prep_async(req);
    7011           0 :         if (ret) {
    7012             : fail:
    7013           0 :                 io_req_complete_failed(req, ret);
    7014           0 :                 return;
    7015             :         }
    7016           0 :         io_prep_async_link(req);
    7017           0 :         de = kmalloc(sizeof(*de), GFP_KERNEL);
    7018           0 :         if (!de) {
    7019             :                 ret = -ENOMEM;
    7020             :                 goto fail;
    7021             :         }
    7022             : 
    7023           0 :         spin_lock(&ctx->completion_lock);
    7024           0 :         if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
    7025           0 :                 spin_unlock(&ctx->completion_lock);
    7026           0 :                 kfree(de);
    7027           0 :                 goto queue;
    7028             :         }
    7029             : 
    7030           0 :         trace_io_uring_defer(ctx, req, req->user_data, req->opcode);
    7031           0 :         de->req = req;
    7032           0 :         de->seq = seq;
    7033           0 :         list_add_tail(&de->list, &ctx->defer_list);
    7034           0 :         spin_unlock(&ctx->completion_lock);
    7035             : }
    7036             : 
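For reference, a minimal userspace sketch (assuming liburing; ring setup and error handling mostly omitted) of how a request ends up in io_drain_req(): setting IOSQE_IO_DRAIN makes the request wait until every previously submitted request has completed.

#include <liburing.h>

static int submit_drained_write(struct io_uring *ring, int fd,
                                const void *buf, unsigned len)
{
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

        if (!sqe)
                return -EAGAIN;
        io_uring_prep_write(sqe, fd, buf, len, 0);
        sqe->flags |= IOSQE_IO_DRAIN;   /* becomes REQ_F_IO_DRAIN kernel-side */
        return io_uring_submit(ring);
}
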
    7037           0 : static void io_clean_op(struct io_kiocb *req)
    7038             : {
    7039           0 :         if (req->flags & REQ_F_BUFFER_SELECTED) {
    7040           0 :                 spin_lock(&req->ctx->completion_lock);
    7041           0 :                 io_put_kbuf_comp(req);
    7042           0 :                 spin_unlock(&req->ctx->completion_lock);
    7043             :         }
    7044             : 
    7045           0 :         if (req->flags & REQ_F_NEED_CLEANUP) {
    7046           0 :                 switch (req->opcode) {
    7047             :                 case IORING_OP_READV:
    7048             :                 case IORING_OP_READ_FIXED:
    7049             :                 case IORING_OP_READ:
    7050             :                 case IORING_OP_WRITEV:
    7051             :                 case IORING_OP_WRITE_FIXED:
    7052             :                 case IORING_OP_WRITE: {
    7053           0 :                         struct io_async_rw *io = req->async_data;
    7054             : 
    7055           0 :                         kfree(io->free_iovec);
    7056           0 :                         break;
    7057             :                         }
    7058             :                 case IORING_OP_RECVMSG:
    7059             :                 case IORING_OP_SENDMSG: {
    7060           0 :                         struct io_async_msghdr *io = req->async_data;
    7061             : 
    7062           0 :                         kfree(io->free_iov);
    7063           0 :                         break;
    7064             :                         }
    7065             :                 case IORING_OP_OPENAT:
    7066             :                 case IORING_OP_OPENAT2:
    7067           0 :                         if (req->open.filename)
    7068           0 :                                 putname(req->open.filename);
    7069             :                         break;
    7070             :                 case IORING_OP_RENAMEAT:
    7071           0 :                         putname(req->rename.oldpath);
    7072           0 :                         putname(req->rename.newpath);
    7073           0 :                         break;
    7074             :                 case IORING_OP_UNLINKAT:
    7075           0 :                         putname(req->unlink.filename);
    7076           0 :                         break;
    7077             :                 case IORING_OP_MKDIRAT:
    7078           0 :                         putname(req->mkdir.filename);
    7079           0 :                         break;
    7080             :                 case IORING_OP_SYMLINKAT:
    7081           0 :                         putname(req->symlink.oldpath);
    7082           0 :                         putname(req->symlink.newpath);
    7083           0 :                         break;
    7084             :                 case IORING_OP_LINKAT:
    7085           0 :                         putname(req->hardlink.oldpath);
    7086           0 :                         putname(req->hardlink.newpath);
    7087           0 :                         break;
    7088             :                 case IORING_OP_STATX:
    7089           0 :                         if (req->statx.filename)
    7090           0 :                                 putname(req->statx.filename);
    7091             :                         break;
    7092             :                 }
    7093             :         }
    7094           0 :         if ((req->flags & REQ_F_POLLED) && req->apoll) {
    7095           0 :                 kfree(req->apoll->double_poll);
    7096           0 :                 kfree(req->apoll);
    7097           0 :                 req->apoll = NULL;
    7098             :         }
    7099           0 :         if (req->flags & REQ_F_CREDS)
    7100           0 :                 put_cred(req->creds);
    7101           0 :         if (req->flags & REQ_F_ASYNC_DATA) {
    7102           0 :                 kfree(req->async_data);
    7103           0 :                 req->async_data = NULL;
    7104             :         }
    7105           0 :         req->flags &= ~IO_REQ_CLEAN_FLAGS;
    7106           0 : }
    7107             : 
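A hedged liburing sketch of the provided-buffer flow whose selection state io_clean_op() releases via io_put_kbuf_comp(); fd, bgid, and the sizing parameters are placeholders, and error handling is omitted.

#include <liburing.h>

static int read_with_provided_buffers(struct io_uring *ring, int fd,
                                      void *pool, int buf_len, int nbufs,
                                      int bgid)
{
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

        /* hand nbufs buffers of buf_len bytes to the kernel, group bgid */
        io_uring_prep_provide_buffers(sqe, pool, buf_len, nbufs, bgid, 0);

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_read(sqe, fd, NULL, buf_len, 0); /* kernel picks buffer */
        sqe->flags |= IOSQE_BUFFER_SELECT;             /* REQ_F_BUFFER_SELECT */
        sqe->buf_group = bgid;
        return io_uring_submit(ring);
}
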
    7108           0 : static bool io_assign_file(struct io_kiocb *req, unsigned int issue_flags)
    7109             : {
    7110           0 :         if (req->file || !io_op_defs[req->opcode].needs_file)
    7111             :                 return true;
    7112             : 
    7113           0 :         if (req->flags & REQ_F_FIXED_FILE)
    7114           0 :                 req->file = io_file_get_fixed(req, req->fd, issue_flags);
    7115             :         else
    7116           0 :                 req->file = io_file_get_normal(req, req->fd);
    7117           0 :         if (req->file)
    7118             :                 return true;
    7119             : 
    7120           0 :         req_set_fail(req);
    7121           0 :         req->result = -EBADF;
    7122           0 :         return false;
    7123             : }
    7124             : 
    7125           0 : static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
    7126             : {
    7127           0 :         const struct cred *creds = NULL;
    7128             :         int ret;
    7129             : 
    7130           0 :         if (unlikely(!io_assign_file(req, issue_flags)))
    7131             :                 return -EBADF;
    7132             : 
    7133           0 :         if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred()))
    7134           0 :                 creds = override_creds(req->creds);
    7135             : 
    7136           0 :         if (!io_op_defs[req->opcode].audit_skip)
    7137             :                 audit_uring_entry(req->opcode);
    7138             : 
    7139           0 :         switch (req->opcode) {
    7140             :         case IORING_OP_NOP:
    7141           0 :                 ret = io_nop(req, issue_flags);
    7142           0 :                 break;
    7143             :         case IORING_OP_READV:
    7144             :         case IORING_OP_READ_FIXED:
    7145             :         case IORING_OP_READ:
    7146           0 :                 ret = io_read(req, issue_flags);
    7147           0 :                 break;
    7148             :         case IORING_OP_WRITEV:
    7149             :         case IORING_OP_WRITE_FIXED:
    7150             :         case IORING_OP_WRITE:
    7151           0 :                 ret = io_write(req, issue_flags);
    7152           0 :                 break;
    7153             :         case IORING_OP_FSYNC:
    7154           0 :                 ret = io_fsync(req, issue_flags);
    7155           0 :                 break;
    7156             :         case IORING_OP_POLL_ADD:
    7157           0 :                 ret = io_poll_add(req, issue_flags);
    7158           0 :                 break;
    7159             :         case IORING_OP_POLL_REMOVE:
    7160           0 :                 ret = io_poll_update(req, issue_flags);
    7161           0 :                 break;
    7162             :         case IORING_OP_SYNC_FILE_RANGE:
    7163           0 :                 ret = io_sync_file_range(req, issue_flags);
    7164           0 :                 break;
    7165             :         case IORING_OP_SENDMSG:
    7166             :                 ret = io_sendmsg(req, issue_flags);
    7167             :                 break;
    7168             :         case IORING_OP_SEND:
    7169             :                 ret = io_send(req, issue_flags);
    7170             :                 break;
    7171             :         case IORING_OP_RECVMSG:
    7172             :                 ret = io_recvmsg(req, issue_flags);
    7173             :                 break;
    7174             :         case IORING_OP_RECV:
    7175             :                 ret = io_recv(req, issue_flags);
    7176             :                 break;
    7177             :         case IORING_OP_TIMEOUT:
    7178           0 :                 ret = io_timeout(req, issue_flags);
    7179           0 :                 break;
    7180             :         case IORING_OP_TIMEOUT_REMOVE:
    7181           0 :                 ret = io_timeout_remove(req, issue_flags);
    7182           0 :                 break;
    7183             :         case IORING_OP_ACCEPT:
    7184             :                 ret = io_accept(req, issue_flags);
    7185             :                 break;
    7186             :         case IORING_OP_CONNECT:
    7187             :                 ret = io_connect(req, issue_flags);
    7188             :                 break;
    7189             :         case IORING_OP_ASYNC_CANCEL:
    7190           0 :                 ret = io_async_cancel(req, issue_flags);
    7191           0 :                 break;
    7192             :         case IORING_OP_FALLOCATE:
    7193           0 :                 ret = io_fallocate(req, issue_flags);
    7194           0 :                 break;
    7195             :         case IORING_OP_OPENAT:
    7196           0 :                 ret = io_openat(req, issue_flags);
    7197           0 :                 break;
    7198             :         case IORING_OP_CLOSE:
    7199           0 :                 ret = io_close(req, issue_flags);
    7200           0 :                 break;
    7201             :         case IORING_OP_FILES_UPDATE:
    7202           0 :                 ret = io_files_update(req, issue_flags);
    7203           0 :                 break;
    7204             :         case IORING_OP_STATX:
    7205           0 :                 ret = io_statx(req, issue_flags);
    7206           0 :                 break;
    7207             :         case IORING_OP_FADVISE:
    7208           0 :                 ret = io_fadvise(req, issue_flags);
    7209           0 :                 break;
    7210             :         case IORING_OP_MADVISE:
    7211           0 :                 ret = io_madvise(req, issue_flags);
    7212           0 :                 break;
    7213             :         case IORING_OP_OPENAT2:
    7214           0 :                 ret = io_openat2(req, issue_flags);
    7215           0 :                 break;
    7216             :         case IORING_OP_EPOLL_CTL:
    7217           0 :                 ret = io_epoll_ctl(req, issue_flags);
    7218           0 :                 break;
    7219             :         case IORING_OP_SPLICE:
    7220           0 :                 ret = io_splice(req, issue_flags);
    7221           0 :                 break;
    7222             :         case IORING_OP_PROVIDE_BUFFERS:
    7223           0 :                 ret = io_provide_buffers(req, issue_flags);
    7224           0 :                 break;
    7225             :         case IORING_OP_REMOVE_BUFFERS:
    7226           0 :                 ret = io_remove_buffers(req, issue_flags);
    7227           0 :                 break;
    7228             :         case IORING_OP_TEE:
    7229           0 :                 ret = io_tee(req, issue_flags);
    7230           0 :                 break;
    7231             :         case IORING_OP_SHUTDOWN:
    7232             :                 ret = io_shutdown(req, issue_flags);
    7233             :                 break;
    7234             :         case IORING_OP_RENAMEAT:
    7235           0 :                 ret = io_renameat(req, issue_flags);
    7236           0 :                 break;
    7237             :         case IORING_OP_UNLINKAT:
    7238           0 :                 ret = io_unlinkat(req, issue_flags);
    7239           0 :                 break;
    7240             :         case IORING_OP_MKDIRAT:
    7241           0 :                 ret = io_mkdirat(req, issue_flags);
    7242           0 :                 break;
    7243             :         case IORING_OP_SYMLINKAT:
    7244           0 :                 ret = io_symlinkat(req, issue_flags);
    7245           0 :                 break;
    7246             :         case IORING_OP_LINKAT:
    7247           0 :                 ret = io_linkat(req, issue_flags);
    7248           0 :                 break;
    7249             :         case IORING_OP_MSG_RING:
    7250           0 :                 ret = io_msg_ring(req, issue_flags);
    7251           0 :                 break;
    7252             :         default:
    7253           0 :                 ret = -EINVAL;
    7254           0 :                 break;
    7255             :         }
    7256             : 
    7257             :         if (!io_op_defs[req->opcode].audit_skip)
    7258             :                 audit_uring_exit(!ret, ret);
    7259             : 
    7260           0 :         if (creds)
    7261           0 :                 revert_creds(creds);
    7262           0 :         if (ret)
    7263             :                 return ret;
    7264             :         /* If the op doesn't have a file, we're not polling for it */
    7265           0 :         if ((req->ctx->flags & IORING_SETUP_IOPOLL) && req->file)
    7266           0 :                 io_iopoll_req_issued(req, issue_flags);
    7267             : 
    7268             :         return 0;
    7269             : }
    7270             : 
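As a minimal end-to-end illustration of io_issue_sqe()'s simplest case, a liburing sketch that submits IORING_OP_NOP and reaps its completion:

#include <liburing.h>

static int nop_roundtrip(void)
{
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        int ret;

        ret = io_uring_queue_init(4, &ring, 0);
        if (ret)
                return ret;
        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_nop(sqe);            /* dispatched to io_nop() */
        io_uring_submit(&ring);
        ret = io_uring_wait_cqe(&ring, &cqe);
        if (!ret)
                io_uring_cqe_seen(&ring, cqe);
        io_uring_queue_exit(&ring);
        return ret;
}
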
    7271           0 : static struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
    7272             : {
    7273           0 :         struct io_kiocb *req = container_of(work, struct io_kiocb, work);
    7274             : 
    7275           0 :         req = io_put_req_find_next(req);
    7276           0 :         return req ? &req->work : NULL;
    7277             : }
    7278             : 
    7279           0 : static void io_wq_submit_work(struct io_wq_work *work)
    7280             : {
    7281           0 :         struct io_kiocb *req = container_of(work, struct io_kiocb, work);
    7282           0 :         const struct io_op_def *def = &io_op_defs[req->opcode];
    7283           0 :         unsigned int issue_flags = IO_URING_F_UNLOCKED;
    7284           0 :         bool needs_poll = false;
    7285             :         struct io_kiocb *timeout;
    7286           0 :         int ret = 0, err = -ECANCELED;
    7287             : 
    7288             :         /* one will be dropped by ->io_free_work() after returning to io-wq */
    7289           0 :         if (!(req->flags & REQ_F_REFCOUNT))
    7290             :                 __io_req_set_refcount(req, 2);
    7291             :         else
    7292           0 :                 req_ref_get(req);
    7293             : 
    7294           0 :         timeout = io_prep_linked_timeout(req);
    7295           0 :         if (timeout)
    7296           0 :                 io_queue_linked_timeout(timeout);
    7297             : 
    7298             : 
    7299             :         /* either cancelled or io-wq is dying, so don't touch tctx->iowq */
    7300           0 :         if (work->flags & IO_WQ_WORK_CANCEL) {
    7301             : fail:
    7302             :                 io_req_task_queue_fail(req, err);
    7303             :                 return;
    7304             :         }
    7305           0 :         if (!io_assign_file(req, issue_flags)) {
    7306           0 :                 err = -EBADF;
    7307           0 :                 work->flags |= IO_WQ_WORK_CANCEL;
    7308           0 :                 goto fail;
    7309             :         }
    7310             : 
    7311           0 :         if (req->flags & REQ_F_FORCE_ASYNC) {
    7312           0 :                 bool opcode_poll = def->pollin || def->pollout;
    7313             : 
    7314           0 :                 if (opcode_poll && file_can_poll(req->file)) {
    7315           0 :                         needs_poll = true;
    7316           0 :                         issue_flags |= IO_URING_F_NONBLOCK;
    7317             :                 }
    7318             :         }
    7319             : 
    7320             :         do {
    7321           0 :                 ret = io_issue_sqe(req, issue_flags);
    7322           0 :                 if (ret != -EAGAIN)
    7323             :                         break;
    7324             :                 /*
    7325             :                  * We can get EAGAIN for iopolled IO even though we're
    7326             :                  * forcing a sync submission from here, since we can't
    7327             :                  * wait for request slots on the block side.
    7328             :                  */
    7329           0 :                 if (!needs_poll) {
    7330           0 :                         cond_resched();
    7331           0 :                         continue;
    7332             :                 }
    7333             : 
    7334           0 :                 if (io_arm_poll_handler(req, issue_flags) == IO_APOLL_OK)
    7335             :                         return;
    7336             :                 /* aborted or ready, in either case retry blocking */
    7337             :                 needs_poll = false;
    7338             :                 issue_flags &= ~IO_URING_F_NONBLOCK;
    7339             :         } while (1);
    7340             : 
    7341             :         /* avoid locking problems by failing it from a clean context */
    7342           0 :         if (ret)
    7343             :                 io_req_task_queue_fail(req, ret);
    7344             : }
    7345             : 
    7346             : static inline struct io_fixed_file *io_fixed_file_slot(struct io_file_table *table,
    7347             :                                                        unsigned i)
    7348             : {
    7349           0 :         return &table->files[i];
    7350             : }
    7351             : 
    7352             : static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
    7353             :                                               int index)
    7354             : {
    7355           0 :         struct io_fixed_file *slot = io_fixed_file_slot(&ctx->file_table, index);
    7356             : 
    7357           0 :         return (struct file *) (slot->file_ptr & FFS_MASK);
    7358             : }
    7359             : 
    7360             : static void io_fixed_file_set(struct io_fixed_file *file_slot, struct file *file)
    7361             : {
    7362           0 :         unsigned long file_ptr = (unsigned long) file;
    7363             : 
    7364           0 :         file_ptr |= io_file_get_flags(file);
    7365           0 :         file_slot->file_ptr = file_ptr;
    7366             : }
    7367             : 
    7368           0 : static inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
    7369             :                                              unsigned int issue_flags)
    7370             : {
    7371           0 :         struct io_ring_ctx *ctx = req->ctx;
    7372           0 :         struct file *file = NULL;
    7373             :         unsigned long file_ptr;
    7374             : 
    7375           0 :         if (issue_flags & IO_URING_F_UNLOCKED)
    7376           0 :                 mutex_lock(&ctx->uring_lock);
    7377             : 
    7378           0 :         if (unlikely((unsigned int)fd >= ctx->nr_user_files))
    7379             :                 goto out;
    7380           0 :         fd = array_index_nospec(fd, ctx->nr_user_files);
    7381           0 :         file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
    7382           0 :         file = (struct file *) (file_ptr & FFS_MASK);
    7383           0 :         file_ptr &= ~FFS_MASK;
    7384             :         /* mask in overlapping REQ_F and FFS bits */
    7385           0 :         req->flags |= (file_ptr << REQ_F_SUPPORT_NOWAIT_BIT);
    7386           0 :         io_req_set_rsrc_node(req, ctx, 0);
    7387             : out:
    7388           0 :         if (issue_flags & IO_URING_F_UNLOCKED)
    7389           0 :                 mutex_unlock(&ctx->uring_lock);
    7390           0 :         return file;
    7391             : }
    7392             : 
    7393             : /*
    7394             :  * Drop the file for requeue operations. Only used if req->file is the
    7395             :  * io_uring descriptor itself.
    7396             :  */
    7397             : static void io_drop_inflight_file(struct io_kiocb *req)
    7398             : {
    7399           0 :         if (unlikely(req->flags & REQ_F_INFLIGHT)) {
    7400           0 :                 fput(req->file);
    7401           0 :                 req->file = NULL;
    7402           0 :                 req->flags &= ~REQ_F_INFLIGHT;
    7403             :         }
    7404             : }
    7405             : 
    7406             : static struct file *io_file_get_normal(struct io_kiocb *req, int fd)
    7407             : {
    7408           0 :         struct file *file = fget(fd);
    7409             : 
    7410           0 :         trace_io_uring_file_get(req->ctx, req, req->user_data, fd);
    7411             : 
    7412             :         /* we don't allow fixed io_uring files */
    7413           0 :         if (file && file->f_op == &io_uring_fops)
    7414           0 :                 req->flags |= REQ_F_INFLIGHT;
    7415             :         return file;
    7416             : }
    7417             : 
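A liburing sketch (fds array and register count are placeholders, error handling omitted) of the two lookup paths above: a registered file resolved by io_file_get_fixed() versus a plain descriptor resolved via fget() in io_file_get_normal().

#include <liburing.h>

static int read_via_fixed_file(struct io_uring *ring, int *fds, int nr_fds,
                               void *buf, unsigned len)
{
        struct io_uring_sqe *sqe;

        io_uring_register_files(ring, fds, nr_fds);

        sqe = io_uring_get_sqe(ring);
        /* fd argument is an index into the registered table, not an fd */
        io_uring_prep_read(sqe, 0, buf, len, 0);
        sqe->flags |= IOSQE_FIXED_FILE;     /* REQ_F_FIXED_FILE kernel-side */
        return io_uring_submit(ring);
}
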
    7418           0 : static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
    7419             : {
    7420           0 :         struct io_kiocb *prev = req->timeout.prev;
    7421           0 :         int ret = -ENOENT;
    7422             : 
    7423           0 :         if (prev) {
    7424           0 :                 if (!(req->task->flags & PF_EXITING))
    7425           0 :                         ret = io_try_cancel_userdata(req, prev->user_data);
    7426           0 :                 io_req_complete_post(req, ret ?: -ETIME, 0);
    7427           0 :                 io_put_req(prev);
    7428             :         } else {
    7429           0 :                 io_req_complete_post(req, -ETIME, 0);
    7430             :         }
    7431           0 : }
    7432             : 
    7433           0 : static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
    7434             : {
    7435           0 :         struct io_timeout_data *data = container_of(timer,
    7436             :                                                 struct io_timeout_data, timer);
    7437           0 :         struct io_kiocb *prev, *req = data->req;
    7438           0 :         struct io_ring_ctx *ctx = req->ctx;
    7439             :         unsigned long flags;
    7440             : 
    7441           0 :         spin_lock_irqsave(&ctx->timeout_lock, flags);
    7442           0 :         prev = req->timeout.head;
    7443           0 :         req->timeout.head = NULL;
    7444             : 
    7445             :         /*
    7446             :          * We don't expect the list to be empty, that will only happen if we
    7447             :          * race with the completion of the linked work.
    7448             :          */
    7449           0 :         if (prev) {
    7450           0 :                 io_remove_next_linked(prev);
    7451           0 :                 if (!req_ref_inc_not_zero(prev))
    7452           0 :                         prev = NULL;
    7453             :         }
    7454           0 :         list_del(&req->timeout.list);
    7455           0 :         req->timeout.prev = prev;
    7456           0 :         spin_unlock_irqrestore(&ctx->timeout_lock, flags);
    7457             : 
    7458           0 :         req->io_task_work.func = io_req_task_link_timeout;
    7459           0 :         io_req_task_work_add(req, false);
    7460           0 :         return HRTIMER_NORESTART;
    7461             : }
    7462             : 
    7463           0 : static void io_queue_linked_timeout(struct io_kiocb *req)
    7464             : {
    7465           0 :         struct io_ring_ctx *ctx = req->ctx;
    7466             : 
    7467           0 :         spin_lock_irq(&ctx->timeout_lock);
    7468             :         /*
    7469             :          * If the back reference is NULL, then our linked request finished
    7470             :          * before we got a chance to set up the timer.
    7471             :          */
    7472           0 :         if (req->timeout.head) {
    7473           0 :                 struct io_timeout_data *data = req->async_data;
    7474             : 
    7475           0 :                 data->timer.function = io_link_timeout_fn;
    7476           0 :                 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
    7477             :                                 data->mode);
    7478           0 :                 list_add_tail(&req->timeout.list, &ctx->ltimeout_list);
    7479             :         }
    7480           0 :         spin_unlock_irq(&ctx->timeout_lock);
    7481             :         /* drop submission reference */
    7482           0 :         io_put_req(req);
    7483           0 : }
    7484             : 
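A liburing sketch of the linked-timeout machinery above: the timeout SQE immediately follows the request it guards, which carries IOSQE_IO_LINK. If the hrtimer fires first, io_link_timeout_fn() tries to cancel the read; otherwise the timeout itself completes as cancelled.

#include <liburing.h>

static int read_with_timeout(struct io_uring *ring, int fd,
                             void *buf, unsigned len)
{
        struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
        struct io_uring_sqe *sqe;

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_read(sqe, fd, buf, len, 0);
        sqe->flags |= IOSQE_IO_LINK;        /* timeout links to this req */

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_link_timeout(sqe, &ts, 0);
        return io_uring_submit(ring);
}
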
    7485           0 : static void io_queue_sqe_arm_apoll(struct io_kiocb *req)
    7486             :         __must_hold(&req->ctx->uring_lock)
    7487             : {
    7488           0 :         struct io_kiocb *linked_timeout = io_prep_linked_timeout(req);
    7489             : 
    7490           0 :         switch (io_arm_poll_handler(req, 0)) {
    7491             :         case IO_APOLL_READY:
    7492             :                 io_req_task_queue(req);
    7493             :                 break;
    7494             :         case IO_APOLL_ABORTED:
    7495             :                 /*
    7496             :                  * Queued up for async execution, worker will release
    7497             :                  * submit reference when the iocb is actually submitted.
    7498             :                  */
    7499           0 :                 io_queue_async_work(req, NULL);
    7500           0 :                 break;
    7501             :         case IO_APOLL_OK:
    7502             :                 break;
    7503             :         }
    7504             : 
    7505           0 :         if (linked_timeout)
    7506           0 :                 io_queue_linked_timeout(linked_timeout);
    7507           0 : }
    7508             : 
    7509           0 : static inline void __io_queue_sqe(struct io_kiocb *req)
    7510             :         __must_hold(&req->ctx->uring_lock)
    7511             : {
    7512             :         struct io_kiocb *linked_timeout;
    7513             :         int ret;
    7514             : 
    7515           0 :         ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
    7516             : 
    7517           0 :         if (req->flags & REQ_F_COMPLETE_INLINE) {
    7518             :                 io_req_add_compl_list(req);
    7519             :                 return;
    7520             :         }
    7521             :         /*
    7522             :          * We async punt it if the file wasn't marked NOWAIT, or if the file
    7523             :          * doesn't support non-blocking read/write attempts
    7524             :          */
    7525           0 :         if (likely(!ret)) {
    7526           0 :                 linked_timeout = io_prep_linked_timeout(req);
    7527           0 :                 if (linked_timeout)
    7528           0 :                         io_queue_linked_timeout(linked_timeout);
    7529           0 :         } else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
    7530           0 :                 io_queue_sqe_arm_apoll(req);
    7531             :         } else {
    7532           0 :                 io_req_complete_failed(req, ret);
    7533             :         }
    7534             : }
    7535             : 
    7536           0 : static void io_queue_sqe_fallback(struct io_kiocb *req)
    7537             :         __must_hold(&req->ctx->uring_lock)
    7538             : {
    7539           0 :         if (req->flags & REQ_F_FAIL) {
    7540             :                 io_req_complete_fail_submit(req);
    7541           0 :         } else if (unlikely(req->ctx->drain_active)) {
    7542           0 :                 io_drain_req(req);
    7543             :         } else {
    7544           0 :                 int ret = io_req_prep_async(req);
    7545             : 
    7546           0 :                 if (unlikely(ret))
    7547           0 :                         io_req_complete_failed(req, ret);
    7548             :                 else
    7549           0 :                         io_queue_async_work(req, NULL);
    7550             :         }
    7551           0 : }
    7552             : 
    7553           0 : static inline void io_queue_sqe(struct io_kiocb *req)
    7554             :         __must_hold(&req->ctx->uring_lock)
    7555             : {
    7556           0 :         if (likely(!(req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL))))
    7557           0 :                 __io_queue_sqe(req);
    7558             :         else
    7559           0 :                 io_queue_sqe_fallback(req);
    7560           0 : }
    7561             : 
    7562             : /*
    7563             :  * Check SQE restrictions (opcode and flags).
    7564             :  *
    7565             :  * Returns 'true' if SQE is allowed, 'false' otherwise.
    7566             :  */
    7567           0 : static inline bool io_check_restriction(struct io_ring_ctx *ctx,
    7568             :                                         struct io_kiocb *req,
    7569             :                                         unsigned int sqe_flags)
    7570             : {
    7571           0 :         if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
    7572             :                 return false;
    7573             : 
    7574           0 :         if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
    7575             :             ctx->restrictions.sqe_flags_required)
    7576             :                 return false;
    7577             : 
    7578           0 :         if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
    7579             :                           ctx->restrictions.sqe_flags_required))
    7580             :                 return false;
    7581             : 
    7582             :         return true;
    7583             : }
    7584             : 
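A liburing sketch of installing the rules that io_check_restriction() enforces; the ring must start disabled (IORING_SETUP_R_DISABLED), have its restrictions registered, and then be enabled.

#include <liburing.h>

static int restricted_ring(struct io_uring *ring)
{
        struct io_uring_restriction res[] = {
                { .opcode = IORING_RESTRICTION_SQE_OP, .sqe_op = IORING_OP_READ },
                { .opcode = IORING_RESTRICTION_SQE_OP, .sqe_op = IORING_OP_WRITE },
        };
        int ret;

        ret = io_uring_queue_init(8, ring, IORING_SETUP_R_DISABLED);
        if (ret)
                return ret;
        io_uring_register_restrictions(ring, res, 2);
        return io_uring_enable_rings(ring); /* only READ/WRITE pass from now on */
}
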
    7585             : static void io_init_req_drain(struct io_kiocb *req)
    7586             : {
    7587           0 :         struct io_ring_ctx *ctx = req->ctx;
    7588           0 :         struct io_kiocb *head = ctx->submit_state.link.head;
    7589             : 
    7590           0 :         ctx->drain_active = true;
    7591           0 :         if (head) {
    7592             :                 /*
    7593             :                  * If we need to drain a request in the middle of a link, drain
    7594             :                  * the head request and the next request/link after the current
    7595             :                  * link. Considering sequential execution of links,
    7596             :                  * REQ_F_IO_DRAIN will be maintained for every request of our
    7597             :                  * link.
    7598             :                  */
    7599           0 :                 head->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC;
    7600           0 :                 ctx->drain_next = true;
    7601             :         }
    7602             : }
    7603             : 
    7604           0 : static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
    7605             :                        const struct io_uring_sqe *sqe)
    7606             :         __must_hold(&ctx->uring_lock)
    7607             : {
    7608             :         unsigned int sqe_flags;
    7609             :         int personality;
    7610             :         u8 opcode;
    7611             : 
    7612             :         /* req is partially pre-initialised, see io_preinit_req() */
    7613           0 :         req->opcode = opcode = READ_ONCE(sqe->opcode);
    7614             :         /* same numerical values with corresponding REQ_F_*, safe to copy */
    7615           0 :         req->flags = sqe_flags = READ_ONCE(sqe->flags);
    7616           0 :         req->user_data = READ_ONCE(sqe->user_data);
    7617           0 :         req->file = NULL;
    7618           0 :         req->fixed_rsrc_refs = NULL;
    7619           0 :         req->task = current;
    7620             : 
    7621           0 :         if (unlikely(opcode >= IORING_OP_LAST)) {
    7622           0 :                 req->opcode = 0;
    7623           0 :                 return -EINVAL;
    7624             :         }
    7625           0 :         if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) {
    7626             :                 /* enforce forwards compatibility on users */
    7627           0 :                 if (sqe_flags & ~SQE_VALID_FLAGS)
    7628             :                         return -EINVAL;
    7629           0 :                 if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
    7630           0 :                     !io_op_defs[opcode].buffer_select)
    7631             :                         return -EOPNOTSUPP;
    7632           0 :                 if (sqe_flags & IOSQE_CQE_SKIP_SUCCESS)
    7633           0 :                         ctx->drain_disabled = true;
    7634           0 :                 if (sqe_flags & IOSQE_IO_DRAIN) {
    7635           0 :                         if (ctx->drain_disabled)
    7636             :                                 return -EOPNOTSUPP;
    7637           0 :                         io_init_req_drain(req);
    7638             :                 }
    7639             :         }
    7640           0 :         if (unlikely(ctx->restricted || ctx->drain_active || ctx->drain_next)) {
    7641           0 :                 if (ctx->restricted && !io_check_restriction(ctx, req, sqe_flags))
    7642             :                         return -EACCES;
    7643             :                 /* knock it to the slow queue path, will be drained there */
    7644           0 :                 if (ctx->drain_active)
    7645           0 :                         req->flags |= REQ_F_FORCE_ASYNC;
    7646             :                 /* if there is no link, we're at "next" request and need to drain */
    7647           0 :                 if (unlikely(ctx->drain_next) && !ctx->submit_state.link.head) {
    7648           0 :                         ctx->drain_next = false;
    7649           0 :                         ctx->drain_active = true;
    7650           0 :                         req->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC;
    7651             :                 }
    7652             :         }
    7653             : 
    7654           0 :         if (io_op_defs[opcode].needs_file) {
    7655           0 :                 struct io_submit_state *state = &ctx->submit_state;
    7656             : 
    7657           0 :                 req->fd = READ_ONCE(sqe->fd);
    7658             : 
    7659             :                 /*
    7660             :                  * Plug now if we have more than 2 IO left after this, and the
    7661             :                  * target is potentially a read/write to block based storage.
    7662             :                  */
    7663           0 :                 if (state->need_plug && io_op_defs[opcode].plug) {
    7664           0 :                         state->plug_started = true;
    7665           0 :                         state->need_plug = false;
    7666           0 :                         blk_start_plug_nr_ios(&state->plug, state->submit_nr);
    7667             :                 }
    7668             :         }
    7669             : 
    7670           0 :         personality = READ_ONCE(sqe->personality);
    7671           0 :         if (personality) {
    7672             :                 int ret;
    7673             : 
    7674           0 :                 req->creds = xa_load(&ctx->personalities, personality);
    7675           0 :                 if (!req->creds)
    7676             :                         return -EINVAL;
    7677           0 :                 get_cred(req->creds);
    7678           0 :                 ret = security_uring_override_creds(req->creds);
    7679             :                 if (ret) {
    7680             :                         put_cred(req->creds);
    7681             :                         return ret;
    7682             :                 }
    7683           0 :                 req->flags |= REQ_F_CREDS;
    7684             :         }
    7685             : 
    7686           0 :         return io_req_prep(req, sqe);
    7687             : }
    7688             : 
    7689           0 : static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
    7690             :                          const struct io_uring_sqe *sqe)
    7691             :         __must_hold(&ctx->uring_lock)
    7692             : {
    7693           0 :         struct io_submit_link *link = &ctx->submit_state.link;
    7694             :         int ret;
    7695             : 
    7696           0 :         ret = io_init_req(ctx, req, sqe);
    7697           0 :         if (unlikely(ret)) {
    7698           0 :                 trace_io_uring_req_failed(sqe, ctx, req, ret);
    7699             : 
    7700             :                 /* fail even hard links since we don't submit */
    7701           0 :                 if (link->head) {
    7702             :                         /*
    7703             :                          * A link req can be judged failed or cancelled by whether
    7704             :                          * REQ_F_FAIL is set, but the head is an exception: it may
    7705             :                          * have REQ_F_FAIL set only because another req in the
    7706             :                          * chain failed. Leverage req->result to distinguish whether
    7707             :                          * a head got REQ_F_FAIL from its own failure or another
    7708             :                          * req's, so the correct ret code can be set for it. Init
    7709             :                          * result here to avoid affecting the normal path.
    7710             :                          */
    7711           0 :                         if (!(link->head->flags & REQ_F_FAIL))
    7712           0 :                                 req_fail_link_node(link->head, -ECANCELED);
    7713           0 :                 } else if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
    7714             :                         /*
    7715             :                          * the current req is a normal req; return the
    7716             :                          * error and thus break the submission loop.
    7717             :                          */
    7718           0 :                         io_req_complete_failed(req, ret);
    7719           0 :                         return ret;
    7720             :                 }
    7721             :                 req_fail_link_node(req, ret);
    7722             :         }
    7723             : 
    7724             :         /* don't need @sqe from now on */
    7725           0 :         trace_io_uring_submit_sqe(ctx, req, req->user_data, req->opcode,
    7726             :                                   req->flags, true,
    7727           0 :                                   ctx->flags & IORING_SETUP_SQPOLL);
    7728             : 
    7729             :         /*
    7730             :          * If we already have a head request, queue this one for async
    7731             :  * submission once the head completes. If we don't have a head but
    7732             :          * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
    7733             :          * submitted sync once the chain is complete. If none of those
    7734             :          * conditions are true (normal request), then just queue it.
    7735             :          */
    7736           0 :         if (link->head) {
    7737           0 :                 struct io_kiocb *head = link->head;
    7738             : 
    7739           0 :                 if (!(req->flags & REQ_F_FAIL)) {
    7740           0 :                         ret = io_req_prep_async(req);
    7741           0 :                         if (unlikely(ret)) {
    7742           0 :                                 req_fail_link_node(req, ret);
    7743           0 :                                 if (!(head->flags & REQ_F_FAIL))
    7744             :                                         req_fail_link_node(head, -ECANCELED);
    7745             :                         }
    7746             :                 }
    7747           0 :                 trace_io_uring_link(ctx, req, head);
    7748           0 :                 link->last->link = req;
    7749           0 :                 link->last = req;
    7750             : 
    7751           0 :                 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK))
    7752             :                         return 0;
    7753             :                 /* last request of a link, enqueue the link */
    7754           0 :                 link->head = NULL;
    7755           0 :                 req = head;
    7756           0 :         } else if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
    7757           0 :                 link->head = req;
    7758           0 :                 link->last = req;
    7759           0 :                 return 0;
    7760             :         }
    7761             : 
    7762           0 :         io_queue_sqe(req);
    7763           0 :         return 0;
    7764             : }
    7765             : 
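A liburing sketch of the chain bookkeeping above: the head is held in ctx->submit_state.link until an SQE without IOSQE_IO_LINK closes the chain, after which the whole link executes sequentially.

#include <liburing.h>

static int write_then_fsync(struct io_uring *ring, int fd,
                            const void *buf, unsigned len)
{
        struct io_uring_sqe *sqe;

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_write(sqe, fd, buf, len, 0);
        sqe->flags |= IOSQE_IO_LINK;        /* head of the chain */

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_fsync(sqe, fd, 0);    /* runs only after the write */
        return io_uring_submit(ring);       /* no link flag: chain closed */
}
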
    7766             : /*
    7767             :  * Batched submission is done, ensure local IO is flushed out.
    7768             :  */
    7769           0 : static void io_submit_state_end(struct io_ring_ctx *ctx)
    7770             : {
    7771           0 :         struct io_submit_state *state = &ctx->submit_state;
    7772             : 
    7773           0 :         if (state->link.head)
    7774           0 :                 io_queue_sqe(state->link.head);
    7775             :         /* flush only after queuing links as they can generate completions */
    7776           0 :         io_submit_flush_completions(ctx);
    7777           0 :         if (state->plug_started)
    7778           0 :                 blk_finish_plug(&state->plug);
    7779           0 : }
    7780             : 
    7781             : /*
    7782             :  * Start submission side cache.
    7783             :  */
    7784             : static void io_submit_state_start(struct io_submit_state *state,
    7785             :                                   unsigned int max_ios)
    7786             : {
    7787           0 :         state->plug_started = false;
    7788           0 :         state->need_plug = max_ios > 2;
    7789           0 :         state->submit_nr = max_ios;
    7790             :         /* set only head, no need to init link_last in advance */
    7791           0 :         state->link.head = NULL;
    7792             : }
    7793             : 
    7794             : static void io_commit_sqring(struct io_ring_ctx *ctx)
    7795             : {
    7796           0 :         struct io_rings *rings = ctx->rings;
    7797             : 
    7798             :         /*
    7799             :          * Ensure any loads from the SQEs are done at this point,
    7800             :          * since once we write the new head, the application could
    7801             :          * write new data to them.
    7802             :          */
    7803           0 :         smp_store_release(&rings->sq.head, ctx->cached_sq_head);
    7804             : }
    7805             : 
    7806             : /*
    7807             :  * Fetch an sqe, if one is available. Note this returns a pointer to memory
    7808             :  * that is mapped by userspace. This means that care needs to be taken to
    7809             :  * ensure that reads are stable, as we cannot rely on userspace always
    7810             :  * being a good citizen. If members of the sqe are validated and then later
    7811             :  * used, it's important that those reads are done through READ_ONCE() to
    7812             :  * prevent a re-load down the line.
    7813             :  */
    7814           0 : static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
    7815             : {
    7816           0 :         unsigned head, mask = ctx->sq_entries - 1;
    7817           0 :         unsigned sq_idx = ctx->cached_sq_head++ & mask;
    7818             : 
    7819             :         /*
    7820             :          * The cached sq head (or cq tail) serves two purposes:
    7821             :          *
    7822             :          * 1) allows us to batch the cost of updating the user visible
    7823             :          *    head updates.
    7824             :          * 2) allows the kernel side to track the head on its own, even
    7825             :          *    though the application is the one updating it.
    7826             :          */
    7827           0 :         head = READ_ONCE(ctx->sq_array[sq_idx]);
    7828           0 :         if (likely(head < ctx->sq_entries))
    7829           0 :                 return &ctx->sq_sqes[head];
    7830             : 
    7831             :         /* drop invalid entries */
    7832           0 :         ctx->cq_extra--;
    7833           0 :         WRITE_ONCE(ctx->rings->sq_dropped,
    7834             :                    READ_ONCE(ctx->rings->sq_dropped) + 1);
    7835           0 :         return NULL;
    7836             : }
    7837             : 
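For the raw (non-liburing) interface, a sketch of the userspace half that io_get_sqe() consumes; the pointers stand for the mmap'd SQ ring fields located via io_uring_setup()'s io_sqring_offsets, which this sketch assumes are already mapped.

#include <stdatomic.h>
#include <linux/io_uring.h>

static void push_sqe(struct io_uring_sqe *sqes, unsigned *sq_array,
                     _Atomic unsigned *ktail, unsigned ring_mask,
                     const struct io_uring_sqe *src)
{
        unsigned tail = atomic_load_explicit(ktail, memory_order_relaxed);
        unsigned idx = tail & ring_mask;

        sqes[idx] = *src;       /* fill the SQE slot */
        sq_array[idx] = idx;    /* publish its index; io_get_sqe() reads this */
        atomic_store_explicit(ktail, tail + 1, memory_order_release);
}
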
    7838           0 : static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
    7839             :         __must_hold(&ctx->uring_lock)
    7840             : {
    7841           0 :         unsigned int entries = io_sqring_entries(ctx);
    7842           0 :         int submitted = 0;
    7843             : 
    7844           0 :         if (unlikely(!entries))
    7845             :                 return 0;
    7846             :         /* make sure SQ entry isn't read before tail */
    7847           0 :         nr = min3(nr, ctx->sq_entries, entries);
    7848           0 :         io_get_task_refs(nr);
    7849             : 
    7850           0 :         io_submit_state_start(&ctx->submit_state, nr);
    7851             :         do {
    7852             :                 const struct io_uring_sqe *sqe;
    7853             :                 struct io_kiocb *req;
    7854             : 
    7855           0 :                 if (unlikely(!io_alloc_req_refill(ctx))) {
    7856           0 :                         if (!submitted)
    7857           0 :                                 submitted = -EAGAIN;
    7858             :                         break;
    7859             :                 }
    7860           0 :                 req = io_alloc_req(ctx);
    7861           0 :                 sqe = io_get_sqe(ctx);
    7862           0 :                 if (unlikely(!sqe)) {
    7863           0 :                         wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list);
    7864             :                         break;
    7865             :                 }
    7866             :                 /* will complete beyond this point, count as submitted */
    7867           0 :                 submitted++;
    7868           0 :                 if (io_submit_sqe(ctx, req, sqe)) {
    7869             :                         /*
    7870             :                          * Continue submitting even for sqe failure if the
    7871             :                          * ring was setup with IORING_SETUP_SUBMIT_ALL
    7872             :                          */
    7873           0 :                         if (!(ctx->flags & IORING_SETUP_SUBMIT_ALL))
    7874             :                                 break;
    7875             :                 }
    7876           0 :         } while (submitted < nr);
    7877             : 
    7878           0 :         if (unlikely(submitted != nr)) {
    7879           0 :                 int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
    7880           0 :                 int unused = nr - ref_used;
    7881             : 
    7882           0 :                 current->io_uring->cached_refs += unused;
    7883             :         }
    7884             : 
    7885           0 :         io_submit_state_end(ctx);
    7886             :         /* Commit SQ ring head once we've consumed and submitted all SQEs */
    7887           0 :         io_commit_sqring(ctx);
    7888             : 
    7889           0 :         return submitted;
    7890             : }
    7891             : 
    7892             : static inline bool io_sqd_events_pending(struct io_sq_data *sqd)
    7893             : {
    7894           0 :         return READ_ONCE(sqd->state);
    7895             : }
    7896             : 
    7897             : static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx)
    7898             : {
    7899             :         /* Tell userspace we may need a wakeup call */
    7900           0 :         spin_lock(&ctx->completion_lock);
    7901           0 :         WRITE_ONCE(ctx->rings->sq_flags,
    7902             :                    ctx->rings->sq_flags | IORING_SQ_NEED_WAKEUP);
    7903           0 :         spin_unlock(&ctx->completion_lock);
    7904             : }
    7905             : 
    7906             : static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx)
    7907             : {
    7908           0 :         spin_lock(&ctx->completion_lock);
    7909           0 :         WRITE_ONCE(ctx->rings->sq_flags,
    7910             :                    ctx->rings->sq_flags & ~IORING_SQ_NEED_WAKEUP);
    7911           0 :         spin_unlock(&ctx->completion_lock);
    7912             : }
    7913             : 
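The flag helpers above toggle IORING_SQ_NEED_WAKEUP in the shared sq_flags word; a raw-syscall sketch of the userspace reaction (the sq_flags pointer comes from the SQ ring mmap and is an assumption of this sketch):

#include <linux/io_uring.h>
#include <stdatomic.h>
#include <sys/syscall.h>
#include <unistd.h>

static void wake_sqpoll_if_needed(int ring_fd, _Atomic unsigned *sq_flags)
{
        if (atomic_load_explicit(sq_flags, memory_order_relaxed) &
            IORING_SQ_NEED_WAKEUP)
                syscall(__NR_io_uring_enter, ring_fd, 0, 0,
                        IORING_ENTER_SQ_WAKEUP, NULL, 0);
}
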
    7914           0 : static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
    7915             : {
    7916             :         unsigned int to_submit;
    7917           0 :         int ret = 0;
    7918             : 
    7919           0 :         to_submit = io_sqring_entries(ctx);
    7920             :         /* if we're handling multiple rings, cap submit size for fairness */
    7921           0 :         if (cap_entries && to_submit > IORING_SQPOLL_CAP_ENTRIES_VALUE)
    7922           0 :                 to_submit = IORING_SQPOLL_CAP_ENTRIES_VALUE;
    7923             : 
    7924           0 :         if (!wq_list_empty(&ctx->iopoll_list) || to_submit) {
    7925           0 :                 const struct cred *creds = NULL;
    7926             : 
    7927           0 :                 if (ctx->sq_creds != current_cred())
    7928           0 :                         creds = override_creds(ctx->sq_creds);
    7929             : 
    7930           0 :                 mutex_lock(&ctx->uring_lock);
    7931           0 :                 if (!wq_list_empty(&ctx->iopoll_list))
    7932           0 :                         io_do_iopoll(ctx, true);
    7933             : 
    7934             :                 /*
    7935             :                  * Don't submit if refs are dying, good for io_uring_register(),
    7936             :                  * but also it is relied upon by io_ring_exit_work()
    7937             :                  */
    7938           0 :                 if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
    7939           0 :                     !(ctx->flags & IORING_SETUP_R_DISABLED))
    7940           0 :                         ret = io_submit_sqes(ctx, to_submit);
    7941           0 :                 mutex_unlock(&ctx->uring_lock);
    7942             : 
    7943           0 :                 if (to_submit && wq_has_sleeper(&ctx->sqo_sq_wait))
    7944           0 :                         wake_up(&ctx->sqo_sq_wait);
    7945           0 :                 if (creds)
    7946           0 :                         revert_creds(creds);
    7947             :         }
    7948             : 
    7949           0 :         return ret;
    7950             : }
    7951             : 
    7952           0 : static __cold void io_sqd_update_thread_idle(struct io_sq_data *sqd)
    7953             : {
    7954             :         struct io_ring_ctx *ctx;
    7955           0 :         unsigned sq_thread_idle = 0;
    7956             : 
    7957           0 :         list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
    7958           0 :                 sq_thread_idle = max(sq_thread_idle, ctx->sq_thread_idle);
    7959           0 :         sqd->sq_thread_idle = sq_thread_idle;
    7960           0 : }
    7961             : 
    7962           0 : static bool io_sqd_handle_event(struct io_sq_data *sqd)
    7963             : {
    7964           0 :         bool did_sig = false;
    7965             :         struct ksignal ksig;
    7966             : 
    7967           0 :         if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
    7968           0 :             signal_pending(current)) {
    7969           0 :                 mutex_unlock(&sqd->lock);
    7970           0 :                 if (signal_pending(current))
    7971           0 :                         did_sig = get_signal(&ksig);
    7972           0 :                 cond_resched();
    7973           0 :                 mutex_lock(&sqd->lock);
    7974             :         }
    7975           0 :         return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
    7976             : }
    7977             : 
    7978           0 : static int io_sq_thread(void *data)
    7979             : {
    7980           0 :         struct io_sq_data *sqd = data;
    7981             :         struct io_ring_ctx *ctx;
    7982           0 :         unsigned long timeout = 0;
    7983             :         char buf[TASK_COMM_LEN];
    7984           0 :         DEFINE_WAIT(wait);
    7985             : 
    7986           0 :         snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
    7987           0 :         set_task_comm(current, buf);
    7988             : 
    7989           0 :         if (sqd->sq_cpu != -1)
    7990           0 :                 set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
    7991             :         else
    7992           0 :                 set_cpus_allowed_ptr(current, cpu_online_mask);
    7993           0 :         current->flags |= PF_NO_SETAFFINITY;
    7994             : 
    7995           0 :         audit_alloc_kernel(current);
    7996             : 
    7997           0 :         mutex_lock(&sqd->lock);
    7998             :         while (1) {
    7999           0 :                 bool cap_entries, sqt_spin = false;
    8000             : 
    8001           0 :                 if (io_sqd_events_pending(sqd) || signal_pending(current)) {
    8002           0 :                         if (io_sqd_handle_event(sqd))
    8003             :                                 break;
    8004           0 :                         timeout = jiffies + sqd->sq_thread_idle;
    8005             :                 }
    8006             : 
    8007           0 :                 cap_entries = !list_is_singular(&sqd->ctx_list);
    8008           0 :                 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
    8009           0 :                         int ret = __io_sq_thread(ctx, cap_entries);
    8010             : 
    8011           0 :                         if (!sqt_spin && (ret > 0 || !wq_list_empty(&ctx->iopoll_list)))
    8012           0 :                                 sqt_spin = true;
    8013             :                 }
    8014           0 :                 if (io_run_task_work())
    8015           0 :                         sqt_spin = true;
    8016             : 
    8017           0 :                 if (sqt_spin || !time_after(jiffies, timeout)) {
    8018           0 :                         cond_resched();
    8019           0 :                         if (sqt_spin)
    8020           0 :                                 timeout = jiffies + sqd->sq_thread_idle;
    8021           0 :                         continue;
    8022             :                 }
    8023             : 
    8024           0 :                 prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
    8025           0 :                 if (!io_sqd_events_pending(sqd) && !task_work_pending(current)) {
    8026           0 :                         bool needs_sched = true;
    8027             : 
    8028           0 :                         list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
    8029           0 :                                 io_ring_set_wakeup_flag(ctx);
    8030             : 
    8031           0 :                                 if ((ctx->flags & IORING_SETUP_IOPOLL) &&
    8032           0 :                                     !wq_list_empty(&ctx->iopoll_list)) {
    8033             :                                         needs_sched = false;
    8034             :                                         break;
    8035             :                                 }
    8036             : 
    8037             :                                 /*
    8038             :                                  * Ensure the store of the wakeup flag is not
    8039             :                                  * reordered with the load of the SQ tail
    8040             :                                  */
    8041           0 :                                 smp_mb();
    8042             : 
    8043           0 :                                 if (io_sqring_entries(ctx)) {
    8044             :                                         needs_sched = false;
    8045             :                                         break;
    8046             :                                 }
    8047             :                         }
    8048             : 
    8049           0 :                         if (needs_sched) {
    8050           0 :                                 mutex_unlock(&sqd->lock);
    8051           0 :                                 schedule();
    8052           0 :                                 mutex_lock(&sqd->lock);
    8053             :                         }
    8054           0 :                         list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
    8055           0 :                                 io_ring_clear_wakeup_flag(ctx);
    8056             :                 }
    8057             : 
    8058           0 :                 finish_wait(&sqd->wait, &wait);
    8059           0 :                 timeout = jiffies + sqd->sq_thread_idle;
    8060             :         }
    8061             : 
    8062           0 :         io_uring_cancel_generic(true, sqd);
    8063           0 :         sqd->thread = NULL;
    8064           0 :         list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
    8065           0 :                 io_ring_set_wakeup_flag(ctx);
    8066           0 :         io_run_task_work();
    8067           0 :         mutex_unlock(&sqd->lock);
    8068             : 
    8069           0 :         audit_free(current);
    8070             : 
    8071           0 :         complete(&sqd->exited);
    8072           0 :         do_exit(0);
    8073             : }
    8074             : 
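The loop above is the kernel half of the IORING_SQ_NEED_WAKEUP handshake described at the top of this file: before sleeping, the SQPOLL thread sets the flag, issues smp_mb(), and re-checks the SQ tail. For reference, a minimal sketch of the matching userspace half follows (an editor's addition, not part of fs/io_uring.c; `ring_fd` and `sq_flags` are illustrative names for the ring fd and the mmap'ed SQ flags word, and real applications would normally rely on liburing instead):

/* Userspace side of the IORING_SQ_NEED_WAKEUP handshake (sketch). */
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

static void sqpoll_submit(int ring_fd, unsigned int to_submit,
                          const volatile unsigned int *sq_flags)
{
        /*
         * Full fence so the SQ tail store is ordered before the flags
         * load, pairing with the smp_mb() in io_sq_thread() above.
         */
        __atomic_thread_fence(__ATOMIC_SEQ_CST);
        if (*sq_flags & IORING_SQ_NEED_WAKEUP)
                syscall(__NR_io_uring_enter, ring_fd, to_submit, 0,
                        IORING_ENTER_SQ_WAKEUP, NULL, 0);
}
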
    8075             : struct io_wait_queue {
    8076             :         struct wait_queue_entry wq;
    8077             :         struct io_ring_ctx *ctx;
    8078             :         unsigned cq_tail;
    8079             :         unsigned nr_timeouts;
    8080             : };
    8081             : 
    8082             : static inline bool io_should_wake(struct io_wait_queue *iowq)
    8083             : {
    8084           0 :         struct io_ring_ctx *ctx = iowq->ctx;
    8085           0 :         int dist = ctx->cached_cq_tail - (int) iowq->cq_tail;
    8086             : 
    8087             :         /*
    8088             :          * Wake up if we have enough events, or if a timeout occurred since we
    8089             :          * started waiting. For timeouts, we always want to return to userspace,
    8090             :          * regardless of event count.
    8091             :          */
    8092           0 :         return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
    8093             : }
    8094             : 
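The signed subtraction in io_should_wake() keeps the comparison correct even when the free-running unsigned tail counters wrap. A worked example (editor's addition, values assumed):

/* Wrap-safe distance check (editor's illustration). */
static int should_wake_example(void)
{
        unsigned int cached_cq_tail = 0x00000002;      /* wrapped past UINT_MAX */
        unsigned int wait_cq_tail   = 0xfffffffe;      /* cq.head + min_events */
        int dist = cached_cq_tail - (int)wait_cq_tail; /* == 4 despite the wrap */

        return dist >= 0;       /* true: four CQEs past the target, wake up */
}
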
    8095           0 : static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
    8096             :                             int wake_flags, void *key)
    8097             : {
    8098           0 :         struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
    8099             :                                                         wq);
    8100             : 
    8101             :         /*
    8102             :          * We cannot safely flush overflowed CQEs from here, so make sure
    8103             :          * the task is woken up; the next invocation will flush them.
    8104             :          */
    8105           0 :         if (io_should_wake(iowq) || test_bit(0, &iowq->ctx->check_cq_overflow))
    8106           0 :                 return autoremove_wake_function(curr, mode, wake_flags, key);
    8107             :         return -1;
    8108             : }
    8109             : 
    8110           0 : static int io_run_task_work_sig(void)
    8111             : {
    8112           0 :         if (io_run_task_work())
    8113             :                 return 1;
    8114           0 :         if (test_thread_flag(TIF_NOTIFY_SIGNAL))
    8115             :                 return -ERESTARTSYS;
    8116           0 :         if (task_sigpending(current))
    8117             :                 return -EINTR;
    8118           0 :         return 0;
    8119             : }
    8120             : 
    8121             : /* when this returns > 0, the caller should retry */
    8122           0 : static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
    8123             :                                           struct io_wait_queue *iowq,
    8124             :                                           ktime_t timeout)
    8125             : {
    8126             :         int ret;
    8127             : 
    8128             :         /* make sure we run task_work before checking for signals */
    8129           0 :         ret = io_run_task_work_sig();
    8130           0 :         if (ret || io_should_wake(iowq))
    8131             :                 return ret;
    8132             :         /* let the caller flush overflows, then retry */
    8133           0 :         if (test_bit(0, &ctx->check_cq_overflow))
    8134             :                 return 1;
    8135             : 
    8136           0 :         if (!schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS))
    8137             :                 return -ETIME;
    8138           0 :         return 1;
    8139             : }
    8140             : 
    8141             : /*
    8142             :  * Wait until events become available, if we don't already have some. The
    8143             :  * application must reap them itself, as they reside on the shared cq ring.
    8144             :  */
    8145           0 : static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
    8146             :                           const sigset_t __user *sig, size_t sigsz,
    8147             :                           struct __kernel_timespec __user *uts)
    8148             : {
    8149             :         struct io_wait_queue iowq;
    8150           0 :         struct io_rings *rings = ctx->rings;
    8151           0 :         ktime_t timeout = KTIME_MAX;
    8152             :         int ret;
    8153             : 
    8154             :         do {
    8155           0 :                 io_cqring_overflow_flush(ctx);
    8156           0 :                 if (io_cqring_events(ctx) >= min_events)
    8157             :                         return 0;
    8158           0 :                 if (!io_run_task_work())
    8159             :                         break;
    8160             :         } while (1);
    8161             : 
    8162           0 :         if (sig) {
    8163             : #ifdef CONFIG_COMPAT
    8164             :                 if (in_compat_syscall())
    8165             :                         ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
    8166             :                                                       sigsz);
    8167             :                 else
    8168             : #endif
    8169           0 :                         ret = set_user_sigmask(sig, sigsz);
    8170             : 
    8171           0 :                 if (ret)
    8172             :                         return ret;
    8173             :         }
    8174             : 
    8175           0 :         if (uts) {
    8176             :                 struct timespec64 ts;
    8177             : 
    8178           0 :                 if (get_timespec64(&ts, uts))
    8179           0 :                         return -EFAULT;
    8180           0 :                 timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
    8181             :         }
    8182             : 
    8183           0 :         init_waitqueue_func_entry(&iowq.wq, io_wake_function);
    8184           0 :         iowq.wq.private = current;
    8185           0 :         INIT_LIST_HEAD(&iowq.wq.entry);
    8186           0 :         iowq.ctx = ctx;
    8187           0 :         iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
    8188           0 :         iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
    8189             : 
    8190           0 :         trace_io_uring_cqring_wait(ctx, min_events);
    8191             :         do {
    8192             :                 /* if we can't even flush overflow, don't wait for more */
    8193           0 :                 if (!io_cqring_overflow_flush(ctx)) {
    8194             :                         ret = -EBUSY;
    8195             :                         break;
    8196             :                 }
    8197           0 :                 prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
    8198             :                                                 TASK_INTERRUPTIBLE);
    8199           0 :                 ret = io_cqring_wait_schedule(ctx, &iowq, timeout);
    8200           0 :                 finish_wait(&ctx->cq_wait, &iowq.wq);
    8201           0 :                 cond_resched();
    8202           0 :         } while (ret > 0);
    8203             : 
    8204           0 :         restore_saved_sigmask_unless(ret == -EINTR);
    8205             : 
    8206           0 :         return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
    8207             : }
    8208             : 
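io_cqring_wait() is reached from the io_uring_enter(2) syscall when IORING_ENTER_GETEVENTS is set. A minimal userspace call that lands here (editor's sketch; `ring_fd` is illustrative, and the sigmask/timeout arguments are left NULL for brevity):

/* Block until at least one CQE is available (sketch). */
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

static int wait_one_cqe(int ring_fd)
{
        return syscall(__NR_io_uring_enter, ring_fd, 0 /* to_submit */,
                       1 /* min_complete */, IORING_ENTER_GETEVENTS,
                       NULL, 0);
}
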
    8209           0 : static void io_free_page_table(void **table, size_t size)
    8210             : {
    8211           0 :         unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
    8212             : 
    8213           0 :         for (i = 0; i < nr_tables; i++)
    8214           0 :                 kfree(table[i]);
    8215           0 :         kfree(table);
    8216           0 : }
    8217             : 
    8218           0 : static __cold void **io_alloc_page_table(size_t size)
    8219             : {
    8220           0 :         unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
    8221           0 :         size_t init_size = size;
    8222             :         void **table;
    8223             : 
    8224           0 :         table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL_ACCOUNT);
    8225           0 :         if (!table)
    8226             :                 return NULL;
    8227             : 
    8228           0 :         for (i = 0; i < nr_tables; i++) {
    8229           0 :                 unsigned int this_size = min_t(size_t, size, PAGE_SIZE);
    8230             : 
    8231           0 :                 table[i] = kzalloc(this_size, GFP_KERNEL_ACCOUNT);
    8232           0 :                 if (!table[i]) {
    8233           0 :                         io_free_page_table(table, init_size);
    8234           0 :                         return NULL;
    8235             :                 }
    8236           0 :                 size -= this_size;
    8237             :         }
    8238             :         return table;
    8239             : }
    8240             : 
    8241             : static void io_rsrc_node_destroy(struct io_rsrc_node *ref_node)
    8242             : {
    8243           0 :         percpu_ref_exit(&ref_node->refs);
    8244           0 :         kfree(ref_node);
    8245             : }
    8246             : 
    8247           0 : static __cold void io_rsrc_node_ref_zero(struct percpu_ref *ref)
    8248             : {
    8249           0 :         struct io_rsrc_node *node = container_of(ref, struct io_rsrc_node, refs);
    8250           0 :         struct io_ring_ctx *ctx = node->rsrc_data->ctx;
    8251             :         unsigned long flags;
    8252           0 :         bool first_add = false;
    8253           0 :         unsigned long delay = HZ;
    8254             : 
    8255           0 :         spin_lock_irqsave(&ctx->rsrc_ref_lock, flags);
    8256           0 :         node->done = true;
    8257             : 
    8258             :         /* if we are mid-quiesce then do not delay */
    8259           0 :         if (node->rsrc_data->quiesce)
    8260           0 :                 delay = 0;
    8261             : 
    8262           0 :         while (!list_empty(&ctx->rsrc_ref_list)) {
    8263           0 :                 node = list_first_entry(&ctx->rsrc_ref_list,
    8264             :                                             struct io_rsrc_node, node);
    8265             :                 /* recycle ref nodes in order */
    8266           0 :                 if (!node->done)
    8267             :                         break;
    8268           0 :                 list_del(&node->node);
    8269           0 :                 first_add |= llist_add(&node->llist, &ctx->rsrc_put_llist);
    8270             :         }
    8271           0 :         spin_unlock_irqrestore(&ctx->rsrc_ref_lock, flags);
    8272             : 
    8273           0 :         if (first_add)
    8274           0 :                 mod_delayed_work(system_wq, &ctx->rsrc_put_work, delay);
    8275           0 : }
    8276             : 
    8277           0 : static struct io_rsrc_node *io_rsrc_node_alloc(void)
    8278             : {
    8279             :         struct io_rsrc_node *ref_node;
    8280             : 
    8281           0 :         ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
    8282           0 :         if (!ref_node)
    8283             :                 return NULL;
    8284             : 
    8285           0 :         if (percpu_ref_init(&ref_node->refs, io_rsrc_node_ref_zero,
    8286             :                             0, GFP_KERNEL)) {
    8287           0 :                 kfree(ref_node);
    8288           0 :                 return NULL;
    8289             :         }
    8290           0 :         INIT_LIST_HEAD(&ref_node->node);
    8291           0 :         INIT_LIST_HEAD(&ref_node->rsrc_list);
    8292           0 :         ref_node->done = false;
    8293           0 :         return ref_node;
    8294             : }
    8295             : 
    8296           0 : static void io_rsrc_node_switch(struct io_ring_ctx *ctx,
    8297             :                                 struct io_rsrc_data *data_to_kill)
    8298             :         __must_hold(&ctx->uring_lock)
    8299             : {
    8300           0 :         WARN_ON_ONCE(!ctx->rsrc_backup_node);
    8301           0 :         WARN_ON_ONCE(data_to_kill && !ctx->rsrc_node);
    8302             : 
    8303           0 :         io_rsrc_refs_drop(ctx);
    8304             : 
    8305           0 :         if (data_to_kill) {
    8306           0 :                 struct io_rsrc_node *rsrc_node = ctx->rsrc_node;
    8307             : 
    8308           0 :                 rsrc_node->rsrc_data = data_to_kill;
    8309           0 :                 spin_lock_irq(&ctx->rsrc_ref_lock);
    8310           0 :                 list_add_tail(&rsrc_node->node, &ctx->rsrc_ref_list);
    8311           0 :                 spin_unlock_irq(&ctx->rsrc_ref_lock);
    8312             : 
    8313           0 :                 atomic_inc(&data_to_kill->refs);
    8314           0 :                 percpu_ref_kill(&rsrc_node->refs);
    8315           0 :                 ctx->rsrc_node = NULL;
    8316             :         }
    8317             : 
    8318           0 :         if (!ctx->rsrc_node) {
    8319           0 :                 ctx->rsrc_node = ctx->rsrc_backup_node;
    8320           0 :                 ctx->rsrc_backup_node = NULL;
    8321             :         }
    8322           0 : }
    8323             : 
    8324             : static int io_rsrc_node_switch_start(struct io_ring_ctx *ctx)
    8325             : {
    8326           0 :         if (ctx->rsrc_backup_node)
    8327             :                 return 0;
    8328           0 :         ctx->rsrc_backup_node = io_rsrc_node_alloc();
    8329           0 :         return ctx->rsrc_backup_node ? 0 : -ENOMEM;
    8330             : }
    8331             : 
    8332           0 : static __cold int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
    8333             :                                       struct io_ring_ctx *ctx)
    8334             : {
    8335             :         int ret;
    8336             : 
    8337             :         /* As we may drop ->uring_lock, another task may have started a quiesce */
    8338           0 :         if (data->quiesce)
    8339             :                 return -ENXIO;
    8340             : 
    8341           0 :         data->quiesce = true;
    8342             :         do {
    8343           0 :                 ret = io_rsrc_node_switch_start(ctx);
    8344           0 :                 if (ret)
    8345             :                         break;
    8346           0 :                 io_rsrc_node_switch(ctx, data);
    8347             : 
    8348             :                 /* kill initial ref, already quiesced if zero */
    8349           0 :                 if (atomic_dec_and_test(&data->refs))
    8350             :                         break;
    8351           0 :                 mutex_unlock(&ctx->uring_lock);
    8352           0 :                 flush_delayed_work(&ctx->rsrc_put_work);
    8353           0 :                 ret = wait_for_completion_interruptible(&data->done);
    8354           0 :                 if (!ret) {
    8355           0 :                         mutex_lock(&ctx->uring_lock);
    8356           0 :                         if (atomic_read(&data->refs) > 0) {
    8357             :                                 /*
    8358             :                                  * it has been revived by another thread while
    8359             :                                  * we were unlocked
    8360             :                                  */
    8361           0 :                                 mutex_unlock(&ctx->uring_lock);
    8362             :                         } else {
    8363             :                                 break;
    8364             :                         }
    8365             :                 }
    8366             : 
    8367           0 :                 atomic_inc(&data->refs);
    8368             :                 /* wait for any work items that might complete data->done */
    8369           0 :                 flush_delayed_work(&ctx->rsrc_put_work);
    8370           0 :                 reinit_completion(&data->done);
    8371             : 
    8372           0 :                 ret = io_run_task_work_sig();
    8373           0 :                 mutex_lock(&ctx->uring_lock);
    8374           0 :         } while (ret >= 0);
    8375           0 :         data->quiesce = false;
    8376             : 
    8377           0 :         return ret;
    8378             : }
    8379             : 
    8380             : static u64 *io_get_tag_slot(struct io_rsrc_data *data, unsigned int idx)
    8381             : {
    8382           0 :         unsigned int off = idx & IO_RSRC_TAG_TABLE_MASK;
    8383           0 :         unsigned int table_idx = idx >> IO_RSRC_TAG_TABLE_SHIFT;
    8384             : 
    8385           0 :         return &data->tags[table_idx][off];
    8386             : }
    8387             : 
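io_get_tag_slot() is the lookup half of the two-level table built by io_alloc_page_table(): a flat index splits into a page index and an offset within that page. With 8-byte tags and 4 KiB pages, 512 tags fit per page, so the shift would be 9 and the mask 511; the real IO_RSRC_TAG_TABLE_* constants are defined elsewhere in this file, and the values below are an editor's assumption for illustration:

#include <stdint.h>

#define TAG_TABLE_SHIFT 9       /* assumed: ilog2(4096 / sizeof(uint64_t)) */
#define TAG_TABLE_MASK  ((1U << TAG_TABLE_SHIFT) - 1)

/* e.g. idx 1000 -> page 1, slot 488, i.e. &tags[1][488] */
static inline uint64_t *tag_slot(uint64_t **tags, unsigned int idx)
{
        return &tags[idx >> TAG_TABLE_SHIFT][idx & TAG_TABLE_MASK];
}
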
    8388           0 : static void io_rsrc_data_free(struct io_rsrc_data *data)
    8389             : {
    8390           0 :         size_t size = data->nr * sizeof(data->tags[0][0]);
    8391             : 
    8392           0 :         if (data->tags)
    8393           0 :                 io_free_page_table((void **)data->tags, size);
    8394           0 :         kfree(data);
    8395           0 : }
    8396             : 
    8397           0 : static __cold int io_rsrc_data_alloc(struct io_ring_ctx *ctx, rsrc_put_fn *do_put,
    8398             :                                      u64 __user *utags, unsigned nr,
    8399             :                                      struct io_rsrc_data **pdata)
    8400             : {
    8401             :         struct io_rsrc_data *data;
    8402           0 :         int ret = -ENOMEM;
    8403             :         unsigned i;
    8404             : 
    8405           0 :         data = kzalloc(sizeof(*data), GFP_KERNEL);
    8406           0 :         if (!data)
    8407             :                 return -ENOMEM;
    8408           0 :         data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0]));
    8409           0 :         if (!data->tags) {
    8410           0 :                 kfree(data);
    8411           0 :                 return -ENOMEM;
    8412             :         }
    8413             : 
    8414           0 :         data->nr = nr;
    8415           0 :         data->ctx = ctx;
    8416           0 :         data->do_put = do_put;
    8417           0 :         if (utags) {
    8418             :                 ret = -EFAULT;
    8419           0 :                 for (i = 0; i < nr; i++) {
    8420           0 :                         u64 *tag_slot = io_get_tag_slot(data, i);
    8421             : 
    8422           0 :                         if (copy_from_user(tag_slot, &utags[i],
    8423             :                                            sizeof(*tag_slot)))
    8424             :                                 goto fail;
    8425             :                 }
    8426             :         }
    8427             : 
    8428           0 :         atomic_set(&data->refs, 1);
    8429           0 :         init_completion(&data->done);
    8430           0 :         *pdata = data;
    8431           0 :         return 0;
    8432             : fail:
    8433           0 :         io_rsrc_data_free(data);
    8434           0 :         return ret;
    8435             : }
    8436             : 
    8437             : static bool io_alloc_file_tables(struct io_file_table *table, unsigned nr_files)
    8438             : {
    8439           0 :         table->files = kvcalloc(nr_files, sizeof(table->files[0]),
    8440             :                                 GFP_KERNEL_ACCOUNT);
    8441             :         return !!table->files;
    8442             : }
    8443             : 
    8444             : static void io_free_file_tables(struct io_file_table *table)
    8445             : {
    8446           0 :         kvfree(table->files);
    8447           0 :         table->files = NULL;
    8448             : }
    8449             : 
    8450           0 : static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
    8451             : {
    8452             : #if defined(CONFIG_UNIX)
    8453             :         if (ctx->ring_sock) {
    8454             :                 struct sock *sock = ctx->ring_sock->sk;
    8455             :                 struct sk_buff *skb;
    8456             : 
    8457             :                 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
    8458             :                         kfree_skb(skb);
    8459             :         }
    8460             : #else
    8461             :         int i;
    8462             : 
    8463           0 :         for (i = 0; i < ctx->nr_user_files; i++) {
    8464             :                 struct file *file;
    8465             : 
    8466           0 :                 file = io_file_from_index(ctx, i);
    8467           0 :                 if (file)
    8468           0 :                         fput(file);
    8469             :         }
    8470             : #endif
    8471           0 :         io_free_file_tables(&ctx->file_table);
    8472           0 :         io_rsrc_data_free(ctx->file_data);
    8473           0 :         ctx->file_data = NULL;
    8474           0 :         ctx->nr_user_files = 0;
    8475           0 : }
    8476             : 
    8477           0 : static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
    8478             : {
    8479             :         int ret;
    8480             : 
    8481           0 :         if (!ctx->file_data)
    8482             :                 return -ENXIO;
    8483           0 :         ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
    8484           0 :         if (!ret)
    8485           0 :                 __io_sqe_files_unregister(ctx);
    8486             :         return ret;
    8487             : }
    8488             : 
    8489           0 : static void io_sq_thread_unpark(struct io_sq_data *sqd)
    8490             :         __releases(&sqd->lock)
    8491             : {
    8492           0 :         WARN_ON_ONCE(sqd->thread == current);
    8493             : 
    8494             :         /*
    8495             :          * Clear, then re-set the bit if parks are still pending: a conditional
    8496             :          * clear_bit() would race with others bumping park_pending and setting it.
    8497             :          */
    8498           0 :         clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
    8499           0 :         if (atomic_dec_return(&sqd->park_pending))
    8500           0 :                 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
    8501           0 :         mutex_unlock(&sqd->lock);
    8502           0 : }
    8503             : 
    8504           0 : static void io_sq_thread_park(struct io_sq_data *sqd)
    8505             :         __acquires(&sqd->lock)
    8506             : {
    8507           0 :         WARN_ON_ONCE(sqd->thread == current);
    8508             : 
    8509           0 :         atomic_inc(&sqd->park_pending);
    8510           0 :         set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
    8511           0 :         mutex_lock(&sqd->lock);
    8512           0 :         if (sqd->thread)
    8513           0 :                 wake_up_process(sqd->thread);
    8514           0 : }
    8515             : 
    8516           0 : static void io_sq_thread_stop(struct io_sq_data *sqd)
    8517             : {
    8518           0 :         WARN_ON_ONCE(sqd->thread == current);
    8519           0 :         WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state));
    8520             : 
    8521           0 :         set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
    8522           0 :         mutex_lock(&sqd->lock);
    8523           0 :         if (sqd->thread)
    8524           0 :                 wake_up_process(sqd->thread);
    8525           0 :         mutex_unlock(&sqd->lock);
    8526           0 :         wait_for_completion(&sqd->exited);
    8527           0 : }
    8528             : 
    8529           0 : static void io_put_sq_data(struct io_sq_data *sqd)
    8530             : {
    8531           0 :         if (refcount_dec_and_test(&sqd->refs)) {
    8532           0 :                 WARN_ON_ONCE(atomic_read(&sqd->park_pending));
    8533             : 
    8534           0 :                 io_sq_thread_stop(sqd);
    8535           0 :                 kfree(sqd);
    8536             :         }
    8537           0 : }
    8538             : 
    8539           0 : static void io_sq_thread_finish(struct io_ring_ctx *ctx)
    8540             : {
    8541           0 :         struct io_sq_data *sqd = ctx->sq_data;
    8542             : 
    8543           0 :         if (sqd) {
    8544           0 :                 io_sq_thread_park(sqd);
    8545           0 :                 list_del_init(&ctx->sqd_list);
    8546           0 :                 io_sqd_update_thread_idle(sqd);
    8547           0 :                 io_sq_thread_unpark(sqd);
    8548             : 
    8549           0 :                 io_put_sq_data(sqd);
    8550           0 :                 ctx->sq_data = NULL;
    8551             :         }
    8552           0 : }
    8553             : 
    8554           0 : static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
    8555             : {
    8556             :         struct io_ring_ctx *ctx_attach;
    8557             :         struct io_sq_data *sqd;
    8558             :         struct fd f;
    8559             : 
    8560           0 :         f = fdget(p->wq_fd);
    8561           0 :         if (!f.file)
    8562             :                 return ERR_PTR(-ENXIO);
    8563           0 :         if (f.file->f_op != &io_uring_fops) {
    8564           0 :                 fdput(f);
    8565             :                 return ERR_PTR(-EINVAL);
    8566             :         }
    8567             : 
    8568           0 :         ctx_attach = f.file->private_data;
    8569           0 :         sqd = ctx_attach->sq_data;
    8570           0 :         if (!sqd) {
    8571           0 :                 fdput(f);
    8572             :                 return ERR_PTR(-EINVAL);
    8573             :         }
    8574           0 :         if (sqd->task_tgid != current->tgid) {
    8575           0 :                 fdput(f);
    8576             :                 return ERR_PTR(-EPERM);
    8577             :         }
    8578             : 
    8579           0 :         refcount_inc(&sqd->refs);
    8580           0 :         fdput(f);
    8581             :         return sqd;
    8582             : }
    8583             : 
    8584           0 : static struct io_sq_data *io_get_sq_data(struct io_uring_params *p,
    8585             :                                          bool *attached)
    8586             : {
    8587             :         struct io_sq_data *sqd;
    8588             : 
    8589           0 :         *attached = false;
    8590           0 :         if (p->flags & IORING_SETUP_ATTACH_WQ) {
    8591           0 :                 sqd = io_attach_sq_data(p);
    8592           0 :                 if (!IS_ERR(sqd)) {
    8593           0 :                         *attached = true;
    8594           0 :                         return sqd;
    8595             :                 }
    8596             :                 /* fall through for the EPERM case, set up a new sqd/task */
    8597           0 :                 if (PTR_ERR(sqd) != -EPERM)
    8598             :                         return sqd;
    8599             :         }
    8600             : 
    8601           0 :         sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
    8602           0 :         if (!sqd)
    8603             :                 return ERR_PTR(-ENOMEM);
    8604             : 
    8605           0 :         atomic_set(&sqd->park_pending, 0);
    8606           0 :         refcount_set(&sqd->refs, 1);
    8607           0 :         INIT_LIST_HEAD(&sqd->ctx_list);
    8608           0 :         mutex_init(&sqd->lock);
    8609           0 :         init_waitqueue_head(&sqd->wait);
    8610           0 :         init_completion(&sqd->exited);
    8611           0 :         return sqd;
    8612             : }
    8613             : 
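IORING_SETUP_ATTACH_WQ, handled by io_attach_sq_data() above, lets several rings share a single SQPOLL thread. An editor's sketch of the userspace side, creating a second ring attached to an existing one (`existing_fd` is illustrative):

/* Attach a new ring to an existing ring's SQPOLL thread (sketch). */
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

static int setup_attached_ring(int existing_fd)
{
        struct io_uring_params p;

        memset(&p, 0, sizeof(p));
        p.flags = IORING_SETUP_SQPOLL | IORING_SETUP_ATTACH_WQ;
        p.wq_fd = existing_fd;  /* ring whose SQPOLL thread is reused */
        return syscall(__NR_io_uring_setup, 64, &p);
}
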
    8614             : #if defined(CONFIG_UNIX)
    8615             : /*
    8616             :  * Ensure the UNIX gc is aware of our file set, so that the io_uring can be
    8617             :  * safely unregistered on process exit, even if there are reference loops
    8618             :  * among the registered files.
    8619             :  */
    8620             : static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
    8621             : {
    8622             :         struct sock *sk = ctx->ring_sock->sk;
    8623             :         struct scm_fp_list *fpl;
    8624             :         struct sk_buff *skb;
    8625             :         int i, nr_files;
    8626             : 
    8627             :         fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
    8628             :         if (!fpl)
    8629             :                 return -ENOMEM;
    8630             : 
    8631             :         skb = alloc_skb(0, GFP_KERNEL);
    8632             :         if (!skb) {
    8633             :                 kfree(fpl);
    8634             :                 return -ENOMEM;
    8635             :         }
    8636             : 
    8637             :         skb->sk = sk;
    8638             : 
    8639             :         nr_files = 0;
    8640             :         fpl->user = get_uid(current_user());
    8641             :         for (i = 0; i < nr; i++) {
    8642             :                 struct file *file = io_file_from_index(ctx, i + offset);
    8643             : 
    8644             :                 if (!file)
    8645             :                         continue;
    8646             :                 fpl->fp[nr_files] = get_file(file);
    8647             :                 unix_inflight(fpl->user, fpl->fp[nr_files]);
    8648             :                 nr_files++;
    8649             :         }
    8650             : 
    8651             :         if (nr_files) {
    8652             :                 fpl->max = SCM_MAX_FD;
    8653             :                 fpl->count = nr_files;
    8654             :                 UNIXCB(skb).fp = fpl;
    8655             :                 skb->destructor = unix_destruct_scm;
    8656             :                 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
    8657             :                 skb_queue_head(&sk->sk_receive_queue, skb);
    8658             : 
    8659             :                 for (i = 0; i < nr; i++) {
    8660             :                         struct file *file = io_file_from_index(ctx, i + offset);
    8661             : 
    8662             :                         if (file)
    8663             :                                 fput(file);
    8664             :                 }
    8665             :         } else {
    8666             :                 kfree_skb(skb);
    8667             :                 free_uid(fpl->user);
    8668             :                 kfree(fpl);
    8669             :         }
    8670             : 
    8671             :         return 0;
    8672             : }
    8673             : 
    8674             : /*
    8675             :  * If UNIX sockets are enabled, fd passing can cause a reference cycle which
    8676             :  * causes regular reference counting to break down. We rely on the UNIX
    8677             :  * garbage collection to take care of this problem for us.
    8678             :  */
    8679             : static int io_sqe_files_scm(struct io_ring_ctx *ctx)
    8680             : {
    8681             :         unsigned left, total;
    8682             :         int ret = 0;
    8683             : 
    8684             :         total = 0;
    8685             :         left = ctx->nr_user_files;
    8686             :         while (left) {
    8687             :                 unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
    8688             : 
    8689             :                 ret = __io_sqe_files_scm(ctx, this_files, total);
    8690             :                 if (ret)
    8691             :                         break;
    8692             :                 left -= this_files;
    8693             :                 total += this_files;
    8694             :         }
    8695             : 
    8696             :         if (!ret)
    8697             :                 return 0;
    8698             : 
    8699             :         while (total < ctx->nr_user_files) {
    8700             :                 struct file *file = io_file_from_index(ctx, total);
    8701             : 
    8702             :                 if (file)
    8703             :                         fput(file);
    8704             :                 total++;
    8705             :         }
    8706             : 
    8707             :         return ret;
    8708             : }
    8709             : #else
    8710             : static int io_sqe_files_scm(struct io_ring_ctx *ctx)
    8711             : {
    8712             :         return 0;
    8713             : }
    8714             : #endif
    8715             : 
    8716           0 : static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
    8717             : {
    8718           0 :         struct file *file = prsrc->file;
    8719             : #if defined(CONFIG_UNIX)
    8720             :         struct sock *sock = ctx->ring_sock->sk;
    8721             :         struct sk_buff_head list, *head = &sock->sk_receive_queue;
    8722             :         struct sk_buff *skb;
    8723             :         int i;
    8724             : 
    8725             :         __skb_queue_head_init(&list);
    8726             : 
    8727             :         /*
    8728             :          * Find the skb that holds this file in its SCM_RIGHTS. When found,
    8729             :          * remove this entry and rearrange the file array.
    8730             :          */
    8731             :         skb = skb_dequeue(head);
    8732             :         while (skb) {
    8733             :                 struct scm_fp_list *fp;
    8734             : 
    8735             :                 fp = UNIXCB(skb).fp;
    8736             :                 for (i = 0; i < fp->count; i++) {
    8737             :                         int left;
    8738             : 
    8739             :                         if (fp->fp[i] != file)
    8740             :                                 continue;
    8741             : 
    8742             :                         unix_notinflight(fp->user, fp->fp[i]);
    8743             :                         left = fp->count - 1 - i;
    8744             :                         if (left) {
    8745             :                                 memmove(&fp->fp[i], &fp->fp[i + 1],
    8746             :                                                 left * sizeof(struct file *));
    8747             :                         }
    8748             :                         fp->count--;
    8749             :                         if (!fp->count) {
    8750             :                                 kfree_skb(skb);
    8751             :                                 skb = NULL;
    8752             :                         } else {
    8753             :                                 __skb_queue_tail(&list, skb);
    8754             :                         }
    8755             :                         fput(file);
    8756             :                         file = NULL;
    8757             :                         break;
    8758             :                 }
    8759             : 
    8760             :                 if (!file)
    8761             :                         break;
    8762             : 
    8763             :                 __skb_queue_tail(&list, skb);
    8764             : 
    8765             :                 skb = skb_dequeue(head);
    8766             :         }
    8767             : 
    8768             :         if (skb_peek(&list)) {
    8769             :                 spin_lock_irq(&head->lock);
    8770             :                 while ((skb = __skb_dequeue(&list)) != NULL)
    8771             :                         __skb_queue_tail(head, skb);
    8772             :                 spin_unlock_irq(&head->lock);
    8773             :         }
    8774             : #else
    8775           0 :         fput(file);
    8776             : #endif
    8777           0 : }
    8778             : 
    8779           0 : static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
    8780             : {
    8781           0 :         struct io_rsrc_data *rsrc_data = ref_node->rsrc_data;
    8782           0 :         struct io_ring_ctx *ctx = rsrc_data->ctx;
    8783             :         struct io_rsrc_put *prsrc, *tmp;
    8784             : 
    8785           0 :         list_for_each_entry_safe(prsrc, tmp, &ref_node->rsrc_list, list) {
    8786           0 :                 list_del(&prsrc->list);
    8787             : 
    8788           0 :                 if (prsrc->tag) {
    8789           0 :                         bool lock_ring = ctx->flags & IORING_SETUP_IOPOLL;
    8790             : 
    8791           0 :                         io_ring_submit_lock(ctx, lock_ring);
    8792           0 :                         spin_lock(&ctx->completion_lock);
    8793           0 :                         io_fill_cqe_aux(ctx, prsrc->tag, 0, 0);
    8794           0 :                         io_commit_cqring(ctx);
    8795           0 :                         spin_unlock(&ctx->completion_lock);
    8796           0 :                         io_cqring_ev_posted(ctx);
    8797           0 :                         io_ring_submit_unlock(ctx, lock_ring);
    8798             :                 }
    8799             : 
    8800           0 :                 rsrc_data->do_put(ctx, prsrc);
    8801           0 :                 kfree(prsrc);
    8802             :         }
    8803             : 
    8804           0 :         io_rsrc_node_destroy(ref_node);
    8805           0 :         if (atomic_dec_and_test(&rsrc_data->refs))
    8806           0 :                 complete(&rsrc_data->done);
    8807           0 : }
    8808             : 
    8809           0 : static void io_rsrc_put_work(struct work_struct *work)
    8810             : {
    8811             :         struct io_ring_ctx *ctx;
    8812             :         struct llist_node *node;
    8813             : 
    8814           0 :         ctx = container_of(work, struct io_ring_ctx, rsrc_put_work.work);
    8815           0 :         node = llist_del_all(&ctx->rsrc_put_llist);
    8816             : 
    8817           0 :         while (node) {
    8818             :                 struct io_rsrc_node *ref_node;
    8819           0 :                 struct llist_node *next = node->next;
    8820             : 
    8821           0 :                 ref_node = llist_entry(node, struct io_rsrc_node, llist);
    8822           0 :                 __io_rsrc_put_work(ref_node);
    8823           0 :                 node = next;
    8824             :         }
    8825           0 : }
    8826             : 
    8827           0 : static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
    8828             :                                  unsigned nr_args, u64 __user *tags)
    8829             : {
    8830           0 :         __s32 __user *fds = (__s32 __user *) arg;
    8831             :         struct file *file;
    8832             :         int fd, ret;
    8833             :         unsigned i;
    8834             : 
    8835           0 :         if (ctx->file_data)
    8836             :                 return -EBUSY;
    8837           0 :         if (!nr_args)
    8838             :                 return -EINVAL;
    8839           0 :         if (nr_args > IORING_MAX_FIXED_FILES)
    8840             :                 return -EMFILE;
    8841           0 :         if (nr_args > rlimit(RLIMIT_NOFILE))
    8842             :                 return -EMFILE;
    8843           0 :         ret = io_rsrc_node_switch_start(ctx);
    8844           0 :         if (ret)
    8845             :                 return ret;
    8846           0 :         ret = io_rsrc_data_alloc(ctx, io_rsrc_file_put, tags, nr_args,
    8847             :                                  &ctx->file_data);
    8848           0 :         if (ret)
    8849             :                 return ret;
    8850             : 
    8851           0 :         ret = -ENOMEM;
    8852           0 :         if (!io_alloc_file_tables(&ctx->file_table, nr_args))
    8853             :                 goto out_free;
    8854             : 
    8855           0 :         for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
    8856           0 :                 if (copy_from_user(&fd, &fds[i], sizeof(fd))) {
    8857             :                         ret = -EFAULT;
    8858             :                         goto out_fput;
    8859             :                 }
    8860             :                 /* allow sparse sets */
    8861           0 :                 if (fd == -1) {
    8862           0 :                         ret = -EINVAL;
    8863           0 :                         if (unlikely(*io_get_tag_slot(ctx->file_data, i)))
    8864             :                                 goto out_fput;
    8865           0 :                         continue;
    8866             :                 }
    8867             : 
    8868           0 :                 file = fget(fd);
    8869           0 :                 ret = -EBADF;
    8870           0 :                 if (unlikely(!file))
    8871             :                         goto out_fput;
    8872             : 
    8873             :                 /*
    8874             :                  * Don't allow io_uring instances to be registered. If UNIX
    8875             :                  * isn't enabled, then this causes a reference cycle and this
    8876             :                  * instance can never get freed. If UNIX is enabled we'll
    8877             :                  * handle it just fine, but there's still no point in allowing
    8878             :                  * a ring fd as it doesn't support regular read/write anyway.
    8879             :                  */
    8880           0 :                 if (file->f_op == &io_uring_fops) {
    8881           0 :                         fput(file);
    8882           0 :                         goto out_fput;
    8883             :                 }
    8884           0 :                 io_fixed_file_set(io_fixed_file_slot(&ctx->file_table, i), file);
    8885             :         }
    8886             : 
    8887           0 :         ret = io_sqe_files_scm(ctx);
    8888             :         if (ret) {
    8889             :                 __io_sqe_files_unregister(ctx);
    8890             :                 return ret;
    8891             :         }
    8892             : 
    8893           0 :         io_rsrc_node_switch(ctx, NULL);
    8894           0 :         return ret;
    8895             : out_fput:
    8896           0 :         for (i = 0; i < ctx->nr_user_files; i++) {
    8897           0 :                 file = io_file_from_index(ctx, i);
    8898           0 :                 if (file)
    8899           0 :                         fput(file);
    8900             :         }
    8901           0 :         io_free_file_tables(&ctx->file_table);
    8902           0 :         ctx->nr_user_files = 0;
    8903             : out_free:
    8904           0 :         io_rsrc_data_free(ctx->file_data);
    8905           0 :         ctx->file_data = NULL;
    8906           0 :         return ret;
    8907             : }
    8908             : 
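io_sqe_files_register() is the backend of io_uring_register(2) with the IORING_REGISTER_FILES opcode. An editor's sketch that exercises both a populated slot and the sparse (fd == -1) case allowed above (`ring_fd` and `data_fd` are illustrative):

/* Register a fixed-file table: slot 0 populated, slots 1-3 sparse. */
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

static int register_files(int ring_fd, int data_fd)
{
        int fds[4] = { data_fd, -1, -1, -1 };

        return syscall(__NR_io_uring_register, ring_fd,
                       IORING_REGISTER_FILES, fds, 4);
}
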
    8909             : static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
    8910             :                                 int index)
    8911             : {
    8912             : #if defined(CONFIG_UNIX)
    8913             :         struct sock *sock = ctx->ring_sock->sk;
    8914             :         struct sk_buff_head *head = &sock->sk_receive_queue;
    8915             :         struct sk_buff *skb;
    8916             : 
    8917             :         /*
    8918             :          * See if we can merge this file into an existing skb SCM_RIGHTS
    8919             :          * file set. If there's no room, fall back to allocating a new skb
    8920             :          * and filling it in.
    8921             :          */
    8922             :         spin_lock_irq(&head->lock);
    8923             :         skb = skb_peek(head);
    8924             :         if (skb) {
    8925             :                 struct scm_fp_list *fpl = UNIXCB(skb).fp;
    8926             : 
    8927             :                 if (fpl->count < SCM_MAX_FD) {
    8928             :                         __skb_unlink(skb, head);
    8929             :                         spin_unlock_irq(&head->lock);
    8930             :                         fpl->fp[fpl->count] = get_file(file);
    8931             :                         unix_inflight(fpl->user, fpl->fp[fpl->count]);
    8932             :                         fpl->count++;
    8933             :                         spin_lock_irq(&head->lock);
    8934             :                         __skb_queue_head(head, skb);
    8935             :                 } else {
    8936             :                         skb = NULL;
    8937             :                 }
    8938             :         }
    8939             :         spin_unlock_irq(&head->lock);
    8940             : 
    8941             :         if (skb) {
    8942             :                 fput(file);
    8943             :                 return 0;
    8944             :         }
    8945             : 
    8946             :         return __io_sqe_files_scm(ctx, 1, index);
    8947             : #else
    8948             :         return 0;
    8949             : #endif
    8950             : }
    8951             : 
    8952           0 : static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
    8953             :                                  struct io_rsrc_node *node, void *rsrc)
    8954             : {
    8955           0 :         u64 *tag_slot = io_get_tag_slot(data, idx);
    8956             :         struct io_rsrc_put *prsrc;
    8957             : 
    8958           0 :         prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
    8959           0 :         if (!prsrc)
    8960             :                 return -ENOMEM;
    8961             : 
    8962           0 :         prsrc->tag = *tag_slot;
    8963           0 :         *tag_slot = 0;
    8964           0 :         prsrc->rsrc = rsrc;
    8965           0 :         list_add(&prsrc->list, &node->rsrc_list);
    8966           0 :         return 0;
    8967             : }
    8968             : 
    8969           0 : static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
    8970             :                                  unsigned int issue_flags, u32 slot_index)
    8971             : {
    8972           0 :         struct io_ring_ctx *ctx = req->ctx;
    8973           0 :         bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
    8974           0 :         bool needs_switch = false;
    8975             :         struct io_fixed_file *file_slot;
    8976           0 :         int ret = -EBADF;
    8977             : 
    8978           0 :         io_ring_submit_lock(ctx, needs_lock);
    8979           0 :         if (file->f_op == &io_uring_fops)
    8980             :                 goto err;
    8981           0 :         ret = -ENXIO;
    8982           0 :         if (!ctx->file_data)
    8983             :                 goto err;
    8984           0 :         ret = -EINVAL;
    8985           0 :         if (slot_index >= ctx->nr_user_files)
    8986             :                 goto err;
    8987             : 
    8988           0 :         slot_index = array_index_nospec(slot_index, ctx->nr_user_files);
    8989           0 :         file_slot = io_fixed_file_slot(&ctx->file_table, slot_index);
    8990             : 
    8991           0 :         if (file_slot->file_ptr) {
    8992             :                 struct file *old_file;
    8993             : 
    8994           0 :                 ret = io_rsrc_node_switch_start(ctx);
    8995           0 :                 if (ret)
    8996             :                         goto err;
    8997             : 
    8998           0 :                 old_file = (struct file *)(file_slot->file_ptr & FFS_MASK);
    8999           0 :                 ret = io_queue_rsrc_removal(ctx->file_data, slot_index,
    9000             :                                             ctx->rsrc_node, old_file);
    9001           0 :                 if (ret)
    9002             :                         goto err;
    9003           0 :                 file_slot->file_ptr = 0;
    9004           0 :                 needs_switch = true;
    9005             :         }
    9006             : 
    9007           0 :         *io_get_tag_slot(ctx->file_data, slot_index) = 0;
    9008           0 :         io_fixed_file_set(file_slot, file);
    9009           0 :         ret = io_sqe_file_register(ctx, file, slot_index);
    9010             :         if (ret) {
    9011             :                 file_slot->file_ptr = 0;
    9012             :                 goto err;
    9013             :         }
    9014             : 
    9015           0 :         ret = 0;
    9016             : err:
    9017           0 :         if (needs_switch)
    9018           0 :                 io_rsrc_node_switch(ctx, ctx->file_data);
    9019           0 :         io_ring_submit_unlock(ctx, needs_lock);
    9020           0 :         if (ret)
    9021           0 :                 fput(file);
    9022           0 :         return ret;
    9023             : }
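
For illustration, a minimal userspace sketch of the path that lands in
io_install_fixed_file(): an accept that targets a fixed slot. This assumes
liburing and its io_uring_prep_accept_direct() helper; slot 0 must belong to
a file table registered earlier with io_uring_register_files(), and
accept_into_slot0() is an illustrative name.

#include <liburing.h>

static int accept_into_slot0(struct io_uring *ring, int listen_fd)
{
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
        struct io_uring_cqe *cqe;
        int ret;

        /* install the accepted socket directly into fixed slot 0 */
        io_uring_prep_accept_direct(sqe, listen_fd, NULL, NULL, 0, 0);
        io_uring_submit(ring);

        ret = io_uring_wait_cqe(ring, &cqe);
        if (ret < 0)
                return ret;
        ret = cqe->res;         /* 0 on success for a direct accept */
        io_uring_cqe_seen(ring, cqe);
        return ret;
}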
    9024             : 
    9025           0 : static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
    9026             : {
    9027           0 :         unsigned int offset = req->close.file_slot - 1;
    9028           0 :         struct io_ring_ctx *ctx = req->ctx;
    9029           0 :         bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
    9030             :         struct io_fixed_file *file_slot;
    9031             :         struct file *file;
    9032             :         int ret;
    9033             : 
    9034           0 :         io_ring_submit_lock(ctx, needs_lock);
    9035           0 :         ret = -ENXIO;
    9036           0 :         if (unlikely(!ctx->file_data))
    9037             :                 goto out;
    9038           0 :         ret = -EINVAL;
    9039           0 :         if (offset >= ctx->nr_user_files)
    9040             :                 goto out;
    9041           0 :         ret = io_rsrc_node_switch_start(ctx);
    9042           0 :         if (ret)
    9043             :                 goto out;
    9044             : 
    9045           0 :         offset = array_index_nospec(offset, ctx->nr_user_files);
    9046           0 :         file_slot = io_fixed_file_slot(&ctx->file_table, offset);
    9047           0 :         ret = -EBADF;
    9048           0 :         if (!file_slot->file_ptr)
    9049             :                 goto out;
    9050             : 
    9051           0 :         file = (struct file *)(file_slot->file_ptr & FFS_MASK);
    9052           0 :         ret = io_queue_rsrc_removal(ctx->file_data, offset, ctx->rsrc_node, file);
    9053           0 :         if (ret)
    9054             :                 goto out;
    9055             : 
    9056           0 :         file_slot->file_ptr = 0;
    9057           0 :         io_rsrc_node_switch(ctx, ctx->file_data);
    9058           0 :         ret = 0;
    9059             : out:
    9060           0 :         io_ring_submit_unlock(ctx, needs_lock);
    9061           0 :         return ret;
    9062             : }
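
The userspace-visible counterpart of io_close_fixed() is a close that names a
slot rather than a regular fd. A sketch assuming a liburing recent enough to
provide io_uring_prep_close_direct() (close_slot0() is an illustrative name):

#include <liburing.h>

static int close_slot0(struct io_uring *ring)
{
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
        struct io_uring_cqe *cqe;
        int ret;

        io_uring_prep_close_direct(sqe, 0);     /* serviced by io_close_fixed() */
        io_uring_submit(ring);
        ret = io_uring_wait_cqe(ring, &cqe);
        if (ret < 0)
                return ret;
        ret = cqe->res;
        io_uring_cqe_seen(ring, cqe);
        return ret;
}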
    9063             : 
    9064           0 : static int __io_sqe_files_update(struct io_ring_ctx *ctx,
    9065             :                                  struct io_uring_rsrc_update2 *up,
    9066             :                                  unsigned nr_args)
    9067             : {
    9068           0 :         u64 __user *tags = u64_to_user_ptr(up->tags);
    9069           0 :         __s32 __user *fds = u64_to_user_ptr(up->data);
    9070           0 :         struct io_rsrc_data *data = ctx->file_data;
    9071             :         struct io_fixed_file *file_slot;
    9072             :         struct file *file;
    9073           0 :         int fd, i, err = 0;
    9074             :         unsigned int done;
    9075           0 :         bool needs_switch = false;
    9076             : 
    9077           0 :         if (!ctx->file_data)
    9078             :                 return -ENXIO;
    9079           0 :         if (up->offset + nr_args > ctx->nr_user_files)
    9080             :                 return -EINVAL;
    9081             : 
    9082           0 :         for (done = 0; done < nr_args; done++) {
    9083           0 :                 u64 tag = 0;
    9084             : 
    9085           0 :                 if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
    9086           0 :                     copy_from_user(&fd, &fds[done], sizeof(fd))) {
    9087             :                         err = -EFAULT;
    9088           0 :                         break;
    9089             :                 }
    9090           0 :                 if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) {
    9091             :                         err = -EINVAL;
    9092             :                         break;
    9093             :                 }
    9094           0 :                 if (fd == IORING_REGISTER_FILES_SKIP)
    9095           0 :                         continue;
    9096             : 
    9097           0 :                 i = array_index_nospec(up->offset + done, ctx->nr_user_files);
    9098           0 :                 file_slot = io_fixed_file_slot(&ctx->file_table, i);
    9099             : 
    9100           0 :                 if (file_slot->file_ptr) {
    9101           0 :                         file = (struct file *)(file_slot->file_ptr & FFS_MASK);
    9102           0 :                         err = io_queue_rsrc_removal(data, i, ctx->rsrc_node, file);
    9103           0 :                         if (err)
    9104             :                                 break;
    9105           0 :                         file_slot->file_ptr = 0;
    9106           0 :                         needs_switch = true;
    9107             :                 }
    9108           0 :                 if (fd != -1) {
    9109           0 :                         file = fget(fd);
    9110           0 :                         if (!file) {
    9111             :                                 err = -EBADF;
    9112             :                                 break;
    9113             :                         }
    9114             :                         /*
    9115             :                          * Don't allow io_uring instances to be registered. If
    9116             :                          * UNIX isn't enabled, then this causes a reference
    9117             :                          * cycle and this instance can never get freed. If UNIX
    9118             :                          * is enabled we'll handle it just fine, but there's
    9119             :                          * still no point in allowing a ring fd as it doesn't
    9120             :                          * support regular read/write anyway.
    9121             :                          */
    9122           0 :                         if (file->f_op == &io_uring_fops) {
    9123           0 :                                 fput(file);
    9124           0 :                                 err = -EBADF;
    9125           0 :                                 break;
    9126             :                         }
    9127           0 :                         *io_get_tag_slot(data, i) = tag;
    9128           0 :                         io_fixed_file_set(file_slot, file);
    9129           0 :                         err = io_sqe_file_register(ctx, file, i);
    9130             :                         if (err) {
    9131             :                                 file_slot->file_ptr = 0;
    9132             :                                 fput(file);
    9133             :                                 break;
    9134             :                         }
    9135             :                 }
    9136             :         }
    9137             : 
    9138           0 :         if (needs_switch)
    9139           0 :                 io_rsrc_node_switch(ctx, data);
    9140           0 :         return done ? done : err;
    9141             : }
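
__io_sqe_files_update() services IORING_REGISTER_FILES_UPDATE. A hedged
sketch of driving it through liburing, including the SKIP sentinel handled
above (update_two_slots() is an illustrative name):

#include <liburing.h>

/* Replace fixed slot 3 with new_fd and leave slot 4 untouched. On success
 * the call returns the number of slots processed (2 here). */
static int update_two_slots(struct io_uring *ring, int new_fd)
{
        int fds[2] = { new_fd, IORING_REGISTER_FILES_SKIP };

        return io_uring_register_files_update(ring, 3, fds, 2);
}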
    9142             : 
    9143           0 : static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
    9144             :                                         struct task_struct *task)
    9145             : {
    9146             :         struct io_wq_hash *hash;
    9147             :         struct io_wq_data data;
    9148             :         unsigned int concurrency;
    9149             : 
    9150           0 :         mutex_lock(&ctx->uring_lock);
    9151           0 :         hash = ctx->hash_map;
    9152           0 :         if (!hash) {
    9153           0 :                 hash = kzalloc(sizeof(*hash), GFP_KERNEL);
    9154           0 :                 if (!hash) {
    9155           0 :                         mutex_unlock(&ctx->uring_lock);
    9156           0 :                         return ERR_PTR(-ENOMEM);
    9157             :                 }
    9158           0 :                 refcount_set(&hash->refs, 1);
    9159           0 :                 init_waitqueue_head(&hash->wait);
    9160           0 :                 ctx->hash_map = hash;
    9161             :         }
    9162           0 :         mutex_unlock(&ctx->uring_lock);
    9163             : 
    9164           0 :         data.hash = hash;
    9165           0 :         data.task = task;
    9166           0 :         data.free_work = io_wq_free_work;
    9167           0 :         data.do_work = io_wq_submit_work;
    9168             : 
     9169             :         /* Use QD, or 4 * number of CPUs, whichever is smaller */
    9170           0 :         concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
    9171             : 
    9172           0 :         return io_wq_create(concurrency, &data);
    9173             : }
    9174             : 
    9175           0 : static __cold int io_uring_alloc_task_context(struct task_struct *task,
    9176             :                                               struct io_ring_ctx *ctx)
    9177             : {
    9178             :         struct io_uring_task *tctx;
    9179             :         int ret;
    9180             : 
    9181           0 :         tctx = kzalloc(sizeof(*tctx), GFP_KERNEL);
    9182           0 :         if (unlikely(!tctx))
    9183             :                 return -ENOMEM;
    9184             : 
    9185           0 :         tctx->registered_rings = kcalloc(IO_RINGFD_REG_MAX,
    9186             :                                          sizeof(struct file *), GFP_KERNEL);
    9187           0 :         if (unlikely(!tctx->registered_rings)) {
    9188           0 :                 kfree(tctx);
    9189           0 :                 return -ENOMEM;
    9190             :         }
    9191             : 
    9192           0 :         ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
    9193             :         if (unlikely(ret)) {
    9194             :                 kfree(tctx->registered_rings);
    9195             :                 kfree(tctx);
    9196             :                 return ret;
    9197             :         }
    9198             : 
    9199           0 :         tctx->io_wq = io_init_wq_offload(ctx, task);
    9200           0 :         if (IS_ERR(tctx->io_wq)) {
    9201           0 :                 ret = PTR_ERR(tctx->io_wq);
    9202           0 :                 percpu_counter_destroy(&tctx->inflight);
    9203           0 :                 kfree(tctx->registered_rings);
    9204           0 :                 kfree(tctx);
    9205           0 :                 return ret;
    9206             :         }
    9207             : 
    9208           0 :         xa_init(&tctx->xa);
    9209           0 :         init_waitqueue_head(&tctx->wait);
    9210           0 :         atomic_set(&tctx->in_idle, 0);
    9211           0 :         task->io_uring = tctx;
    9212           0 :         spin_lock_init(&tctx->task_lock);
    9213           0 :         INIT_WQ_LIST(&tctx->task_list);
    9214           0 :         INIT_WQ_LIST(&tctx->prior_task_list);
    9215           0 :         init_task_work(&tctx->task_work, tctx_task_work);
    9216           0 :         return 0;
    9217             : }
    9218             : 
    9219           0 : void __io_uring_free(struct task_struct *tsk)
    9220             : {
    9221           0 :         struct io_uring_task *tctx = tsk->io_uring;
    9222             : 
    9223           0 :         WARN_ON_ONCE(!xa_empty(&tctx->xa));
    9224           0 :         WARN_ON_ONCE(tctx->io_wq);
    9225           0 :         WARN_ON_ONCE(tctx->cached_refs);
    9226             : 
    9227           0 :         kfree(tctx->registered_rings);
    9228           0 :         percpu_counter_destroy(&tctx->inflight);
    9229           0 :         kfree(tctx);
    9230           0 :         tsk->io_uring = NULL;
    9231           0 : }
    9232             : 
    9233           0 : static __cold int io_sq_offload_create(struct io_ring_ctx *ctx,
    9234             :                                        struct io_uring_params *p)
    9235             : {
    9236             :         int ret;
    9237             : 
     9238             :         /* For compatibility, keep failing on an invalid attach attempt */
    9239           0 :         if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) ==
    9240             :                                 IORING_SETUP_ATTACH_WQ) {
    9241             :                 struct fd f;
    9242             : 
    9243           0 :                 f = fdget(p->wq_fd);
    9244           0 :                 if (!f.file)
    9245           0 :                         return -ENXIO;
    9246           0 :                 if (f.file->f_op != &io_uring_fops) {
    9247           0 :                         fdput(f);
    9248             :                         return -EINVAL;
    9249             :                 }
    9250           0 :                 fdput(f);
    9251             :         }
    9252           0 :         if (ctx->flags & IORING_SETUP_SQPOLL) {
    9253             :                 struct task_struct *tsk;
    9254             :                 struct io_sq_data *sqd;
    9255             :                 bool attached;
    9256             : 
    9257           0 :                 ret = security_uring_sqpoll();
    9258             :                 if (ret)
    9259           0 :                         return ret;
    9260             : 
    9261           0 :                 sqd = io_get_sq_data(p, &attached);
    9262           0 :                 if (IS_ERR(sqd)) {
    9263           0 :                         ret = PTR_ERR(sqd);
    9264           0 :                         goto err;
    9265             :                 }
    9266             : 
    9267           0 :                 ctx->sq_creds = get_current_cred();
    9268           0 :                 ctx->sq_data = sqd;
    9269           0 :                 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
    9270           0 :                 if (!ctx->sq_thread_idle)
    9271           0 :                         ctx->sq_thread_idle = HZ;
    9272             : 
    9273           0 :                 io_sq_thread_park(sqd);
    9274           0 :                 list_add(&ctx->sqd_list, &sqd->ctx_list);
    9275           0 :                 io_sqd_update_thread_idle(sqd);
    9276             :                 /* don't attach to a dying SQPOLL thread, would be racy */
    9277           0 :                 ret = (attached && !sqd->thread) ? -ENXIO : 0;
    9278           0 :                 io_sq_thread_unpark(sqd);
    9279             : 
    9280           0 :                 if (ret < 0)
    9281             :                         goto err;
    9282           0 :                 if (attached)
    9283             :                         return 0;
    9284             : 
    9285           0 :                 if (p->flags & IORING_SETUP_SQ_AFF) {
    9286           0 :                         int cpu = p->sq_thread_cpu;
    9287             : 
    9288           0 :                         ret = -EINVAL;
    9289           0 :                         if (cpu >= nr_cpu_ids || !cpu_online(cpu))
    9290             :                                 goto err_sqpoll;
    9291           0 :                         sqd->sq_cpu = cpu;
    9292             :                 } else {
    9293           0 :                         sqd->sq_cpu = -1;
    9294             :                 }
    9295             : 
    9296           0 :                 sqd->task_pid = current->pid;
    9297           0 :                 sqd->task_tgid = current->tgid;
    9298           0 :                 tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE);
    9299           0 :                 if (IS_ERR(tsk)) {
    9300           0 :                         ret = PTR_ERR(tsk);
    9301           0 :                         goto err_sqpoll;
    9302             :                 }
    9303             : 
    9304           0 :                 sqd->thread = tsk;
    9305           0 :                 ret = io_uring_alloc_task_context(tsk, ctx);
    9306           0 :                 wake_up_new_task(tsk);
    9307           0 :                 if (ret)
    9308             :                         goto err;
    9309           0 :         } else if (p->flags & IORING_SETUP_SQ_AFF) {
    9310             :                 /* Can't have SQ_AFF without SQPOLL */
    9311             :                 ret = -EINVAL;
    9312             :                 goto err;
    9313             :         }
    9314             : 
    9315             :         return 0;
    9316             : err_sqpoll:
    9317           0 :         complete(&ctx->sq_data->exited);
    9318             : err:
    9319           0 :         io_sq_thread_finish(ctx);
    9320           0 :         return ret;
    9321             : }
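
io_sq_offload_create() runs at ring setup. A minimal sketch of requesting an
SQPOLL ring from userspace, assuming liburing's io_uring_queue_init_params()
(note that SQPOLL may need elevated privileges on older kernels):

#include <liburing.h>
#include <string.h>

static int setup_sqpoll_ring(struct io_uring *ring)
{
        struct io_uring_params p;

        memset(&p, 0, sizeof(p));
        p.flags = IORING_SETUP_SQPOLL;
        p.sq_thread_idle = 2000;        /* ms before the SQ thread sleeps */
        /* to pin the thread: p.flags |= IORING_SETUP_SQ_AFF and set
         * p.sq_thread_cpu to an online CPU, as validated above */
        return io_uring_queue_init_params(64, ring, &p);
}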
    9322             : 
    9323             : static inline void __io_unaccount_mem(struct user_struct *user,
    9324             :                                       unsigned long nr_pages)
    9325             : {
    9326           0 :         atomic_long_sub(nr_pages, &user->locked_vm);
    9327             : }
    9328             : 
    9329             : static inline int __io_account_mem(struct user_struct *user,
    9330             :                                    unsigned long nr_pages)
    9331             : {
    9332             :         unsigned long page_limit, cur_pages, new_pages;
    9333             : 
    9334             :         /* Don't allow more pages than we can safely lock */
    9335           0 :         page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
    9336             : 
    9337             :         do {
    9338           0 :                 cur_pages = atomic_long_read(&user->locked_vm);
    9339           0 :                 new_pages = cur_pages + nr_pages;
    9340           0 :                 if (new_pages > page_limit)
    9341             :                         return -ENOMEM;
    9342           0 :         } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
    9343           0 :                                         new_pages) != cur_pages);
    9344             : 
    9345             :         return 0;
    9346             : }
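
__io_account_mem() charges locked pages with a lock-free retry loop rather
than a lock. The same pattern in portable C11 atomics, for illustration only
(charge_pages() and its parameters are hypothetical names):

#include <stdatomic.h>
#include <stdbool.h>

static bool charge_pages(atomic_ulong *locked, unsigned long nr_pages,
                         unsigned long limit)
{
        unsigned long cur = atomic_load(locked);

        do {
                if (cur + nr_pages > limit)
                        return false;   /* would exceed the memlock limit */
                /* retry if another thread changed *locked under us */
        } while (!atomic_compare_exchange_weak(locked, &cur,
                                               cur + nr_pages));
        return true;
}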
    9347             : 
    9348             : static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
    9349             : {
    9350           0 :         if (ctx->user)
    9351           0 :                 __io_unaccount_mem(ctx->user, nr_pages);
    9352             : 
    9353           0 :         if (ctx->mm_account)
    9354           0 :                 atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
    9355             : }
    9356             : 
    9357             : static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
    9358             : {
    9359             :         int ret;
    9360             : 
    9361           0 :         if (ctx->user) {
    9362           0 :                 ret = __io_account_mem(ctx->user, nr_pages);
    9363           0 :                 if (ret)
    9364             :                         return ret;
    9365             :         }
    9366             : 
    9367           0 :         if (ctx->mm_account)
    9368           0 :                 atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
    9369             : 
    9370             :         return 0;
    9371             : }
    9372             : 
    9373           0 : static void io_mem_free(void *ptr)
    9374             : {
    9375             :         struct page *page;
    9376             : 
    9377           0 :         if (!ptr)
    9378             :                 return;
    9379             : 
    9380           0 :         page = virt_to_head_page(ptr);
    9381           0 :         if (put_page_testzero(page))
    9382           0 :                 free_compound_page(page);
    9383             : }
    9384             : 
    9385           0 : static void *io_mem_alloc(size_t size)
    9386             : {
    9387           0 :         gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP;
    9388             : 
    9389           0 :         return (void *) __get_free_pages(gfp, get_order(size));
    9390             : }
    9391             : 
    9392             : static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
    9393             :                                 size_t *sq_offset)
    9394             : {
    9395             :         struct io_rings *rings;
    9396             :         size_t off, sq_array_size;
    9397             : 
    9398           0 :         off = struct_size(rings, cqes, cq_entries);
    9399           0 :         if (off == SIZE_MAX)
    9400             :                 return SIZE_MAX;
    9401             : 
    9402             : #ifdef CONFIG_SMP
    9403             :         off = ALIGN(off, SMP_CACHE_BYTES);
    9404             :         if (off == 0)
    9405             :                 return SIZE_MAX;
    9406             : #endif
    9407             : 
    9408             :         if (sq_offset)
    9409           0 :                 *sq_offset = off;
    9410             : 
    9411           0 :         sq_array_size = array_size(sizeof(u32), sq_entries);
    9412           0 :         if (sq_array_size == SIZE_MAX)
    9413             :                 return SIZE_MAX;
    9414             : 
    9415           0 :         if (check_add_overflow(off, sq_array_size, &off))
    9416             :                 return SIZE_MAX;
    9417             : 
    9418             :         return off;
    9419             : }
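
rings_size() composes the allocation size defensively so a huge entry count
cannot wrap. The same overflow-safe pattern, sketched with the GCC/Clang
builtins that check_add_overflow() and array_size() wrap (safe_total() is a
hypothetical name):

#include <stddef.h>
#include <stdint.h>

static size_t safe_total(size_t hdr, size_t entry, size_t n)
{
        size_t arr, total;

        if (__builtin_mul_overflow(entry, n, &arr))
                return SIZE_MAX;        /* n entries would overflow */
        if (__builtin_add_overflow(hdr, arr, &total))
                return SIZE_MAX;        /* header + array would overflow */
        return total;
}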
    9420             : 
    9421           0 : static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
    9422             : {
    9423           0 :         struct io_mapped_ubuf *imu = *slot;
    9424             :         unsigned int i;
    9425             : 
    9426           0 :         if (imu != ctx->dummy_ubuf) {
    9427           0 :                 for (i = 0; i < imu->nr_bvecs; i++)
    9428           0 :                         unpin_user_page(imu->bvec[i].bv_page);
    9429           0 :                 if (imu->acct_pages)
    9430           0 :                         io_unaccount_mem(ctx, imu->acct_pages);
    9431           0 :                 kvfree(imu);
    9432             :         }
    9433           0 :         *slot = NULL;
    9434           0 : }
    9435             : 
    9436           0 : static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
    9437             : {
    9438           0 :         io_buffer_unmap(ctx, &prsrc->buf);
    9439           0 :         prsrc->buf = NULL;
    9440           0 : }
    9441             : 
    9442           0 : static void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
    9443             : {
    9444             :         unsigned int i;
    9445             : 
    9446           0 :         for (i = 0; i < ctx->nr_user_bufs; i++)
    9447           0 :                 io_buffer_unmap(ctx, &ctx->user_bufs[i]);
    9448           0 :         kfree(ctx->user_bufs);
    9449           0 :         io_rsrc_data_free(ctx->buf_data);
    9450           0 :         ctx->user_bufs = NULL;
    9451           0 :         ctx->buf_data = NULL;
    9452           0 :         ctx->nr_user_bufs = 0;
    9453           0 : }
    9454             : 
    9455           0 : static int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
    9456             : {
    9457             :         int ret;
    9458             : 
    9459           0 :         if (!ctx->buf_data)
    9460             :                 return -ENXIO;
    9461             : 
    9462           0 :         ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
    9463           0 :         if (!ret)
    9464           0 :                 __io_sqe_buffers_unregister(ctx);
    9465             :         return ret;
    9466             : }
    9467             : 
    9468           0 : static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
    9469             :                        void __user *arg, unsigned index)
    9470             : {
    9471             :         struct iovec __user *src;
    9472             : 
    9473             : #ifdef CONFIG_COMPAT
    9474             :         if (ctx->compat) {
    9475             :                 struct compat_iovec __user *ciovs;
    9476             :                 struct compat_iovec ciov;
    9477             : 
    9478             :                 ciovs = (struct compat_iovec __user *) arg;
    9479             :                 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
    9480             :                         return -EFAULT;
    9481             : 
    9482             :                 dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
    9483             :                 dst->iov_len = ciov.iov_len;
    9484             :                 return 0;
    9485             :         }
    9486             : #endif
    9487           0 :         src = (struct iovec __user *) arg;
    9488           0 :         if (copy_from_user(dst, &src[index], sizeof(*dst)))
    9489             :                 return -EFAULT;
    9490             :         return 0;
    9491             : }
    9492             : 
    9493             : /*
     9494             :  * Not super efficient, but this only runs at registration time. And we do cache
    9495             :  * the last compound head, so generally we'll only do a full search if we don't
    9496             :  * match that one.
    9497             :  *
    9498             :  * We check if the given compound head page has already been accounted, to
    9499             :  * avoid double accounting it. This allows us to account the full size of the
    9500             :  * page, not just the constituent pages of a huge page.
    9501             :  */
    9502           0 : static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
    9503             :                                   int nr_pages, struct page *hpage)
    9504             : {
    9505             :         int i, j;
    9506             : 
    9507             :         /* check current page array */
    9508           0 :         for (i = 0; i < nr_pages; i++) {
    9509           0 :                 if (!PageCompound(pages[i]))
    9510           0 :                         continue;
    9511           0 :                 if (compound_head(pages[i]) == hpage)
    9512             :                         return true;
    9513             :         }
    9514             : 
    9515             :         /* check previously registered pages */
    9516           0 :         for (i = 0; i < ctx->nr_user_bufs; i++) {
    9517           0 :                 struct io_mapped_ubuf *imu = ctx->user_bufs[i];
    9518             : 
    9519           0 :                 for (j = 0; j < imu->nr_bvecs; j++) {
    9520           0 :                         if (!PageCompound(imu->bvec[j].bv_page))
    9521           0 :                                 continue;
    9522           0 :                         if (compound_head(imu->bvec[j].bv_page) == hpage)
    9523             :                                 return true;
    9524             :                 }
    9525             :         }
    9526             : 
    9527             :         return false;
    9528             : }
    9529             : 
    9530           0 : static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
    9531             :                                  int nr_pages, struct io_mapped_ubuf *imu,
    9532             :                                  struct page **last_hpage)
    9533             : {
    9534             :         int i, ret;
    9535             : 
    9536           0 :         imu->acct_pages = 0;
    9537           0 :         for (i = 0; i < nr_pages; i++) {
    9538           0 :                 if (!PageCompound(pages[i])) {
    9539           0 :                         imu->acct_pages++;
    9540             :                 } else {
    9541             :                         struct page *hpage;
    9542             : 
    9543           0 :                         hpage = compound_head(pages[i]);
    9544           0 :                         if (hpage == *last_hpage)
    9545           0 :                                 continue;
    9546           0 :                         *last_hpage = hpage;
    9547           0 :                         if (headpage_already_acct(ctx, pages, i, hpage))
    9548           0 :                                 continue;
    9549           0 :                         imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
    9550             :                 }
    9551             :         }
    9552             : 
    9553           0 :         if (!imu->acct_pages)
    9554             :                 return 0;
    9555             : 
    9556           0 :         ret = io_account_mem(ctx, imu->acct_pages);
    9557           0 :         if (ret)
    9558           0 :                 imu->acct_pages = 0;
    9559             :         return ret;
    9560             : }
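
The compound-head tracking above matters when several fixed buffers share a
huge page: the page is charged once, not once per buffer. A hedged userspace
sketch, assuming liburing and hugepages reserved via
/proc/sys/vm/nr_hugepages (the function name is illustrative):

#include <liburing.h>
#include <sys/mman.h>
#include <sys/uio.h>

static int register_two_bufs_one_hugepage(struct io_uring *ring)
{
        size_t sz = 2 * 1024 * 1024;
        void *huge = mmap(NULL, sz, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
        struct iovec iovs[2];

        if (huge == MAP_FAILED)
                return -1;
        /* two buffers carved from the same 2MB page; accounted once */
        iovs[0] = (struct iovec){ .iov_base = huge, .iov_len = 4096 };
        iovs[1] = (struct iovec){ .iov_base = (char *)huge + 4096,
                                  .iov_len = 4096 };
        return io_uring_register_buffers(ring, iovs, 2);
}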
    9561             : 
    9562           0 : static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
    9563             :                                   struct io_mapped_ubuf **pimu,
    9564             :                                   struct page **last_hpage)
    9565             : {
    9566           0 :         struct io_mapped_ubuf *imu = NULL;
    9567           0 :         struct vm_area_struct **vmas = NULL;
    9568           0 :         struct page **pages = NULL;
    9569             :         unsigned long off, start, end, ubuf;
    9570             :         size_t size;
    9571             :         int ret, pret, nr_pages, i;
    9572             : 
    9573           0 :         if (!iov->iov_base) {
    9574           0 :                 *pimu = ctx->dummy_ubuf;
    9575           0 :                 return 0;
    9576             :         }
    9577             : 
    9578           0 :         ubuf = (unsigned long) iov->iov_base;
    9579           0 :         end = (ubuf + iov->iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
    9580           0 :         start = ubuf >> PAGE_SHIFT;
    9581           0 :         nr_pages = end - start;
    9582             : 
    9583           0 :         *pimu = NULL;
    9584           0 :         ret = -ENOMEM;
    9585             : 
    9586           0 :         pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
    9587           0 :         if (!pages)
    9588             :                 goto done;
    9589             : 
    9590           0 :         vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
    9591             :                               GFP_KERNEL);
    9592           0 :         if (!vmas)
    9593             :                 goto done;
    9594             : 
    9595           0 :         imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
    9596           0 :         if (!imu)
    9597             :                 goto done;
    9598             : 
    9599           0 :         ret = 0;
    9600           0 :         mmap_read_lock(current->mm);
    9601           0 :         pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
    9602             :                               pages, vmas);
    9603           0 :         if (pret == nr_pages) {
    9604             :                 /* don't support file backed memory */
    9605           0 :                 for (i = 0; i < nr_pages; i++) {
    9606           0 :                         struct vm_area_struct *vma = vmas[i];
    9607             : 
    9608           0 :                         if (vma_is_shmem(vma))
    9609           0 :                                 continue;
    9610           0 :                         if (vma->vm_file &&
    9611             :                             !is_file_hugepages(vma->vm_file)) {
    9612             :                                 ret = -EOPNOTSUPP;
    9613             :                                 break;
    9614             :                         }
    9615             :                 }
    9616             :         } else {
    9617           0 :                 ret = pret < 0 ? pret : -EFAULT;
    9618             :         }
    9619           0 :         mmap_read_unlock(current->mm);
    9620           0 :         if (ret) {
    9621             :                 /*
     9622             :                  * If we did a partial map, or found file-backed vmas,
     9623             :                  * release any pages we did get.
    9624             :                  */
    9625           0 :                 if (pret > 0)
    9626           0 :                         unpin_user_pages(pages, pret);
    9627             :                 goto done;
    9628             :         }
    9629             : 
    9630           0 :         ret = io_buffer_account_pin(ctx, pages, pret, imu, last_hpage);
    9631           0 :         if (ret) {
    9632           0 :                 unpin_user_pages(pages, pret);
    9633           0 :                 goto done;
    9634             :         }
    9635             : 
    9636           0 :         off = ubuf & ~PAGE_MASK;
    9637           0 :         size = iov->iov_len;
    9638           0 :         for (i = 0; i < nr_pages; i++) {
    9639             :                 size_t vec_len;
    9640             : 
    9641           0 :                 vec_len = min_t(size_t, size, PAGE_SIZE - off);
    9642           0 :                 imu->bvec[i].bv_page = pages[i];
    9643           0 :                 imu->bvec[i].bv_len = vec_len;
    9644           0 :                 imu->bvec[i].bv_offset = off;
    9645           0 :                 off = 0;
    9646           0 :                 size -= vec_len;
    9647             :         }
    9648             :         /* store original address for later verification */
    9649           0 :         imu->ubuf = ubuf;
    9650           0 :         imu->ubuf_end = ubuf + iov->iov_len;
    9651           0 :         imu->nr_bvecs = nr_pages;
    9652           0 :         *pimu = imu;
    9653           0 :         ret = 0;
    9654             : done:
    9655           0 :         if (ret)
    9656           0 :                 kvfree(imu);
    9657           0 :         kvfree(pages);
    9658           0 :         kvfree(vmas);
    9659           0 :         return ret;
    9660             : }
    9661             : 
    9662             : static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
    9663             : {
    9664           0 :         ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
    9665           0 :         return ctx->user_bufs ? 0 : -ENOMEM;
    9666             : }
    9667             : 
    9668             : static int io_buffer_validate(struct iovec *iov)
    9669             : {
    9670           0 :         unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);
    9671             : 
    9672             :         /*
     9673             :          * Don't impose further limits on the size and buffer
     9674             :          * constraints here; we'll return -EINVAL later, when
     9675             :          * the IO is submitted, if they are wrong.
    9676             :          */
    9677           0 :         if (!iov->iov_base)
    9678           0 :                 return iov->iov_len ? -EFAULT : 0;
    9679           0 :         if (!iov->iov_len)
    9680             :                 return -EFAULT;
    9681             : 
    9682             :         /* arbitrary limit, but we need something */
    9683           0 :         if (iov->iov_len > SZ_1G)
    9684             :                 return -EFAULT;
    9685             : 
    9686           0 :         if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
    9687             :                 return -EOVERFLOW;
    9688             : 
    9689             :         return 0;
    9690             : }
    9691             : 
    9692           0 : static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
    9693             :                                    unsigned int nr_args, u64 __user *tags)
    9694             : {
    9695           0 :         struct page *last_hpage = NULL;
    9696             :         struct io_rsrc_data *data;
    9697             :         int i, ret;
    9698             :         struct iovec iov;
    9699             : 
    9700           0 :         if (ctx->user_bufs)
    9701             :                 return -EBUSY;
    9702           0 :         if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
    9703             :                 return -EINVAL;
    9704           0 :         ret = io_rsrc_node_switch_start(ctx);
    9705           0 :         if (ret)
    9706             :                 return ret;
    9707           0 :         ret = io_rsrc_data_alloc(ctx, io_rsrc_buf_put, tags, nr_args, &data);
    9708           0 :         if (ret)
    9709             :                 return ret;
    9710           0 :         ret = io_buffers_map_alloc(ctx, nr_args);
    9711           0 :         if (ret) {
    9712           0 :                 io_rsrc_data_free(data);
    9713           0 :                 return ret;
    9714             :         }
    9715             : 
    9716           0 :         for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
    9717           0 :                 ret = io_copy_iov(ctx, &iov, arg, i);
    9718           0 :                 if (ret)
    9719             :                         break;
    9720           0 :                 ret = io_buffer_validate(&iov);
    9721           0 :                 if (ret)
    9722             :                         break;
    9723           0 :                 if (!iov.iov_base && *io_get_tag_slot(data, i)) {
    9724             :                         ret = -EINVAL;
    9725             :                         break;
    9726             :                 }
    9727             : 
    9728           0 :                 ret = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i],
    9729             :                                              &last_hpage);
    9730           0 :                 if (ret)
    9731             :                         break;
    9732             :         }
    9733             : 
    9734           0 :         WARN_ON_ONCE(ctx->buf_data);
    9735             : 
    9736           0 :         ctx->buf_data = data;
    9737           0 :         if (ret)
    9738           0 :                 __io_sqe_buffers_unregister(ctx);
    9739             :         else
    9740           0 :                 io_rsrc_node_switch(ctx, NULL);
    9741             :         return ret;
    9742             : }
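
As io_buffer_validate() above permits, a NULL iov_base with zero iov_len is a
legal sparse slot (it receives dummy_ubuf). A sketch assuming liburing's
io_uring_register_buffers() (the function name is illustrative):

#include <liburing.h>
#include <sys/uio.h>

static int register_with_sparse_slot(struct io_uring *ring,
                                     void *buf, size_t len)
{
        struct iovec iovs[2] = {
                { .iov_base = buf,  .iov_len = len },
                { .iov_base = NULL, .iov_len = 0 }, /* sparse placeholder */
        };

        return io_uring_register_buffers(ring, iovs, 2);
}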
    9743             : 
    9744           0 : static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
    9745             :                                    struct io_uring_rsrc_update2 *up,
    9746             :                                    unsigned int nr_args)
    9747             : {
    9748           0 :         u64 __user *tags = u64_to_user_ptr(up->tags);
    9749           0 :         struct iovec iov, __user *iovs = u64_to_user_ptr(up->data);
    9750           0 :         struct page *last_hpage = NULL;
    9751           0 :         bool needs_switch = false;
    9752             :         __u32 done;
    9753             :         int i, err;
    9754             : 
    9755           0 :         if (!ctx->buf_data)
    9756             :                 return -ENXIO;
    9757           0 :         if (up->offset + nr_args > ctx->nr_user_bufs)
    9758             :                 return -EINVAL;
    9759             : 
    9760           0 :         for (done = 0; done < nr_args; done++) {
    9761             :                 struct io_mapped_ubuf *imu;
    9762           0 :                 int offset = up->offset + done;
    9763           0 :                 u64 tag = 0;
    9764             : 
    9765           0 :                 err = io_copy_iov(ctx, &iov, iovs, done);
    9766           0 :                 if (err)
    9767             :                         break;
    9768           0 :                 if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
    9769             :                         err = -EFAULT;
    9770             :                         break;
    9771             :                 }
    9772           0 :                 err = io_buffer_validate(&iov);
    9773           0 :                 if (err)
    9774             :                         break;
    9775           0 :                 if (!iov.iov_base && tag) {
    9776             :                         err = -EINVAL;
    9777             :                         break;
    9778             :                 }
    9779           0 :                 err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage);
    9780           0 :                 if (err)
    9781             :                         break;
    9782             : 
    9783           0 :                 i = array_index_nospec(offset, ctx->nr_user_bufs);
    9784           0 :                 if (ctx->user_bufs[i] != ctx->dummy_ubuf) {
    9785           0 :                         err = io_queue_rsrc_removal(ctx->buf_data, i,
    9786             :                                                     ctx->rsrc_node, ctx->user_bufs[i]);
    9787           0 :                         if (unlikely(err)) {
    9788           0 :                                 io_buffer_unmap(ctx, &imu);
    9789           0 :                                 break;
    9790             :                         }
    9791           0 :                         ctx->user_bufs[i] = NULL;
    9792           0 :                         needs_switch = true;
    9793             :                 }
    9794             : 
    9795           0 :                 ctx->user_bufs[i] = imu;
    9796           0 :                 *io_get_tag_slot(ctx->buf_data, offset) = tag;
    9797             :         }
    9798             : 
    9799           0 :         if (needs_switch)
    9800           0 :                 io_rsrc_node_switch(ctx, ctx->buf_data);
    9801           0 :         return done ? done : err;
    9802             : }
    9803             : 
    9804           0 : static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg,
    9805             :                                unsigned int eventfd_async)
    9806             : {
    9807             :         struct io_ev_fd *ev_fd;
    9808           0 :         __s32 __user *fds = arg;
    9809             :         int fd;
    9810             : 
    9811           0 :         ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
    9812             :                                         lockdep_is_held(&ctx->uring_lock));
    9813           0 :         if (ev_fd)
    9814             :                 return -EBUSY;
    9815             : 
    9816           0 :         if (copy_from_user(&fd, fds, sizeof(*fds)))
    9817             :                 return -EFAULT;
    9818             : 
    9819           0 :         ev_fd = kmalloc(sizeof(*ev_fd), GFP_KERNEL);
    9820           0 :         if (!ev_fd)
    9821             :                 return -ENOMEM;
    9822             : 
    9823           0 :         ev_fd->cq_ev_fd = eventfd_ctx_fdget(fd);
    9824           0 :         if (IS_ERR(ev_fd->cq_ev_fd)) {
    9825           0 :                 int ret = PTR_ERR(ev_fd->cq_ev_fd);
    9826           0 :                 kfree(ev_fd);
    9827           0 :                 return ret;
    9828             :         }
    9829           0 :         ev_fd->eventfd_async = eventfd_async;
    9830           0 :         ctx->has_evfd = true;
    9831           0 :         rcu_assign_pointer(ctx->io_ev_fd, ev_fd);
    9832           0 :         return 0;
    9833             : }
    9834             : 
    9835           0 : static void io_eventfd_put(struct rcu_head *rcu)
    9836             : {
    9837           0 :         struct io_ev_fd *ev_fd = container_of(rcu, struct io_ev_fd, rcu);
    9838             : 
    9839           0 :         eventfd_ctx_put(ev_fd->cq_ev_fd);
    9840           0 :         kfree(ev_fd);
    9841           0 : }
    9842             : 
    9843             : static int io_eventfd_unregister(struct io_ring_ctx *ctx)
    9844             : {
    9845             :         struct io_ev_fd *ev_fd;
    9846             : 
    9847           0 :         ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
    9848             :                                         lockdep_is_held(&ctx->uring_lock));
    9849           0 :         if (ev_fd) {
    9850           0 :                 ctx->has_evfd = false;
    9851           0 :                 rcu_assign_pointer(ctx->io_ev_fd, NULL);
    9852           0 :                 call_rcu(&ev_fd->rcu, io_eventfd_put);
    9853             :                 return 0;
    9854             :         }
    9855             : 
    9856             :         return -ENXIO;
    9857             : }
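
io_eventfd_register() and io_eventfd_unregister() back IORING_REGISTER_EVENTFD
and its inverse. A minimal sketch assuming liburing (attach_eventfd() is an
illustrative name):

#include <liburing.h>
#include <sys/eventfd.h>

static int attach_eventfd(struct io_uring *ring)
{
        int efd = eventfd(0, EFD_CLOEXEC);

        if (efd < 0)
                return -1;
        /* the kernel will signal efd whenever CQEs are posted */
        return io_uring_register_eventfd(ring, efd);
}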
    9858             : 
    9859           0 : static void io_destroy_buffers(struct io_ring_ctx *ctx)
    9860             : {
    9861             :         int i;
    9862             : 
    9863           0 :         for (i = 0; i < (1U << IO_BUFFERS_HASH_BITS); i++) {
    9864           0 :                 struct list_head *list = &ctx->io_buffers[i];
    9865             : 
    9866           0 :                 while (!list_empty(list)) {
    9867             :                         struct io_buffer_list *bl;
    9868             : 
    9869           0 :                         bl = list_first_entry(list, struct io_buffer_list, list);
    9870           0 :                         __io_remove_buffers(ctx, bl, -1U);
    9871           0 :                         list_del(&bl->list);
    9872           0 :                         kfree(bl);
    9873             :                 }
    9874             :         }
    9875             : 
    9876           0 :         while (!list_empty(&ctx->io_buffers_pages)) {
    9877             :                 struct page *page;
    9878             : 
    9879           0 :                 page = list_first_entry(&ctx->io_buffers_pages, struct page, lru);
    9880           0 :                 list_del_init(&page->lru);
    9881           0 :                 __free_page(page);
    9882             :         }
    9883           0 : }
    9884             : 
    9885           0 : static void io_req_caches_free(struct io_ring_ctx *ctx)
    9886             : {
    9887           0 :         struct io_submit_state *state = &ctx->submit_state;
    9888           0 :         int nr = 0;
    9889             : 
    9890           0 :         mutex_lock(&ctx->uring_lock);
    9891             :         io_flush_cached_locked_reqs(ctx, state);
    9892             : 
    9893           0 :         while (state->free_list.next) {
    9894             :                 struct io_wq_work_node *node;
    9895             :                 struct io_kiocb *req;
    9896             : 
    9897           0 :                 node = wq_stack_extract(&state->free_list);
    9898           0 :                 req = container_of(node, struct io_kiocb, comp_list);
    9899           0 :                 kmem_cache_free(req_cachep, req);
    9900           0 :                 nr++;
    9901             :         }
    9902           0 :         if (nr)
    9903           0 :                 percpu_ref_put_many(&ctx->refs, nr);
    9904           0 :         mutex_unlock(&ctx->uring_lock);
    9905           0 : }
    9906             : 
    9907             : static void io_wait_rsrc_data(struct io_rsrc_data *data)
    9908             : {
    9909           0 :         if (data && !atomic_dec_and_test(&data->refs))
    9910           0 :                 wait_for_completion(&data->done);
    9911             : }
    9912             : 
    9913           0 : static void io_flush_apoll_cache(struct io_ring_ctx *ctx)
    9914             : {
    9915             :         struct async_poll *apoll;
    9916             : 
    9917           0 :         while (!list_empty(&ctx->apoll_cache)) {
    9918           0 :                 apoll = list_first_entry(&ctx->apoll_cache, struct async_poll,
    9919             :                                                 poll.wait.entry);
    9920           0 :                 list_del(&apoll->poll.wait.entry);
    9921           0 :                 kfree(apoll);
    9922             :         }
    9923           0 : }
    9924             : 
    9925           0 : static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
    9926             : {
    9927           0 :         io_sq_thread_finish(ctx);
    9928             : 
    9929           0 :         if (ctx->mm_account) {
    9930           0 :                 mmdrop(ctx->mm_account);
    9931           0 :                 ctx->mm_account = NULL;
    9932             :         }
    9933             : 
    9934           0 :         io_rsrc_refs_drop(ctx);
    9935             :         /* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */
    9936           0 :         io_wait_rsrc_data(ctx->buf_data);
    9937           0 :         io_wait_rsrc_data(ctx->file_data);
    9938             : 
    9939           0 :         mutex_lock(&ctx->uring_lock);
    9940           0 :         if (ctx->buf_data)
    9941           0 :                 __io_sqe_buffers_unregister(ctx);
    9942           0 :         if (ctx->file_data)
    9943           0 :                 __io_sqe_files_unregister(ctx);
    9944           0 :         if (ctx->rings)
    9945           0 :                 __io_cqring_overflow_flush(ctx, true);
    9946           0 :         io_eventfd_unregister(ctx);
    9947           0 :         io_flush_apoll_cache(ctx);
    9948           0 :         mutex_unlock(&ctx->uring_lock);
    9949           0 :         io_destroy_buffers(ctx);
    9950           0 :         if (ctx->sq_creds)
    9951           0 :                 put_cred(ctx->sq_creds);
    9952             : 
     9953             :         /* there are no registered resources left; nobody uses them */
    9954           0 :         if (ctx->rsrc_node)
    9955           0 :                 io_rsrc_node_destroy(ctx->rsrc_node);
    9956           0 :         if (ctx->rsrc_backup_node)
    9957           0 :                 io_rsrc_node_destroy(ctx->rsrc_backup_node);
    9958           0 :         flush_delayed_work(&ctx->rsrc_put_work);
    9959           0 :         flush_delayed_work(&ctx->fallback_work);
    9960             : 
    9961           0 :         WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));
    9962           0 :         WARN_ON_ONCE(!llist_empty(&ctx->rsrc_put_llist));
    9963             : 
    9964             : #if defined(CONFIG_UNIX)
    9965             :         if (ctx->ring_sock) {
    9966             :                 ctx->ring_sock->file = NULL; /* so that iput() is called */
    9967             :                 sock_release(ctx->ring_sock);
    9968             :         }
    9969             : #endif
    9970           0 :         WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list));
    9971             : 
    9972           0 :         io_mem_free(ctx->rings);
    9973           0 :         io_mem_free(ctx->sq_sqes);
    9974             : 
    9975           0 :         percpu_ref_exit(&ctx->refs);
    9976           0 :         free_uid(ctx->user);
    9977           0 :         io_req_caches_free(ctx);
    9978           0 :         if (ctx->hash_map)
    9979           0 :                 io_wq_put_hash(ctx->hash_map);
    9980           0 :         kfree(ctx->cancel_hash);
    9981           0 :         kfree(ctx->dummy_ubuf);
    9982           0 :         kfree(ctx->io_buffers);
    9983           0 :         kfree(ctx);
    9984           0 : }
    9985             : 
    9986           0 : static __poll_t io_uring_poll(struct file *file, poll_table *wait)
    9987             : {
    9988           0 :         struct io_ring_ctx *ctx = file->private_data;
    9989           0 :         __poll_t mask = 0;
    9990             : 
    9991           0 :         poll_wait(file, &ctx->cq_wait, wait);
    9992             :         /*
    9993             :          * synchronizes with barrier from wq_has_sleeper call in
    9994             :          * io_commit_cqring
    9995             :          */
    9996           0 :         smp_rmb();
    9997           0 :         if (!io_sqring_full(ctx))
    9998           0 :                 mask |= EPOLLOUT | EPOLLWRNORM;
    9999             : 
   10000             :         /*
   10001             :          * Don't flush cqring overflow list here, just do a simple check.
    10002             :          * Otherwise there could possibly be an ABBA deadlock:
   10003             :          *      CPU0                    CPU1
   10004             :          *      ----                    ----
   10005             :          * lock(&ctx->uring_lock);
   10006             :          *                              lock(&ep->mtx);
   10007             :          *                              lock(&ctx->uring_lock);
   10008             :          * lock(&ep->mtx);
   10009             :          *
    10010             :          * Users may get EPOLLIN while seeing nothing in the cqring; this
    10011             :          * pushes them to do the flush.
   10012             :          */
   10013           0 :         if (io_cqring_events(ctx) || test_bit(0, &ctx->check_cq_overflow))
   10014           0 :                 mask |= EPOLLIN | EPOLLRDNORM;
   10015             : 
   10016           0 :         return mask;
   10017             : }
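
Because io_uring_poll() reports EPOLLIN/EPOLLOUT, a ring fd can sit in an
epoll set like any other fd. A sketch assuming liburing's ring_fd field
(wait_for_cqes_via_epoll() is an illustrative name); per the comment above,
EPOLLIN with an empty CQ ring means an overflow flush is still needed:

#include <liburing.h>
#include <sys/epoll.h>

static int wait_for_cqes_via_epoll(struct io_uring *ring)
{
        struct epoll_event ev = { .events = EPOLLIN }, out;
        int epfd = epoll_create1(0);

        if (epfd < 0)
                return -1;
        if (epoll_ctl(epfd, EPOLL_CTL_ADD, ring->ring_fd, &ev) < 0)
                return -1;
        /* EPOLLIN fires on CQEs, or on pending overflow per the check above */
        return epoll_wait(epfd, &out, 1, -1);
}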
   10018             : 
   10019           0 : static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
   10020             : {
   10021             :         const struct cred *creds;
   10022             : 
   10023           0 :         creds = xa_erase(&ctx->personalities, id);
   10024           0 :         if (creds) {
   10025             :                 put_cred(creds);
   10026             :                 return 0;
   10027             :         }
   10028             : 
   10029             :         return -EINVAL;
   10030             : }
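
io_unregister_personality() drops credentials registered earlier with
IORING_REGISTER_PERSONALITY. A raw-syscall sketch (liburing also offers
io_uring_register_personality(); with_personality() is an illustrative name):

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static int with_personality(int ring_fd)
{
        /* returns a personality id usable in sqe->personality */
        int id = syscall(__NR_io_uring_register, ring_fd,
                         IORING_REGISTER_PERSONALITY, NULL, 0);

        if (id < 0)
                return -1;
        /* ... submit SQEs with sqe->personality = id ... */
        return syscall(__NR_io_uring_register, ring_fd,
                       IORING_UNREGISTER_PERSONALITY, NULL, id);
}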
   10031             : 
   10032             : struct io_tctx_exit {
   10033             :         struct callback_head            task_work;
   10034             :         struct completion               completion;
   10035             :         struct io_ring_ctx              *ctx;
   10036             : };
   10037             : 
   10038           0 : static __cold void io_tctx_exit_cb(struct callback_head *cb)
   10039             : {
   10040           0 :         struct io_uring_task *tctx = current->io_uring;
   10041             :         struct io_tctx_exit *work;
   10042             : 
   10043           0 :         work = container_of(cb, struct io_tctx_exit, task_work);
   10044             :         /*
   10045             :          * When @in_idle, we're in cancellation and it's racy to remove the
    10046             :          * node. It'll be removed by the end of cancellation, so just ignore it.
   10047             :          */
   10048           0 :         if (!atomic_read(&tctx->in_idle))
   10049           0 :                 io_uring_del_tctx_node((unsigned long)work->ctx);
   10050           0 :         complete(&work->completion);
   10051           0 : }
   10052             : 
   10053           0 : static __cold bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
   10054             : {
   10055           0 :         struct io_kiocb *req = container_of(work, struct io_kiocb, work);
   10056             : 
   10057           0 :         return req->ctx == data;
   10058             : }
   10059             : 
   10060           0 : static __cold void io_ring_exit_work(struct work_struct *work)
   10061             : {
   10062           0 :         struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
   10063           0 :         unsigned long timeout = jiffies + HZ * 60 * 5;
   10064           0 :         unsigned long interval = HZ / 20;
   10065             :         struct io_tctx_exit exit;
   10066             :         struct io_tctx_node *node;
   10067             :         int ret;
   10068             : 
   10069             :         /*
   10070             :          * If we're doing polled IO and end up having requests being
   10071             :          * submitted async (out-of-line), then completions can come in while
   10072             :          * we're waiting for refs to drop. We need to reap these manually,
   10073             :          * as nobody else will be looking for them.
   10074             :          */
   10075             :         do {
   10076           0 :                 io_uring_try_cancel_requests(ctx, NULL, true);
   10077           0 :                 if (ctx->sq_data) {
   10078           0 :                         struct io_sq_data *sqd = ctx->sq_data;
   10079             :                         struct task_struct *tsk;
   10080             : 
   10081           0 :                         io_sq_thread_park(sqd);
   10082           0 :                         tsk = sqd->thread;
   10083           0 :                         if (tsk && tsk->io_uring && tsk->io_uring->io_wq)
   10084           0 :                                 io_wq_cancel_cb(tsk->io_uring->io_wq,
   10085             :                                                 io_cancel_ctx_cb, ctx, true);
   10086           0 :                         io_sq_thread_unpark(sqd);
   10087             :                 }
   10088             : 
   10089           0 :                 io_req_caches_free(ctx);
   10090             : 
   10091           0 :                 if (WARN_ON_ONCE(time_after(jiffies, timeout))) {
   10092             :                         /* there is little hope left, don't run it too often */
   10093           0 :                         interval = HZ * 60;
   10094             :                 }
   10095           0 :         } while (!wait_for_completion_timeout(&ctx->ref_comp, interval));
   10096             : 
   10097           0 :         init_completion(&exit.completion);
   10098           0 :         init_task_work(&exit.task_work, io_tctx_exit_cb);
   10099           0 :         exit.ctx = ctx;
   10100             :         /*
    10101             :          * Some may use the context even when all refs and requests have been
    10102             :          * put, and they are free to do so while still holding uring_lock or
    10103             :          * completion_lock; see io_req_task_submit(). Apart from other work,
    10104             :          * this lock/unlock section also waits for them to finish.
   10105             :          */
   10106           0 :         mutex_lock(&ctx->uring_lock);
   10107           0 :         while (!list_empty(&ctx->tctx_list)) {
   10108           0 :                 WARN_ON_ONCE(time_after(jiffies, timeout));
   10109             : 
   10110           0 :                 node = list_first_entry(&ctx->tctx_list, struct io_tctx_node,
   10111             :                                         ctx_node);
   10112             :                 /* don't spin on a single task if cancellation failed */
   10113           0 :                 list_rotate_left(&ctx->tctx_list);
   10114           0 :                 ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL);
   10115           0 :                 if (WARN_ON_ONCE(ret))
   10116           0 :                         continue;
   10117             : 
   10118           0 :                 mutex_unlock(&ctx->uring_lock);
   10119           0 :                 wait_for_completion(&exit.completion);
   10120           0 :                 mutex_lock(&ctx->uring_lock);
   10121             :         }
   10122           0 :         mutex_unlock(&ctx->uring_lock);
   10123           0 :         spin_lock(&ctx->completion_lock);
   10124           0 :         spin_unlock(&ctx->completion_lock);
   10125             : 
   10126           0 :         io_ring_ctx_free(ctx);
   10127           0 : }
   10128             : 
   10129             : /* Returns true if we found and killed one or more timeouts */
   10130           0 : static __cold bool io_kill_timeouts(struct io_ring_ctx *ctx,
   10131             :                                     struct task_struct *tsk, bool cancel_all)
   10132             : {
   10133             :         struct io_kiocb *req, *tmp;
   10134           0 :         int canceled = 0;
   10135             : 
   10136           0 :         spin_lock(&ctx->completion_lock);
   10137           0 :         spin_lock_irq(&ctx->timeout_lock);
   10138           0 :         list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
   10139           0 :                 if (io_match_task(req, tsk, cancel_all)) {
   10140           0 :                         io_kill_timeout(req, -ECANCELED);
   10141           0 :                         canceled++;
   10142             :                 }
   10143             :         }
   10144           0 :         spin_unlock_irq(&ctx->timeout_lock);
   10145           0 :         if (canceled != 0)
   10146           0 :                 io_commit_cqring(ctx);
   10147           0 :         spin_unlock(&ctx->completion_lock);
   10148           0 :         if (canceled != 0)
   10149           0 :                 io_cqring_ev_posted(ctx);
   10150           0 :         return canceled != 0;
   10151             : }
   10152             : 
   10153           0 : static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
   10154             : {
   10155             :         unsigned long index;
   10156             :         struct creds *creds;
   10157             : 
   10158           0 :         mutex_lock(&ctx->uring_lock);
   10159           0 :         percpu_ref_kill(&ctx->refs);
   10160           0 :         if (ctx->rings)
   10161           0 :                 __io_cqring_overflow_flush(ctx, true);
   10162           0 :         xa_for_each(&ctx->personalities, index, creds)
   10163           0 :                 io_unregister_personality(ctx, index);
   10164           0 :         mutex_unlock(&ctx->uring_lock);
   10165             : 
   10166           0 :         io_kill_timeouts(ctx, NULL, true);
   10167           0 :         io_poll_remove_all(ctx, NULL, true);
   10168             : 
   10169             :         /* if we failed setting up the ctx, we might not have any rings */
   10170           0 :         io_iopoll_try_reap_events(ctx);
   10171             : 
   10172           0 :         INIT_WORK(&ctx->exit_work, io_ring_exit_work);
   10173             :         /*
   10174             :          * Use system_unbound_wq to avoid spawning tons of event kworkers
   10175             :          * if we're exiting a ton of rings at the same time. It just adds
    10176             :          * noise and overhead; there's no discernible change in runtime
   10177             :          * over using system_wq.
   10178             :          */
   10179           0 :         queue_work(system_unbound_wq, &ctx->exit_work);
   10180           0 : }
   10181             : 
   10182           0 : static int io_uring_release(struct inode *inode, struct file *file)
   10183             : {
   10184           0 :         struct io_ring_ctx *ctx = file->private_data;
   10185             : 
   10186           0 :         file->private_data = NULL;
   10187           0 :         io_ring_ctx_wait_and_kill(ctx);
   10188           0 :         return 0;
   10189             : }
   10190             : 
   10191             : struct io_task_cancel {
   10192             :         struct task_struct *task;
   10193             :         bool all;
   10194             : };
   10195             : 
   10196           0 : static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
   10197             : {
   10198           0 :         struct io_kiocb *req = container_of(work, struct io_kiocb, work);
   10199           0 :         struct io_task_cancel *cancel = data;
   10200             : 
   10201           0 :         return io_match_task_safe(req, cancel->task, cancel->all);
   10202             : }
   10203             : 
   10204           0 : static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
   10205             :                                          struct task_struct *task,
   10206             :                                          bool cancel_all)
   10207             : {
   10208             :         struct io_defer_entry *de;
   10209           0 :         LIST_HEAD(list);
   10210             : 
   10211           0 :         spin_lock(&ctx->completion_lock);
   10212           0 :         list_for_each_entry_reverse(de, &ctx->defer_list, list) {
   10213           0 :                 if (io_match_task_safe(de->req, task, cancel_all)) {
   10214           0 :                         list_cut_position(&list, &ctx->defer_list, &de->list);
   10215           0 :                         break;
   10216             :                 }
   10217             :         }
   10218           0 :         spin_unlock(&ctx->completion_lock);
   10219           0 :         if (list_empty(&list))
   10220             :                 return false;
   10221             : 
   10222           0 :         while (!list_empty(&list)) {
   10223           0 :                 de = list_first_entry(&list, struct io_defer_entry, list);
   10224           0 :                 list_del_init(&de->list);
   10225           0 :                 io_req_complete_failed(de->req, -ECANCELED);
   10226           0 :                 kfree(de);
   10227             :         }
   10228             :         return true;
   10229             : }
   10230             : 
   10231           0 : static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
   10232             : {
   10233             :         struct io_tctx_node *node;
   10234             :         enum io_wq_cancel cret;
   10235           0 :         bool ret = false;
   10236             : 
   10237           0 :         mutex_lock(&ctx->uring_lock);
   10238           0 :         list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
   10239           0 :                 struct io_uring_task *tctx = node->task->io_uring;
   10240             : 
   10241             :                 /*
   10242             :                  * io_wq will stay alive while we hold uring_lock, because it's
    10243             :                  * killed after the ctx nodes, which requires taking the lock.
   10244             :                  */
   10245           0 :                 if (!tctx || !tctx->io_wq)
   10246           0 :                         continue;
   10247           0 :                 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true);
   10248           0 :                 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
   10249             :         }
   10250           0 :         mutex_unlock(&ctx->uring_lock);
   10251             : 
   10252           0 :         return ret;
   10253             : }
   10254             : 
   10255           0 : static __cold void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
   10256             :                                                 struct task_struct *task,
   10257             :                                                 bool cancel_all)
   10258             : {
   10259           0 :         struct io_task_cancel cancel = { .task = task, .all = cancel_all, };
   10260           0 :         struct io_uring_task *tctx = task ? task->io_uring : NULL;
   10261             : 
   10262           0 :         while (1) {
   10263             :                 enum io_wq_cancel cret;
   10264           0 :                 bool ret = false;
   10265             : 
   10266           0 :                 if (!task) {
   10267           0 :                         ret |= io_uring_try_cancel_iowq(ctx);
   10268           0 :                 } else if (tctx && tctx->io_wq) {
   10269             :                         /*
   10270             :                          * Cancels requests of all rings, not only @ctx, but
   10271             :                          * it's fine as the task is in exit/exec.
   10272             :                          */
   10273           0 :                         cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb,
   10274             :                                                &cancel, true);
   10275           0 :                         ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
   10276             :                 }
   10277             : 
   10278             :                 /* SQPOLL thread does its own polling */
   10279           0 :                 if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) ||
   10280           0 :                     (ctx->sq_data && ctx->sq_data->thread == current)) {
   10281           0 :                         while (!wq_list_empty(&ctx->iopoll_list)) {
   10282           0 :                                 io_iopoll_try_reap_events(ctx);
   10283           0 :                                 ret = true;
   10284             :                         }
   10285             :                 }
   10286             : 
   10287           0 :                 ret |= io_cancel_defer_files(ctx, task, cancel_all);
   10288           0 :                 ret |= io_poll_remove_all(ctx, task, cancel_all);
   10289           0 :                 ret |= io_kill_timeouts(ctx, task, cancel_all);
   10290           0 :                 if (task)
   10291           0 :                         ret |= io_run_task_work();
   10292           0 :                 if (!ret)
   10293             :                         break;
   10294           0 :                 cond_resched();
   10295             :         }
   10296           0 : }
   10297             : 
   10298           0 : static int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
   10299             : {
   10300           0 :         struct io_uring_task *tctx = current->io_uring;
   10301             :         struct io_tctx_node *node;
   10302             :         int ret;
   10303             : 
   10304           0 :         if (unlikely(!tctx)) {
   10305           0 :                 ret = io_uring_alloc_task_context(current, ctx);
   10306           0 :                 if (unlikely(ret))
   10307             :                         return ret;
   10308             : 
   10309           0 :                 tctx = current->io_uring;
   10310           0 :                 if (ctx->iowq_limits_set) {
   10311           0 :                         unsigned int limits[2] = { ctx->iowq_limits[0],
   10312           0 :                                                    ctx->iowq_limits[1], };
   10313             : 
   10314           0 :                         ret = io_wq_max_workers(tctx->io_wq, limits);
   10315           0 :                         if (ret)
   10316           0 :                                 return ret;
   10317             :                 }
   10318             :         }
   10319           0 :         if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
   10320           0 :                 node = kmalloc(sizeof(*node), GFP_KERNEL);
   10321           0 :                 if (!node)
   10322             :                         return -ENOMEM;
   10323           0 :                 node->ctx = ctx;
   10324           0 :                 node->task = current;
   10325             : 
   10326           0 :                 ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
   10327             :                                         node, GFP_KERNEL));
   10328           0 :                 if (ret) {
   10329           0 :                         kfree(node);
   10330           0 :                         return ret;
   10331             :                 }
   10332             : 
   10333           0 :                 mutex_lock(&ctx->uring_lock);
   10334           0 :                 list_add(&node->ctx_node, &ctx->tctx_list);
   10335           0 :                 mutex_unlock(&ctx->uring_lock);
   10336             :         }
   10337           0 :         tctx->last = ctx;
   10338           0 :         return 0;
   10339             : }
   10340             : 
   10341             : /*
    10342             :  * Note that this task has used io_uring. We use it for cancellation purposes.
   10343             :  */
   10344           0 : static inline int io_uring_add_tctx_node(struct io_ring_ctx *ctx)
   10345             : {
   10346           0 :         struct io_uring_task *tctx = current->io_uring;
   10347             : 
   10348           0 :         if (likely(tctx && tctx->last == ctx))
   10349             :                 return 0;
   10350           0 :         return __io_uring_add_tctx_node(ctx);
   10351             : }
   10352             : 
   10353             : /*
   10354             :  * Remove this io_uring_file -> task mapping.
   10355             :  */
   10356           0 : static __cold void io_uring_del_tctx_node(unsigned long index)
   10357             : {
   10358           0 :         struct io_uring_task *tctx = current->io_uring;
   10359             :         struct io_tctx_node *node;
   10360             : 
   10361           0 :         if (!tctx)
   10362             :                 return;
   10363           0 :         node = xa_erase(&tctx->xa, index);
   10364           0 :         if (!node)
   10365             :                 return;
   10366             : 
   10367           0 :         WARN_ON_ONCE(current != node->task);
   10368           0 :         WARN_ON_ONCE(list_empty(&node->ctx_node));
   10369             : 
   10370           0 :         mutex_lock(&node->ctx->uring_lock);
   10371           0 :         list_del(&node->ctx_node);
   10372           0 :         mutex_unlock(&node->ctx->uring_lock);
   10373             : 
   10374           0 :         if (tctx->last == node->ctx)
   10375           0 :                 tctx->last = NULL;
   10376           0 :         kfree(node);
   10377             : }
   10378             : 
   10379           0 : static __cold void io_uring_clean_tctx(struct io_uring_task *tctx)
   10380             : {
   10381           0 :         struct io_wq *wq = tctx->io_wq;
   10382             :         struct io_tctx_node *node;
   10383             :         unsigned long index;
   10384             : 
   10385           0 :         xa_for_each(&tctx->xa, index, node) {
   10386           0 :                 io_uring_del_tctx_node(index);
   10387           0 :                 cond_resched();
   10388             :         }
   10389           0 :         if (wq) {
   10390             :                 /*
    10391             :                  * Must be after io_uring_del_tctx_node() (which removes nodes
    10392             :                  * under uring_lock) to avoid a race with io_uring_try_cancel_iowq().
   10393             :                  */
   10394           0 :                 io_wq_put_and_exit(wq);
   10395           0 :                 tctx->io_wq = NULL;
   10396             :         }
   10397           0 : }
   10398             : 
   10399             : static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
   10400             : {
   10401           0 :         if (tracked)
   10402             :                 return 0;
   10403           0 :         return percpu_counter_sum(&tctx->inflight);
   10404             : }
   10405             : 
   10406             : /*
   10407             :  * Find any io_uring ctx that this task has registered or done IO on, and cancel
    10408             :  * requests. @sqd should be non-NULL iff this is an SQPOLL thread cancellation.
   10409             :  */
   10410           0 : static __cold void io_uring_cancel_generic(bool cancel_all,
   10411             :                                            struct io_sq_data *sqd)
   10412             : {
   10413           0 :         struct io_uring_task *tctx = current->io_uring;
   10414             :         struct io_ring_ctx *ctx;
   10415             :         s64 inflight;
   10416           0 :         DEFINE_WAIT(wait);
   10417             : 
   10418           0 :         WARN_ON_ONCE(sqd && sqd->thread != current);
   10419             : 
   10420           0 :         if (!current->io_uring)
   10421           0 :                 return;
   10422           0 :         if (tctx->io_wq)
   10423           0 :                 io_wq_exit_start(tctx->io_wq);
   10424             : 
   10425           0 :         atomic_inc(&tctx->in_idle);
   10426             :         do {
   10427           0 :                 io_uring_drop_tctx_refs(current);
    10428             :                 /* read completions before cancellations */
   10429           0 :                 inflight = tctx_inflight(tctx, !cancel_all);
   10430           0 :                 if (!inflight)
   10431             :                         break;
   10432             : 
   10433           0 :                 if (!sqd) {
   10434             :                         struct io_tctx_node *node;
   10435             :                         unsigned long index;
   10436             : 
   10437           0 :                         xa_for_each(&tctx->xa, index, node) {
   10438             :                                 /* sqpoll task will cancel all its requests */
   10439           0 :                                 if (node->ctx->sq_data)
   10440           0 :                                         continue;
   10441           0 :                                 io_uring_try_cancel_requests(node->ctx, current,
   10442             :                                                              cancel_all);
   10443             :                         }
   10444             :                 } else {
   10445           0 :                         list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
   10446           0 :                                 io_uring_try_cancel_requests(ctx, current,
   10447             :                                                              cancel_all);
   10448             :                 }
   10449             : 
   10450           0 :                 prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE);
   10451           0 :                 io_run_task_work();
   10452           0 :                 io_uring_drop_tctx_refs(current);
   10453             : 
   10454             :                 /*
   10455             :                  * If we've seen completions, retry without waiting. This
   10456             :                  * avoids a race where a completion comes in before we did
   10457             :                  * prepare_to_wait().
   10458             :                  */
   10459           0 :                 if (inflight == tctx_inflight(tctx, !cancel_all))
   10460           0 :                         schedule();
   10461           0 :                 finish_wait(&tctx->wait, &wait);
   10462             :         } while (1);
   10463             : 
   10464           0 :         io_uring_clean_tctx(tctx);
   10465           0 :         if (cancel_all) {
   10466             :                 /*
   10467             :                  * We shouldn't run task_works after cancel, so just leave
   10468             :                  * ->in_idle set for normal exit.
   10469             :                  */
   10470           0 :                 atomic_dec(&tctx->in_idle);
    10471             :                 /* for exec, all of current's requests should be gone; kill the tctx */
   10472           0 :                 __io_uring_free(current);
   10473             :         }
   10474             : }
   10475             : 
   10476           0 : void __io_uring_cancel(bool cancel_all)
   10477             : {
   10478           0 :         io_uring_cancel_generic(cancel_all, NULL);
   10479           0 : }
   10480             : 
   10481           0 : void io_uring_unreg_ringfd(void)
   10482             : {
   10483           0 :         struct io_uring_task *tctx = current->io_uring;
   10484             :         int i;
   10485             : 
   10486           0 :         for (i = 0; i < IO_RINGFD_REG_MAX; i++) {
   10487           0 :                 if (tctx->registered_rings[i]) {
   10488           0 :                         fput(tctx->registered_rings[i]);
   10489           0 :                         tctx->registered_rings[i] = NULL;
   10490             :                 }
   10491             :         }
   10492           0 : }
   10493             : 
   10494           0 : static int io_ring_add_registered_fd(struct io_uring_task *tctx, int fd,
   10495             :                                      int start, int end)
   10496             : {
   10497             :         struct file *file;
   10498             :         int offset;
   10499             : 
   10500           0 :         for (offset = start; offset < end; offset++) {
   10501           0 :                 offset = array_index_nospec(offset, IO_RINGFD_REG_MAX);
   10502           0 :                 if (tctx->registered_rings[offset])
   10503           0 :                         continue;
   10504             : 
   10505           0 :                 file = fget(fd);
   10506           0 :                 if (!file) {
   10507             :                         return -EBADF;
   10508           0 :                 } else if (file->f_op != &io_uring_fops) {
   10509           0 :                         fput(file);
   10510             :                         return -EOPNOTSUPP;
   10511             :                 }
   10512           0 :                 tctx->registered_rings[offset] = file;
   10513             :                 return offset;
   10514             :         }
   10515             : 
   10516             :         return -EBUSY;
   10517             : }
   10518             : 
   10519             : /*
   10520             :  * Register a ring fd to avoid fdget/fdput for each io_uring_enter()
   10521             :  * invocation. User passes in an array of struct io_uring_rsrc_update
   10522             :  * with ->data set to the ring_fd, and ->offset given for the desired
    10523             :  * index. If no index is desired, the application may set ->offset == -1U
   10524             :  * and we'll find an available index. Returns number of entries
   10525             :  * successfully processed, or < 0 on error if none were processed.
   10526             :  */
   10527           0 : static int io_ringfd_register(struct io_ring_ctx *ctx, void __user *__arg,
   10528             :                               unsigned nr_args)
   10529             : {
   10530           0 :         struct io_uring_rsrc_update __user *arg = __arg;
   10531             :         struct io_uring_rsrc_update reg;
   10532             :         struct io_uring_task *tctx;
   10533             :         int ret, i;
   10534             : 
   10535           0 :         if (!nr_args || nr_args > IO_RINGFD_REG_MAX)
   10536             :                 return -EINVAL;
   10537             : 
   10538           0 :         mutex_unlock(&ctx->uring_lock);
   10539           0 :         ret = io_uring_add_tctx_node(ctx);
   10540           0 :         mutex_lock(&ctx->uring_lock);
   10541           0 :         if (ret)
   10542             :                 return ret;
   10543             : 
   10544           0 :         tctx = current->io_uring;
   10545           0 :         for (i = 0; i < nr_args; i++) {
   10546             :                 int start, end;
   10547             : 
   10548           0 :                 if (copy_from_user(&reg, &arg[i], sizeof(reg))) {
   10549             :                         ret = -EFAULT;
   10550             :                         break;
   10551             :                 }
   10552             : 
   10553           0 :                 if (reg.resv) {
   10554             :                         ret = -EINVAL;
   10555             :                         break;
   10556             :                 }
   10557             : 
   10558           0 :                 if (reg.offset == -1U) {
   10559             :                         start = 0;
   10560             :                         end = IO_RINGFD_REG_MAX;
   10561             :                 } else {
   10562           0 :                         if (reg.offset >= IO_RINGFD_REG_MAX) {
   10563             :                                 ret = -EINVAL;
   10564             :                                 break;
   10565             :                         }
   10566           0 :                         start = reg.offset;
   10567           0 :                         end = start + 1;
   10568             :                 }
   10569             : 
   10570           0 :                 ret = io_ring_add_registered_fd(tctx, reg.data, start, end);
   10571           0 :                 if (ret < 0)
   10572             :                         break;
   10573             : 
   10574           0 :                 reg.offset = ret;
   10575           0 :                 if (copy_to_user(&arg[i], &reg, sizeof(reg))) {
   10576           0 :                         fput(tctx->registered_rings[reg.offset]);
   10577           0 :                         tctx->registered_rings[reg.offset] = NULL;
   10578           0 :                         ret = -EFAULT;
   10579           0 :                         break;
   10580             :                 }
   10581             :         }
   10582             : 
   10583           0 :         return i ? i : ret;
   10584             : }
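                      : 
                      : /*
                      :  * Illustrative userspace sketch of the interface described above:
                      :  * register the ring's own fd, then pass the returned index to
                      :  * io_uring_enter() with IORING_ENTER_REGISTERED_RING instead of the raw
                      :  * fd. Setting ->offset to -1U asks for any free slot; on success the
                      :  * chosen index is written back. ring_fd and to_submit are assumptions
                      :  * for illustration.
                      :  *
                      :  *	struct io_uring_rsrc_update reg = {
                      :  *		.offset	= -1U,
                      :  *		.data	= ring_fd,
                      :  *	};
                      :  *
                      :  *	if (syscall(__NR_io_uring_register, ring_fd,
                      :  *		    IORING_REGISTER_RING_FDS, &reg, 1) == 1)
                      :  *		syscall(__NR_io_uring_enter, reg.offset, to_submit, 0,
                      :  *			IORING_ENTER_REGISTERED_RING, NULL, 0);
                      :  */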
   10585             : 
   10586           0 : static int io_ringfd_unregister(struct io_ring_ctx *ctx, void __user *__arg,
   10587             :                                 unsigned nr_args)
   10588             : {
   10589           0 :         struct io_uring_rsrc_update __user *arg = __arg;
   10590           0 :         struct io_uring_task *tctx = current->io_uring;
   10591             :         struct io_uring_rsrc_update reg;
   10592           0 :         int ret = 0, i;
   10593             : 
   10594           0 :         if (!nr_args || nr_args > IO_RINGFD_REG_MAX)
   10595             :                 return -EINVAL;
   10596           0 :         if (!tctx)
   10597             :                 return 0;
   10598             : 
   10599           0 :         for (i = 0; i < nr_args; i++) {
   10600           0 :                 if (copy_from_user(&reg, &arg[i], sizeof(reg))) {
   10601             :                         ret = -EFAULT;
   10602             :                         break;
   10603             :                 }
   10604           0 :                 if (reg.resv || reg.data || reg.offset >= IO_RINGFD_REG_MAX) {
   10605             :                         ret = -EINVAL;
   10606             :                         break;
   10607             :                 }
   10608             : 
   10609           0 :                 reg.offset = array_index_nospec(reg.offset, IO_RINGFD_REG_MAX);
   10610           0 :                 if (tctx->registered_rings[reg.offset]) {
   10611           0 :                         fput(tctx->registered_rings[reg.offset]);
   10612           0 :                         tctx->registered_rings[reg.offset] = NULL;
   10613             :                 }
   10614             :         }
   10615             : 
   10616           0 :         return i ? i : ret;
   10617             : }
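                      : 
                      : /*
                      :  * The inverse (illustrative sketch): hand the previously returned index
                      :  * back with ->data left zero, as the checks above require. ring_fd and
                      :  * idx are assumptions for illustration.
                      :  *
                      :  *	struct io_uring_rsrc_update unreg = { .offset = idx };
                      :  *
                      :  *	syscall(__NR_io_uring_register, ring_fd,
                      :  *		IORING_UNREGISTER_RING_FDS, &unreg, 1);
                      :  */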
   10618             : 
   10619           0 : static void *io_uring_validate_mmap_request(struct file *file,
   10620             :                                             loff_t pgoff, size_t sz)
   10621             : {
   10622           0 :         struct io_ring_ctx *ctx = file->private_data;
   10623           0 :         loff_t offset = pgoff << PAGE_SHIFT;
   10624             :         struct page *page;
   10625             :         void *ptr;
   10626             : 
   10627           0 :         switch (offset) {
   10628             :         case IORING_OFF_SQ_RING:
   10629             :         case IORING_OFF_CQ_RING:
   10630           0 :                 ptr = ctx->rings;
   10631             :                 break;
   10632             :         case IORING_OFF_SQES:
   10633           0 :                 ptr = ctx->sq_sqes;
   10634             :                 break;
   10635             :         default:
   10636             :                 return ERR_PTR(-EINVAL);
   10637             :         }
   10638             : 
   10639           0 :         page = virt_to_head_page(ptr);
   10640           0 :         if (sz > page_size(page))
   10641             :                 return ERR_PTR(-EINVAL);
   10642             : 
   10643             :         return ptr;
   10644             : }
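                      : 
                      : /*
                      :  * For reference, the userspace side of the offsets validated above
                      :  * (illustrative sketch): each region is mapped with a fixed magic offset
                      :  * from <linux/io_uring.h>, sized from the io_uring_params 'p' that
                      :  * io_uring_setup() filled in. ring_fd is an assumption for illustration.
                      :  *
                      :  *	sq_ring = mmap(NULL, p.sq_off.array + p.sq_entries * sizeof(__u32),
                      :  *		       PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
                      :  *		       ring_fd, IORING_OFF_SQ_RING);
                      :  *	cq_ring = mmap(NULL, p.cq_off.cqes +
                      :  *			     p.cq_entries * sizeof(struct io_uring_cqe),
                      :  *		       PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
                      :  *		       ring_fd, IORING_OFF_CQ_RING);
                      :  *	sqes = mmap(NULL, p.sq_entries * sizeof(struct io_uring_sqe),
                      :  *		    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
                      :  *		    ring_fd, IORING_OFF_SQES);
                      :  */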
   10645             : 
   10646             : #ifdef CONFIG_MMU
   10647             : 
   10648           0 : static __cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
   10649             : {
   10650           0 :         size_t sz = vma->vm_end - vma->vm_start;
   10651             :         unsigned long pfn;
   10652             :         void *ptr;
   10653             : 
   10654           0 :         ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
   10655           0 :         if (IS_ERR(ptr))
   10656           0 :                 return PTR_ERR(ptr);
   10657             : 
   10658           0 :         pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
   10659           0 :         return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
   10660             : }
   10661             : 
   10662             : #else /* !CONFIG_MMU */
   10663             : 
   10664             : static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
   10665             : {
   10666             :         return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
   10667             : }
   10668             : 
   10669             : static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
   10670             : {
   10671             :         return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
   10672             : }
   10673             : 
   10674             : static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
   10675             :         unsigned long addr, unsigned long len,
   10676             :         unsigned long pgoff, unsigned long flags)
   10677             : {
   10678             :         void *ptr;
   10679             : 
   10680             :         ptr = io_uring_validate_mmap_request(file, pgoff, len);
   10681             :         if (IS_ERR(ptr))
   10682             :                 return PTR_ERR(ptr);
   10683             : 
   10684             :         return (unsigned long) ptr;
   10685             : }
   10686             : 
   10687             : #endif /* !CONFIG_MMU */
   10688             : 
   10689           0 : static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
   10690             : {
   10691           0 :         DEFINE_WAIT(wait);
   10692             : 
   10693             :         do {
   10694           0 :                 if (!io_sqring_full(ctx))
   10695             :                         break;
   10696           0 :                 prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
   10697             : 
   10698           0 :                 if (!io_sqring_full(ctx))
   10699             :                         break;
   10700           0 :                 schedule();
   10701           0 :         } while (!signal_pending(current));
   10702             : 
   10703           0 :         finish_wait(&ctx->sqo_sq_wait, &wait);
   10704           0 :         return 0;
   10705             : }
   10706             : 
   10707           0 : static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
   10708             :                           struct __kernel_timespec __user **ts,
   10709             :                           const sigset_t __user **sig)
   10710             : {
   10711             :         struct io_uring_getevents_arg arg;
   10712             : 
   10713             :         /*
   10714             :          * If EXT_ARG isn't set, then we have no timespec and the argp pointer
   10715             :          * is just a pointer to the sigset_t.
   10716             :          */
   10717           0 :         if (!(flags & IORING_ENTER_EXT_ARG)) {
   10718           0 :                 *sig = (const sigset_t __user *) argp;
   10719           0 :                 *ts = NULL;
   10720           0 :                 return 0;
   10721             :         }
   10722             : 
   10723             :         /*
    10724             :          * EXT_ARG is set - ensure we agree on its size and, if it checks out,
    10725             :          * copy in the timespec and sigset_t pointers.
   10726             :          */
   10727           0 :         if (*argsz != sizeof(arg))
   10728             :                 return -EINVAL;
   10729           0 :         if (copy_from_user(&arg, argp, sizeof(arg)))
   10730             :                 return -EFAULT;
   10731           0 :         if (arg.pad)
   10732             :                 return -EINVAL;
   10733           0 :         *sig = u64_to_user_ptr(arg.sigmask);
   10734           0 :         *argsz = arg.sigmask_sz;
   10735           0 :         *ts = u64_to_user_ptr(arg.ts);
   10736           0 :         return 0;
   10737             : }
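                      : 
                      : /*
                      :  * Illustrative userspace sketch of the EXT_ARG layout parsed above: with
                      :  * IORING_ENTER_EXT_ARG set, argp points at an io_uring_getevents_arg and
                      :  * argsz must be its size, letting one wait carry both a signal mask and
                      :  * a timeout. ring_fd and mask are assumptions for illustration.
                      :  *
                      :  *	struct __kernel_timespec ts = { .tv_sec = 1 };
                      :  *	struct io_uring_getevents_arg arg = {
                      :  *		.sigmask	= (__u64)(uintptr_t)&mask,
                      :  *		.sigmask_sz	= _NSIG / 8,
                      :  *		.ts		= (__u64)(uintptr_t)&ts,
                      :  *	};
                      :  *
                      :  *	syscall(__NR_io_uring_enter, ring_fd, 0, 1,
                      :  *		IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG,
                      :  *		&arg, sizeof(arg));
                      :  */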
   10738             : 
   10739           0 : SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
   10740             :                 u32, min_complete, u32, flags, const void __user *, argp,
   10741             :                 size_t, argsz)
   10742             : {
   10743             :         struct io_ring_ctx *ctx;
   10744           0 :         int submitted = 0;
   10745             :         struct fd f;
   10746             :         long ret;
   10747             : 
   10748           0 :         io_run_task_work();
   10749             : 
   10750           0 :         if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
   10751             :                                IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG |
   10752             :                                IORING_ENTER_REGISTERED_RING)))
   10753             :                 return -EINVAL;
   10754             : 
   10755             :         /*
    10756             :          * The ring fd has been registered via IORING_REGISTER_RING_FDS, so we
    10757             :          * need only dereference our task-private array to find it.
   10758             :          */
   10759           0 :         if (flags & IORING_ENTER_REGISTERED_RING) {
   10760           0 :                 struct io_uring_task *tctx = current->io_uring;
   10761             : 
   10762           0 :                 if (!tctx || fd >= IO_RINGFD_REG_MAX)
   10763             :                         return -EINVAL;
   10764           0 :                 fd = array_index_nospec(fd, IO_RINGFD_REG_MAX);
   10765           0 :                 f.file = tctx->registered_rings[fd];
   10766           0 :                 if (unlikely(!f.file))
   10767             :                         return -EBADF;
   10768             :         } else {
   10769           0 :                 f = fdget(fd);
   10770           0 :                 if (unlikely(!f.file))
   10771             :                         return -EBADF;
   10772             :         }
   10773             : 
   10774           0 :         ret = -EOPNOTSUPP;
   10775           0 :         if (unlikely(f.file->f_op != &io_uring_fops))
   10776             :                 goto out_fput;
   10777             : 
   10778           0 :         ret = -ENXIO;
   10779           0 :         ctx = f.file->private_data;
   10780           0 :         if (unlikely(!percpu_ref_tryget(&ctx->refs)))
   10781             :                 goto out_fput;
   10782             : 
   10783           0 :         ret = -EBADFD;
   10784           0 :         if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED))
   10785             :                 goto out;
   10786             : 
   10787             :         /*
   10788             :          * For SQ polling, the thread will do all submissions and completions.
   10789             :          * Just return the requested submit count, and wake the thread if
   10790             :          * we were asked to.
   10791             :          */
   10792           0 :         ret = 0;
   10793           0 :         if (ctx->flags & IORING_SETUP_SQPOLL) {
   10794           0 :                 io_cqring_overflow_flush(ctx);
   10795             : 
   10796           0 :                 if (unlikely(ctx->sq_data->thread == NULL)) {
   10797             :                         ret = -EOWNERDEAD;
   10798             :                         goto out;
   10799             :                 }
   10800           0 :                 if (flags & IORING_ENTER_SQ_WAKEUP)
   10801           0 :                         wake_up(&ctx->sq_data->wait);
   10802           0 :                 if (flags & IORING_ENTER_SQ_WAIT) {
   10803           0 :                         ret = io_sqpoll_wait_sq(ctx);
   10804           0 :                         if (ret)
   10805             :                                 goto out;
   10806             :                 }
   10807           0 :                 submitted = to_submit;
   10808           0 :         } else if (to_submit) {
   10809           0 :                 ret = io_uring_add_tctx_node(ctx);
   10810           0 :                 if (unlikely(ret))
   10811             :                         goto out;
   10812           0 :                 mutex_lock(&ctx->uring_lock);
   10813           0 :                 submitted = io_submit_sqes(ctx, to_submit);
   10814           0 :                 mutex_unlock(&ctx->uring_lock);
   10815             : 
   10816           0 :                 if (submitted != to_submit)
   10817             :                         goto out;
   10818             :         }
   10819           0 :         if (flags & IORING_ENTER_GETEVENTS) {
   10820             :                 const sigset_t __user *sig;
   10821             :                 struct __kernel_timespec __user *ts;
   10822             : 
   10823           0 :                 ret = io_get_ext_arg(flags, argp, &argsz, &ts, &sig);
   10824           0 :                 if (unlikely(ret))
   10825             :                         goto out;
   10826             : 
   10827           0 :                 min_complete = min(min_complete, ctx->cq_entries);
   10828             : 
   10829             :                 /*
    10830             :                  * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user
    10831             :                  * space applications don't need to poll for completion events
    10832             :                  * themselves; they can rely on io_sq_thread to do the polling,
    10833             :                  * which reduces CPU usage and uring_lock contention.
   10834             :                  */
   10835           0 :                 if (ctx->flags & IORING_SETUP_IOPOLL &&
   10836             :                     !(ctx->flags & IORING_SETUP_SQPOLL)) {
   10837           0 :                         ret = io_iopoll_check(ctx, min_complete);
   10838             :                 } else {
   10839           0 :                         ret = io_cqring_wait(ctx, min_complete, sig, argsz, ts);
   10840             :                 }
   10841             :         }
   10842             : 
   10843             : out:
   10844           0 :         percpu_ref_put(&ctx->refs);
   10845             : out_fput:
   10846           0 :         if (!(flags & IORING_ENTER_REGISTERED_RING))
   10847           0 :                 fdput(f);
   10848           0 :         return submitted ? submitted : ret;
   10849             : }
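                      : 
                      : /*
                      :  * Typical invocations of the syscall above (illustrative sketch): a plain
                      :  * submit-and-wait, and the SQPOLL variant where the kernel thread consumes
                      :  * the SQ ring and only needs waking when it has gone idle. ring_fd, n and
                      :  * sq_flags are assumptions for illustration.
                      :  *
                      :  *	syscall(__NR_io_uring_enter, ring_fd, n, 1,
                      :  *		IORING_ENTER_GETEVENTS, NULL, 0);
                      :  *
                      :  *	if (*sq_flags & IORING_SQ_NEED_WAKEUP)
                      :  *		syscall(__NR_io_uring_enter, ring_fd, n, 0,
                      :  *			IORING_ENTER_SQ_WAKEUP, NULL, 0);
                      :  */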
   10850             : 
   10851             : #ifdef CONFIG_PROC_FS
   10852           0 : static __cold int io_uring_show_cred(struct seq_file *m, unsigned int id,
   10853             :                 const struct cred *cred)
   10854             : {
   10855           0 :         struct user_namespace *uns = seq_user_ns(m);
   10856             :         struct group_info *gi;
   10857             :         kernel_cap_t cap;
   10858             :         unsigned __capi;
   10859             :         int g;
   10860             : 
   10861           0 :         seq_printf(m, "%5d\n", id);
   10862           0 :         seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
   10863           0 :         seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
   10864           0 :         seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
   10865           0 :         seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
   10866           0 :         seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
   10867           0 :         seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
   10868           0 :         seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
   10869           0 :         seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
   10870           0 :         seq_puts(m, "\n\tGroups:\t");
   10871           0 :         gi = cred->group_info;
   10872           0 :         for (g = 0; g < gi->ngroups; g++) {
   10873           0 :                 seq_put_decimal_ull(m, g ? " " : "",
   10874           0 :                                         from_kgid_munged(uns, gi->gid[g]));
   10875             :         }
   10876           0 :         seq_puts(m, "\n\tCapEff:\t");
   10877           0 :         cap = cred->cap_effective;
   10878           0 :         CAP_FOR_EACH_U32(__capi)
   10879           0 :                 seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
   10880           0 :         seq_putc(m, '\n');
   10881           0 :         return 0;
   10882             : }
   10883             : 
   10884           0 : static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
   10885             :                                           struct seq_file *m)
   10886             : {
   10887           0 :         struct io_sq_data *sq = NULL;
   10888             :         struct io_overflow_cqe *ocqe;
   10889           0 :         struct io_rings *r = ctx->rings;
   10890           0 :         unsigned int sq_mask = ctx->sq_entries - 1, cq_mask = ctx->cq_entries - 1;
   10891           0 :         unsigned int sq_head = READ_ONCE(r->sq.head);
   10892           0 :         unsigned int sq_tail = READ_ONCE(r->sq.tail);
   10893           0 :         unsigned int cq_head = READ_ONCE(r->cq.head);
   10894           0 :         unsigned int cq_tail = READ_ONCE(r->cq.tail);
   10895             :         unsigned int sq_entries, cq_entries;
   10896             :         bool has_lock;
   10897             :         unsigned int i;
   10898             : 
   10899             :         /*
    10900             :          * We may get imprecise sqe and cqe info if the ring is actively
    10901             :          * running, since we read cached_sq_head and cached_cq_tail without
    10902             :          * uring_lock, and sq_tail and cq_head are changed by userspace. But
    10903             :          * that's OK, since we usually only use this info when the ring is stuck.
   10904             :          */
   10905           0 :         seq_printf(m, "SqMask:\t0x%x\n", sq_mask);
   10906           0 :         seq_printf(m, "SqHead:\t%u\n", sq_head);
   10907           0 :         seq_printf(m, "SqTail:\t%u\n", sq_tail);
   10908           0 :         seq_printf(m, "CachedSqHead:\t%u\n", ctx->cached_sq_head);
   10909           0 :         seq_printf(m, "CqMask:\t0x%x\n", cq_mask);
   10910           0 :         seq_printf(m, "CqHead:\t%u\n", cq_head);
   10911           0 :         seq_printf(m, "CqTail:\t%u\n", cq_tail);
   10912           0 :         seq_printf(m, "CachedCqTail:\t%u\n", ctx->cached_cq_tail);
   10913           0 :         seq_printf(m, "SQEs:\t%u\n", sq_tail - ctx->cached_sq_head);
   10914           0 :         sq_entries = min(sq_tail - sq_head, ctx->sq_entries);
   10915           0 :         for (i = 0; i < sq_entries; i++) {
   10916           0 :                 unsigned int entry = i + sq_head;
   10917           0 :                 unsigned int sq_idx = READ_ONCE(ctx->sq_array[entry & sq_mask]);
   10918             :                 struct io_uring_sqe *sqe;
   10919             : 
   10920           0 :                 if (sq_idx > sq_mask)
   10921           0 :                         continue;
   10922           0 :                 sqe = &ctx->sq_sqes[sq_idx];
   10923           0 :                 seq_printf(m, "%5u: opcode:%d, fd:%d, flags:%x, user_data:%llu\n",
   10924           0 :                            sq_idx, sqe->opcode, sqe->fd, sqe->flags,
   10925             :                            sqe->user_data);
   10926             :         }
   10927           0 :         seq_printf(m, "CQEs:\t%u\n", cq_tail - cq_head);
   10928           0 :         cq_entries = min(cq_tail - cq_head, ctx->cq_entries);
   10929           0 :         for (i = 0; i < cq_entries; i++) {
   10930           0 :                 unsigned int entry = i + cq_head;
   10931           0 :                 struct io_uring_cqe *cqe = &r->cqes[entry & cq_mask];
   10932             : 
   10933           0 :                 seq_printf(m, "%5u: user_data:%llu, res:%d, flag:%x\n",
   10934             :                            entry & cq_mask, cqe->user_data, cqe->res,
   10935             :                            cqe->flags);
   10936             :         }
   10937             : 
   10938             :         /*
   10939             :          * Avoid ABBA deadlock between the seq lock and the io_uring mutex,
    10940             :          * since the fdinfo case grabs it in the opposite order from normal use
   10941             :          * cases. If we fail to get the lock, we just don't iterate any
   10942             :          * structures that could be going away outside the io_uring mutex.
   10943             :          */
   10944           0 :         has_lock = mutex_trylock(&ctx->uring_lock);
   10945             : 
   10946           0 :         if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
   10947           0 :                 sq = ctx->sq_data;
   10948           0 :                 if (!sq->thread)
   10949           0 :                         sq = NULL;
   10950             :         }
   10951             : 
   10952           0 :         seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
   10953           0 :         seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
   10954           0 :         seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
   10955           0 :         for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
   10956           0 :                 struct file *f = io_file_from_index(ctx, i);
   10957             : 
   10958           0 :                 if (f)
   10959           0 :                         seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
   10960             :                 else
   10961           0 :                         seq_printf(m, "%5u: <none>\n", i);
   10962             :         }
   10963           0 :         seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
   10964           0 :         for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
   10965           0 :                 struct io_mapped_ubuf *buf = ctx->user_bufs[i];
   10966           0 :                 unsigned int len = buf->ubuf_end - buf->ubuf;
   10967             : 
   10968           0 :                 seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, len);
   10969             :         }
   10970           0 :         if (has_lock && !xa_empty(&ctx->personalities)) {
   10971             :                 unsigned long index;
   10972             :                 const struct cred *cred;
   10973             : 
   10974           0 :                 seq_printf(m, "Personalities:\n");
   10975           0 :                 xa_for_each(&ctx->personalities, index, cred)
   10976           0 :                         io_uring_show_cred(m, index, cred);
   10977             :         }
   10978           0 :         if (has_lock)
   10979           0 :                 mutex_unlock(&ctx->uring_lock);
   10980             : 
   10981           0 :         seq_puts(m, "PollList:\n");
   10982           0 :         spin_lock(&ctx->completion_lock);
   10983           0 :         for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
   10984           0 :                 struct hlist_head *list = &ctx->cancel_hash[i];
   10985             :                 struct io_kiocb *req;
   10986             : 
   10987           0 :                 hlist_for_each_entry(req, list, hash_node)
   10988           0 :                         seq_printf(m, "  op=%d, task_works=%d\n", req->opcode,
   10989           0 :                                         task_work_pending(req->task));
   10990             :         }
   10991             : 
   10992           0 :         seq_puts(m, "CqOverflowList:\n");
   10993           0 :         list_for_each_entry(ocqe, &ctx->cq_overflow_list, list) {
   10994           0 :                 struct io_uring_cqe *cqe = &ocqe->cqe;
   10995             : 
   10996           0 :                 seq_printf(m, "  user_data=%llu, res=%d, flags=%x\n",
   10997             :                            cqe->user_data, cqe->res, cqe->flags);
   10998             : 
   10999             :         }
   11000             : 
   11001           0 :         spin_unlock(&ctx->completion_lock);
   11002           0 : }
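                      : 
                      : /*
                      :  * All of the above is reachable from userspace via procfs, e.g.
                      :  * "cat /proc/<pid>/fdinfo/<ring fd>", which yields the SqMask, SqHead
                      :  * and related counters, the SQE and CQE dumps, SqThread info, registered
                      :  * files and buffers, personalities, and the poll and CQ-overflow lists
                      :  * printed by the seq_printf() calls above.
                      :  */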
   11003             : 
   11004           0 : static __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
   11005             : {
   11006           0 :         struct io_ring_ctx *ctx = f->private_data;
   11007             : 
   11008           0 :         if (percpu_ref_tryget(&ctx->refs)) {
   11009           0 :                 __io_uring_show_fdinfo(ctx, m);
   11010           0 :                 percpu_ref_put(&ctx->refs);
   11011             :         }
   11012           0 : }
   11013             : #endif
   11014             : 
   11015             : static const struct file_operations io_uring_fops = {
   11016             :         .release        = io_uring_release,
   11017             :         .mmap           = io_uring_mmap,
   11018             : #ifndef CONFIG_MMU
   11019             :         .get_unmapped_area = io_uring_nommu_get_unmapped_area,
   11020             :         .mmap_capabilities = io_uring_nommu_mmap_capabilities,
   11021             : #endif
   11022             :         .poll           = io_uring_poll,
   11023             : #ifdef CONFIG_PROC_FS
   11024             :         .show_fdinfo    = io_uring_show_fdinfo,
   11025             : #endif
   11026             : };
   11027             : 
   11028           0 : static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx,
   11029             :                                          struct io_uring_params *p)
   11030             : {
   11031             :         struct io_rings *rings;
   11032             :         size_t size, sq_array_offset;
   11033             : 
   11034             :         /* make sure these are sane, as we already accounted them */
   11035           0 :         ctx->sq_entries = p->sq_entries;
   11036           0 :         ctx->cq_entries = p->cq_entries;
   11037             : 
   11038           0 :         size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
   11039           0 :         if (size == SIZE_MAX)
   11040             :                 return -EOVERFLOW;
   11041             : 
   11042           0 :         rings = io_mem_alloc(size);
   11043           0 :         if (!rings)
   11044             :                 return -ENOMEM;
   11045             : 
   11046           0 :         ctx->rings = rings;
   11047           0 :         ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
   11048           0 :         rings->sq_ring_mask = p->sq_entries - 1;
   11049           0 :         rings->cq_ring_mask = p->cq_entries - 1;
   11050           0 :         rings->sq_ring_entries = p->sq_entries;
   11051           0 :         rings->cq_ring_entries = p->cq_entries;
   11052             : 
   11053           0 :         size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
   11054           0 :         if (size == SIZE_MAX) {
   11055           0 :                 io_mem_free(ctx->rings);
   11056           0 :                 ctx->rings = NULL;
   11057             :                 return -EOVERFLOW;
   11058             :         }
   11059             : 
   11060           0 :         ctx->sq_sqes = io_mem_alloc(size);
   11061           0 :         if (!ctx->sq_sqes) {
   11062           0 :                 io_mem_free(ctx->rings);
   11063           0 :                 ctx->rings = NULL;
   11064             :                 return -ENOMEM;
   11065             :         }
   11066             : 
   11067             :         return 0;
   11068             : }
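
io_allocate_scq_urings() lays the SQ/CQ ring headers, the CQE array, and the sqe index array into one allocation; the byte offsets of each field are reported back to userspace through p->sq_off and p->cq_off (filled in by io_uring_create() below). A sketch of how userspace conventionally derives its mmap lengths from those offsets, assuming `p` is the io_uring_params returned by io_uring_setup(2) (this mirrors what liburing does):

        #include <linux/io_uring.h>

        /* SQ ring bytes: header fields up to and including the sqe index array */
        size_t sq_ring_sz = p.sq_off.array + p.sq_entries * sizeof(__u32);

        /* CQ ring bytes: header fields plus the CQE array itself */
        size_t cq_ring_sz = p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe);
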
   11069             : 
   11070           0 : static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
   11071             : {
   11072             :         int ret, fd;
   11073             : 
   11074           0 :         fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
   11075           0 :         if (fd < 0)
   11076             :                 return fd;
   11077             : 
   11078           0 :         ret = io_uring_add_tctx_node(ctx);
   11079           0 :         if (ret) {
   11080           0 :                 put_unused_fd(fd);
   11081           0 :                 return ret;
   11082             :         }
   11083           0 :         fd_install(fd, file);
   11084           0 :         return fd;
   11085             : }
   11086             : 
   11087             : /*
   11088             :  * Allocate an anonymous fd: this is what constitutes the application-visible
   11089             :  * backing of an io_uring instance. The application mmaps this fd to gain
   11090             :  * access to the SQ/CQ ring details. If UNIX sockets are enabled, we have to
   11091             :  * tie this fd to a socket for file garbage collection purposes.
   11092             :  */
   11093             : static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
   11094             : {
   11095             :         struct file *file;
   11096             : #if defined(CONFIG_UNIX)
   11097             :         int ret;
   11098             : 
   11099             :         ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
   11100             :                                 &ctx->ring_sock);
   11101             :         if (ret)
   11102             :                 return ERR_PTR(ret);
   11103             : #endif
   11104             : 
   11105           0 :         file = anon_inode_getfile_secure("[io_uring]", &io_uring_fops, ctx,
   11106             :                                          O_RDWR | O_CLOEXEC, NULL);
   11107             : #if defined(CONFIG_UNIX)
   11108             :         if (IS_ERR(file)) {
   11109             :                 sock_release(ctx->ring_sock);
   11110             :                 ctx->ring_sock = NULL;
   11111             :         } else {
   11112             :                 ctx->ring_sock->file = file;
   11113             :         }
   11114             : #endif
   11115             :         return file;
   11116             : }
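
Per the comment above, the fd returned here is only useful once mmapped. A sketch of the three standard mappings, assuming `ring_fd`, `p`, and the ring sizes from the earlier sketch; the IORING_OFF_* constants are part of the uapi:

        #include <sys/mman.h>
        #include <linux/io_uring.h>

        /* SQ ring: head/tail/mask/flags plus the sqe index array */
        void *sq_ring = mmap(NULL, sq_ring_sz, PROT_READ | PROT_WRITE,
                             MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQ_RING);

        /* CQ ring; skippable when IORING_FEAT_SINGLE_MMAP says both rings
         * share the single region mapped above */
        void *cq_ring = mmap(NULL, cq_ring_sz, PROT_READ | PROT_WRITE,
                             MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_CQ_RING);

        /* the sqe array itself */
        void *sqes = mmap(NULL, p.sq_entries * sizeof(struct io_uring_sqe),
                          PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
                          ring_fd, IORING_OFF_SQES);
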
   11117             : 
   11118           0 : static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
   11119             :                                   struct io_uring_params __user *params)
   11120             : {
   11121             :         struct io_ring_ctx *ctx;
   11122             :         struct file *file;
   11123             :         int ret;
   11124             : 
   11125           0 :         if (!entries)
   11126             :                 return -EINVAL;
   11127           0 :         if (entries > IORING_MAX_ENTRIES) {
   11128           0 :                 if (!(p->flags & IORING_SETUP_CLAMP))
   11129             :                         return -EINVAL;
   11130             :                 entries = IORING_MAX_ENTRIES;
   11131             :         }
   11132             : 
   11133             :         /*
   11134             :          * Use twice as many entries for the CQ ring. It's possible for the
   11135             :          * application to drive a higher depth than the size of the SQ ring,
   11136             :          * since the sqes are only used at submission time. This allows a bit
   11137             :          * of flexibility in overcommitting. If the application has set
   11138             :          * IORING_SETUP_CQSIZE, it will have passed in the desired number
   11139             :          * of CQ ring entries manually.
   11140             :          */
   11141           0 :         p->sq_entries = roundup_pow_of_two(entries);
   11142           0 :         if (p->flags & IORING_SETUP_CQSIZE) {
   11143             :                 /*
   11144             :                  * If IORING_SETUP_CQSIZE is set, we do the same roundup
   11145             :                  * to a power-of-two, if it isn't already. We do NOT impose
   11146             :                  * any cq vs sq ring sizing.
   11147             :                  */
   11148           0 :                 if (!p->cq_entries)
   11149             :                         return -EINVAL;
   11150           0 :                 if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
   11151           0 :                         if (!(p->flags & IORING_SETUP_CLAMP))
   11152             :                                 return -EINVAL;
   11153           0 :                         p->cq_entries = IORING_MAX_CQ_ENTRIES;
   11154             :                 }
   11155           0 :                 p->cq_entries = roundup_pow_of_two(p->cq_entries);
   11156           0 :                 if (p->cq_entries < p->sq_entries)
   11157             :                         return -EINVAL;
   11158             :         } else {
   11159           0 :                 p->cq_entries = 2 * p->sq_entries;
   11160             :         }
   11161             : 
   11162           0 :         ctx = io_ring_ctx_alloc(p);
   11163           0 :         if (!ctx)
   11164             :                 return -ENOMEM;
   11165           0 :         ctx->compat = in_compat_syscall();
   11166           0 :         if (!capable(CAP_IPC_LOCK))
   11167           0 :                 ctx->user = get_uid(current_user());
   11168             : 
   11169             :         /*
   11170             :          * This is just grabbed for accounting purposes. When a process exits,
   11171             :          * the mm is exited and dropped before the files, hence we need to hang
   11172             :          * on to this mm purely for the purposes of being able to unaccount
   11173             :          * memory (locked/pinned vm). It's not used for anything else.
   11174             :          */
   11175           0 :         mmgrab(current->mm);
   11176           0 :         ctx->mm_account = current->mm;
   11177             : 
   11178           0 :         ret = io_allocate_scq_urings(ctx, p);
   11179           0 :         if (ret)
   11180             :                 goto err;
   11181             : 
   11182           0 :         ret = io_sq_offload_create(ctx, p);
   11183           0 :         if (ret)
   11184             :                 goto err;
   11185             :         /* always set a rsrc node */
   11186           0 :         ret = io_rsrc_node_switch_start(ctx);
   11187           0 :         if (ret)
   11188             :                 goto err;
   11189           0 :         io_rsrc_node_switch(ctx, NULL);
   11190             : 
   11191           0 :         memset(&p->sq_off, 0, sizeof(p->sq_off));
   11192           0 :         p->sq_off.head = offsetof(struct io_rings, sq.head);
   11193           0 :         p->sq_off.tail = offsetof(struct io_rings, sq.tail);
   11194           0 :         p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
   11195           0 :         p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
   11196           0 :         p->sq_off.flags = offsetof(struct io_rings, sq_flags);
   11197           0 :         p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
   11198           0 :         p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
   11199             : 
   11200           0 :         memset(&p->cq_off, 0, sizeof(p->cq_off));
   11201           0 :         p->cq_off.head = offsetof(struct io_rings, cq.head);
   11202           0 :         p->cq_off.tail = offsetof(struct io_rings, cq.tail);
   11203           0 :         p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
   11204           0 :         p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
   11205           0 :         p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
   11206           0 :         p->cq_off.cqes = offsetof(struct io_rings, cqes);
   11207           0 :         p->cq_off.flags = offsetof(struct io_rings, cq_flags);
   11208             : 
   11209           0 :         p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
   11210             :                         IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
   11211             :                         IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
   11212             :                         IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
   11213             :                         IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
   11214             :                         IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP |
   11215             :                         IORING_FEAT_LINKED_FILE;
   11216             : 
   11217           0 :         if (copy_to_user(params, p, sizeof(*p))) {
   11218             :                 ret = -EFAULT;
   11219             :                 goto err;
   11220             :         }
   11221             : 
   11222           0 :         file = io_uring_get_file(ctx);
   11223           0 :         if (IS_ERR(file)) {
   11224           0 :                 ret = PTR_ERR(file);
   11225           0 :                 goto err;
   11226             :         }
   11227             : 
   11228             :         /*
   11229             :          * Install ring fd as the very last thing, so we don't risk someone
   11230             :          * having closed it before we finish setup
   11231             :          */
   11232           0 :         ret = io_uring_install_fd(ctx, file);
   11233           0 :         if (ret < 0) {
   11234             :                 /* fput will clean it up */
   11235           0 :                 fput(file);
   11236           0 :                 return ret;
   11237             :         }
   11238             : 
   11239             :         trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
   11240             :         return ret;
   11241             : err:
   11242           0 :         io_ring_ctx_wait_and_kill(ctx);
   11243           0 :         return ret;
   11244             : }
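
The sizing logic above can be exercised directly: entries are rounded up to a power of two, the CQ ring defaults to twice the SQ ring, and IORING_SETUP_CQSIZE overrides that default. A hedged sketch using the raw syscall rather than liburing:

        #include <string.h>
        #include <unistd.h>
        #include <sys/syscall.h>
        #include <linux/io_uring.h>

        struct io_uring_params p;

        memset(&p, 0, sizeof(p));
        p.flags = IORING_SETUP_CQSIZE | IORING_SETUP_CLAMP;
        p.cq_entries = 4096;    /* must round up to >= sq_entries, or -EINVAL */

        int ring_fd = syscall(__NR_io_uring_setup, 128, &p);
        /* on success: p.sq_entries == 128, p.cq_entries == 4096 */
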
   11245             : 
   11246             : /*
   11247             :  * Sets up an io_uring context, and returns the fd. The application asks for
   11248             :  * a ring size; we return the actual sq/cq ring sizes (among other things)
   11249             :  * in the params structure passed in.
   11250             :  */
   11251           0 : static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
   11252             : {
   11253             :         struct io_uring_params p;
   11254             :         int i;
   11255             : 
   11256           0 :         if (copy_from_user(&p, params, sizeof(p)))
   11257             :                 return -EFAULT;
   11258           0 :         for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
   11259           0 :                 if (p.resv[i])
   11260             :                         return -EINVAL;
   11261             :         }
   11262             : 
   11263           0 :         if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
   11264             :                         IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
   11265             :                         IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
   11266             :                         IORING_SETUP_R_DISABLED | IORING_SETUP_SUBMIT_ALL))
   11267             :                 return -EINVAL;
   11268             : 
   11269           0 :         return  io_uring_create(entries, &p, params);
   11270             : }
   11271             : 
   11272           0 : SYSCALL_DEFINE2(io_uring_setup, u32, entries,
   11273             :                 struct io_uring_params __user *, params)
   11274             : {
   11275           0 :         return io_uring_setup(entries, params);
   11276             : }
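
Because the kernel advertises its capabilities in p->features, portable callers test the bits they rely on after setup instead of guessing from the kernel version. Continuing the previous sketch:

        if (ring_fd >= 0) {
                if (p.features & IORING_FEAT_SINGLE_MMAP) {
                        /* SQ and CQ rings share one region: one mmap of
                         * max(sq_ring_sz, cq_ring_sz) at IORING_OFF_SQ_RING */
                }
                if (!(p.features & IORING_FEAT_NODROP)) {
                        /* old kernels may drop CQEs on overflow; track
                         * in-flight requests or size the CQ ring generously */
                }
        }
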
   11277             : 
   11278           0 : static __cold int io_probe(struct io_ring_ctx *ctx, void __user *arg,
   11279             :                            unsigned nr_args)
   11280             : {
   11281             :         struct io_uring_probe *p;
   11282             :         size_t size;
   11283             :         int i, ret;
   11284             : 
   11285           0 :         size = struct_size(p, ops, nr_args);
   11286           0 :         if (size == SIZE_MAX)
   11287             :                 return -EOVERFLOW;
   11288           0 :         p = kzalloc(size, GFP_KERNEL);
   11289           0 :         if (!p)
   11290             :                 return -ENOMEM;
   11291             : 
   11292           0 :         ret = -EFAULT;
   11293           0 :         if (copy_from_user(p, arg, size))
   11294             :                 goto out;
   11295           0 :         ret = -EINVAL;
   11296           0 :         if (memchr_inv(p, 0, size))
   11297             :                 goto out;
   11298             : 
   11299           0 :         p->last_op = IORING_OP_LAST - 1;
   11300           0 :         if (nr_args > IORING_OP_LAST)
   11301           0 :                 nr_args = IORING_OP_LAST;
   11302             : 
   11303           0 :         for (i = 0; i < nr_args; i++) {
   11304           0 :                 p->ops[i].op = i;
   11305           0 :                 if (!io_op_defs[i].not_supported)
   11306           0 :                         p->ops[i].flags = IO_URING_OP_SUPPORTED;
   11307             :         }
   11308           0 :         p->ops_len = i;
   11309             : 
   11310           0 :         ret = 0;
   11311           0 :         if (copy_to_user(arg, p, size))
   11312           0 :                 ret = -EFAULT;
   11313             : out:
   11314           0 :         kfree(p);
   11315             :         return ret;
   11316             : }
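
io_probe() is the runtime discovery mechanism for opcode support. A minimal userspace sketch, staying under the 256-op cap enforced by __io_uring_register() further down; note the probe buffer must be zeroed, since non-zero input is rejected with -EINVAL above:

        #include <stdlib.h>
        #include <unistd.h>
        #include <sys/syscall.h>
        #include <linux/io_uring.h>

        struct io_uring_probe *probe;
        size_t len = sizeof(*probe) + 256 * sizeof(struct io_uring_probe_op);

        probe = calloc(1, len);         /* zeroed, as the kernel requires */
        if (probe &&
            syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PROBE,
                    probe, 256) == 0 &&
            IORING_OP_READ < probe->ops_len &&
            (probe->ops[IORING_OP_READ].flags & IO_URING_OP_SUPPORTED)) {
                /* IORING_OP_READ is available on this kernel */
        }
        free(probe);
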
   11317             : 
   11318           0 : static int io_register_personality(struct io_ring_ctx *ctx)
   11319             : {
   11320             :         const struct cred *creds;
   11321             :         u32 id;
   11322             :         int ret;
   11323             : 
   11324           0 :         creds = get_current_cred();
   11325             : 
   11326           0 :         ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
   11327           0 :                         XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
   11328           0 :         if (ret < 0) {
   11329             :                 put_cred(creds);
   11330             :                 return ret;
   11331             :         }
   11332           0 :         return id;
   11333             : }
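
Note that registration returns the new personality id itself rather than zero; the application stores that id in sqe->personality to issue individual requests under the snapshotted credentials. A brief sketch, using the same includes as the earlier sketches:

        /* snapshot the calling task's credentials */
        int pers_id = syscall(__NR_io_uring_register, ring_fd,
                              IORING_REGISTER_PERSONALITY, NULL, 0);

        /* per request: sqe->personality = pers_id; */

        /* drop the snapshot; the id travels in nr_args, arg must be NULL */
        syscall(__NR_io_uring_register, ring_fd,
                IORING_UNREGISTER_PERSONALITY, NULL, pers_id);
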
   11334             : 
   11335           0 : static __cold int io_register_restrictions(struct io_ring_ctx *ctx,
   11336             :                                            void __user *arg, unsigned int nr_args)
   11337             : {
   11338             :         struct io_uring_restriction *res;
   11339             :         size_t size;
   11340             :         int i, ret;
   11341             : 
   11342             :         /* Restrictions allowed only if rings started disabled */
   11343           0 :         if (!(ctx->flags & IORING_SETUP_R_DISABLED))
   11344             :                 return -EBADFD;
   11345             : 
   11346             :         /* We allow only a single restrictions registration */
   11347           0 :         if (ctx->restrictions.registered)
   11348             :                 return -EBUSY;
   11349             : 
   11350           0 :         if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
   11351             :                 return -EINVAL;
   11352             : 
   11353           0 :         size = array_size(nr_args, sizeof(*res));
   11354           0 :         if (size == SIZE_MAX)
   11355             :                 return -EOVERFLOW;
   11356             : 
   11357           0 :         res = memdup_user(arg, size);
   11358           0 :         if (IS_ERR(res))
   11359           0 :                 return PTR_ERR(res);
   11360             : 
   11361             :         ret = 0;
   11362             : 
   11363           0 :         for (i = 0; i < nr_args; i++) {
   11364           0 :                 switch (res[i].opcode) {
   11365             :                 case IORING_RESTRICTION_REGISTER_OP:
   11366           0 :                         if (res[i].register_op >= IORING_REGISTER_LAST) {
   11367             :                                 ret = -EINVAL;
   11368             :                                 goto out;
   11369             :                         }
   11370             : 
   11371           0 :                         __set_bit(res[i].register_op,
   11372           0 :                                   ctx->restrictions.register_op);
   11373             :                         break;
   11374             :                 case IORING_RESTRICTION_SQE_OP:
   11375           0 :                         if (res[i].sqe_op >= IORING_OP_LAST) {
   11376             :                                 ret = -EINVAL;
   11377             :                                 goto out;
   11378             :                         }
   11379             : 
   11380           0 :                         __set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
   11381             :                         break;
   11382             :                 case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
   11383           0 :                         ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
   11384           0 :                         break;
   11385             :                 case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
   11386           0 :                         ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
   11387           0 :                         break;
   11388             :                 default:
   11389             :                         ret = -EINVAL;
   11390             :                         goto out;
   11391             :                 }
   11392             :         }
   11393             : 
   11394             : out:
   11395             :         /* Reset all restrictions if an error happened */
   11396           0 :         if (ret != 0)
   11397           0 :                 memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
   11398             :         else
   11399           0 :                 ctx->restrictions.registered = true;
   11400             : 
   11401           0 :         kfree(res);
   11402           0 :         return ret;
   11403             : }
   11404             : 
   11405           0 : static int io_register_enable_rings(struct io_ring_ctx *ctx)
   11406             : {
   11407           0 :         if (!(ctx->flags & IORING_SETUP_R_DISABLED))
   11408             :                 return -EBADFD;
   11409             : 
   11410           0 :         if (ctx->restrictions.registered)
   11411           0 :                 ctx->restricted = 1;
   11412             : 
   11413           0 :         ctx->flags &= ~IORING_SETUP_R_DISABLED;
   11414           0 :         if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait))
   11415           0 :                 wake_up(&ctx->sq_data->wait);
   11416             :         return 0;
   11417             : }
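
Together, the two functions above form a sandboxing handshake: the ring must be created with IORING_SETUP_R_DISABLED, restrictions may be registered exactly once, and enabling the ring is what flips enforcement on. A sketch of the sequence, assuming a `ring_fd` created with IORING_SETUP_R_DISABLED:

        #include <string.h>
        #include <unistd.h>
        #include <sys/syscall.h>
        #include <linux/io_uring.h>

        struct io_uring_restriction res[2];

        memset(res, 0, sizeof(res));
        res[0].opcode = IORING_RESTRICTION_SQE_OP;      /* allow only reads */
        res[0].sqe_op = IORING_OP_READ;
        res[1].opcode = IORING_RESTRICTION_REGISTER_OP; /* allow buffer reg */
        res[1].register_op = IORING_REGISTER_BUFFERS;

        /* only while still R_DISABLED, and only once (-EBUSY afterwards) */
        syscall(__NR_io_uring_register, ring_fd,
                IORING_REGISTER_RESTRICTIONS, res, 2);

        /* start the ring; the restriction set is enforced from here on */
        syscall(__NR_io_uring_register, ring_fd,
                IORING_REGISTER_ENABLE_RINGS, NULL, 0);
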
   11418             : 
   11419           0 : static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
   11420             :                                      struct io_uring_rsrc_update2 *up,
   11421             :                                      unsigned nr_args)
   11422             : {
   11423             :         __u32 tmp;
   11424             :         int err;
   11425             : 
   11426           0 :         if (check_add_overflow(up->offset, nr_args, &tmp))
   11427             :                 return -EOVERFLOW;
   11428           0 :         err = io_rsrc_node_switch_start(ctx);
   11429           0 :         if (err)
   11430             :                 return err;
   11431             : 
   11432           0 :         switch (type) {
   11433             :         case IORING_RSRC_FILE:
   11434           0 :                 return __io_sqe_files_update(ctx, up, nr_args);
   11435             :         case IORING_RSRC_BUFFER:
   11436           0 :                 return __io_sqe_buffers_update(ctx, up, nr_args);
   11437             :         }
   11438             :         return -EINVAL;
   11439             : }
   11440             : 
   11441           0 : static int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
   11442             :                                     unsigned nr_args)
   11443             : {
   11444             :         struct io_uring_rsrc_update2 up;
   11445             : 
   11446           0 :         if (!nr_args)
   11447             :                 return -EINVAL;
   11448           0 :         memset(&up, 0, sizeof(up));
   11449           0 :         if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
   11450             :                 return -EFAULT;
   11451           0 :         if (up.resv || up.resv2)
   11452             :                 return -EINVAL;
   11453           0 :         return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
   11454             : }
   11455             : 
   11456           0 : static int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
   11457             :                                    unsigned size, unsigned type)
   11458             : {
   11459             :         struct io_uring_rsrc_update2 up;
   11460             : 
   11461           0 :         if (size != sizeof(up))
   11462             :                 return -EINVAL;
   11463           0 :         if (copy_from_user(&up, arg, sizeof(up)))
   11464             :                 return -EFAULT;
   11465           0 :         if (!up.nr || up.resv || up.resv2)
   11466             :                 return -EINVAL;
   11467           0 :         return __io_register_rsrc_update(ctx, type, &up, up.nr);
   11468             : }
   11469             : 
   11470           0 : static __cold int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
   11471             :                             unsigned int size, unsigned int type)
   11472             : {
   11473             :         struct io_uring_rsrc_register rr;
   11474             : 
   11475             :         /* keep it extendible */
   11476           0 :         if (size != sizeof(rr))
   11477             :                 return -EINVAL;
   11478             : 
   11479           0 :         memset(&rr, 0, sizeof(rr));
   11480           0 :         if (copy_from_user(&rr, arg, size))
   11481             :                 return -EFAULT;
   11482           0 :         if (!rr.nr || rr.resv || rr.resv2)
   11483             :                 return -EINVAL;
   11484             : 
   11485           0 :         switch (type) {
   11486             :         case IORING_RSRC_FILE:
   11487           0 :                 return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
   11488           0 :                                              rr.nr, u64_to_user_ptr(rr.tags));
   11489             :         case IORING_RSRC_BUFFER:
   11490           0 :                 return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
   11491           0 :                                                rr.nr, u64_to_user_ptr(rr.tags));
   11492             :         }
   11493             :         return -EINVAL;
   11494             : }
   11495             : 
   11496           0 : static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
   11497             :                                        void __user *arg, unsigned len)
   11498             : {
   11499           0 :         struct io_uring_task *tctx = current->io_uring;
   11500             :         cpumask_var_t new_mask;
   11501             :         int ret;
   11502             : 
   11503           0 :         if (!tctx || !tctx->io_wq)
   11504             :                 return -EINVAL;
   11505             : 
   11506           0 :         if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
   11507             :                 return -ENOMEM;
   11508             : 
   11509           0 :         cpumask_clear(new_mask);
   11510           0 :         if (len > cpumask_size())
   11511           0 :                 len = cpumask_size();
   11512             : 
   11513             :         if (in_compat_syscall()) {
   11514             :                 ret = compat_get_bitmap(cpumask_bits(new_mask),
   11515             :                                         (const compat_ulong_t __user *)arg,
   11516             :                                         len * 8 /* CHAR_BIT */);
   11517             :         } else {
   11518           0 :                 ret = copy_from_user(new_mask, arg, len);
   11519             :         }
   11520             : 
   11521           0 :         if (ret) {
   11522             :                 free_cpumask_var(new_mask);
   11523             :                 return -EFAULT;
   11524             :         }
   11525             : 
   11526           0 :         ret = io_wq_cpu_affinity(tctx->io_wq, new_mask);
   11527           0 :         free_cpumask_var(new_mask);
   11528             :         return ret;
   11529             : }
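
The affinity argument is a plain CPU bitmap whose length in bytes is passed as nr_args, so a cpu_set_t can be handed in directly. A sketch pinning the io-wq workers to CPUs 0 and 1 (arbitrary example CPUs):

        #include <sched.h>
        #include <unistd.h>
        #include <sys/syscall.h>
        #include <linux/io_uring.h>

        cpu_set_t mask;

        CPU_ZERO(&mask);
        CPU_SET(0, &mask);
        CPU_SET(1, &mask);

        /* nr_args carries the mask size in bytes */
        syscall(__NR_io_uring_register, ring_fd,
                IORING_REGISTER_IOWQ_AFF, &mask, sizeof(mask));

        /* clear the restriction again */
        syscall(__NR_io_uring_register, ring_fd,
                IORING_UNREGISTER_IOWQ_AFF, NULL, 0);
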
   11530             : 
   11531           0 : static __cold int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
   11532             : {
   11533           0 :         struct io_uring_task *tctx = current->io_uring;
   11534             : 
   11535           0 :         if (!tctx || !tctx->io_wq)
   11536             :                 return -EINVAL;
   11537             : 
   11538           0 :         return io_wq_cpu_affinity(tctx->io_wq, NULL);
   11539             : }
   11540             : 
   11541           0 : static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
   11542             :                                                void __user *arg)
   11543             :         __must_hold(&ctx->uring_lock)
   11544             : {
   11545             :         struct io_tctx_node *node;
   11546           0 :         struct io_uring_task *tctx = NULL;
   11547           0 :         struct io_sq_data *sqd = NULL;
   11548             :         __u32 new_count[2];
   11549             :         int i, ret;
   11550             : 
   11551           0 :         if (copy_from_user(new_count, arg, sizeof(new_count)))
   11552             :                 return -EFAULT;
   11553           0 :         for (i = 0; i < ARRAY_SIZE(new_count); i++)
   11554           0 :                 if (new_count[i] > INT_MAX)
   11555             :                         return -EINVAL;
   11556             : 
   11557           0 :         if (ctx->flags & IORING_SETUP_SQPOLL) {
   11558           0 :                 sqd = ctx->sq_data;
   11559           0 :                 if (sqd) {
   11560             :                         /*
   11561             :                          * Observe the correct sqd->lock -> ctx->uring_lock
   11562             :                          * ordering. Fine to drop uring_lock here, we hold
   11563             :                          * a ref to the ctx.
   11564             :                          */
   11565           0 :                         refcount_inc(&sqd->refs);
   11566           0 :                         mutex_unlock(&ctx->uring_lock);
   11567           0 :                         mutex_lock(&sqd->lock);
   11568           0 :                         mutex_lock(&ctx->uring_lock);
   11569           0 :                         if (sqd->thread)
   11570           0 :                                 tctx = sqd->thread->io_uring;
   11571             :                 }
   11572             :         } else {
   11573           0 :                 tctx = current->io_uring;
   11574             :         }
   11575             : 
   11576             :         BUILD_BUG_ON(sizeof(new_count) != sizeof(ctx->iowq_limits));
   11577             : 
   11578           0 :         for (i = 0; i < ARRAY_SIZE(new_count); i++)
   11579           0 :                 if (new_count[i])
   11580           0 :                         ctx->iowq_limits[i] = new_count[i];
   11581           0 :         ctx->iowq_limits_set = true;
   11582             : 
   11583           0 :         if (tctx && tctx->io_wq) {
   11584           0 :                 ret = io_wq_max_workers(tctx->io_wq, new_count);
   11585           0 :                 if (ret)
   11586             :                         goto err;
   11587             :         } else {
   11588           0 :                 memset(new_count, 0, sizeof(new_count));
   11589             :         }
   11590             : 
   11591           0 :         if (sqd) {
   11592           0 :                 mutex_unlock(&sqd->lock);
   11593           0 :                 io_put_sq_data(sqd);
   11594             :         }
   11595             : 
   11596           0 :         if (copy_to_user(arg, new_count, sizeof(new_count)))
   11597             :                 return -EFAULT;
   11598             : 
   11599             :         /* that's it for SQPOLL; only the SQPOLL task creates requests */
   11600           0 :         if (sqd)
   11601             :                 return 0;
   11602             : 
   11603             :         /* now propagate the restriction to all registered users */
   11604           0 :         list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
   11605           0 :                 struct io_uring_task *tctx = node->task->io_uring;
   11606             : 
   11607           0 :                 if (WARN_ON_ONCE(!tctx->io_wq))
   11608           0 :                         continue;
   11609             : 
   11610           0 :                 for (i = 0; i < ARRAY_SIZE(new_count); i++)
   11611           0 :                         new_count[i] = ctx->iowq_limits[i];
   11612             :                 /* ignore errors; it always returns zero anyway */
   11613           0 :                 (void)io_wq_max_workers(tctx->io_wq, new_count);
   11614             :         }
   11615             :         return 0;
   11616             : err:
   11617           0 :         if (sqd) {
   11618           0 :                 mutex_unlock(&sqd->lock);
   11619           0 :                 io_put_sq_data(sqd);
   11620             :         }
   11621             :         return ret;
   11622             : }
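
Since zero entries leave a limit unchanged while the previous values are copied back out, a {0, 0} call doubles as a query. A sketch; index 0 caps bounded (regular file) workers, index 1 caps unbounded workers:

        #include <unistd.h>
        #include <sys/syscall.h>
        #include <linux/io_uring.h>

        unsigned int counts[2] = { 8, 64 };     /* [0] bounded, [1] unbounded */

        /* set new limits; the previous limits come back in counts[] */
        syscall(__NR_io_uring_register, ring_fd,
                IORING_REGISTER_IOWQ_MAX_WORKERS, counts, 2);

        /* pure query: zeros change nothing but still report the limits */
        unsigned int cur[2] = { 0, 0 };
        syscall(__NR_io_uring_register, ring_fd,
                IORING_REGISTER_IOWQ_MAX_WORKERS, cur, 2);
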
   11623             : 
   11624           0 : static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
   11625             :                                void __user *arg, unsigned nr_args)
   11626             :         __releases(ctx->uring_lock)
   11627             :         __acquires(ctx->uring_lock)
   11628             : {
   11629             :         int ret;
   11630             : 
   11631             :         /*
   11632             :          * We're inside the ring mutex, if the ref is already dying, then
   11633             :          * someone else killed the ctx or is already going through
   11634             :          * io_uring_register().
   11635             :          */
   11636           0 :         if (percpu_ref_is_dying(&ctx->refs))
   11637             :                 return -ENXIO;
   11638             : 
   11639           0 :         if (ctx->restricted) {
   11640           0 :                 if (opcode >= IORING_REGISTER_LAST)
   11641             :                         return -EINVAL;
   11642           0 :                 opcode = array_index_nospec(opcode, IORING_REGISTER_LAST);
   11643           0 :                 if (!test_bit(opcode, ctx->restrictions.register_op))
   11644             :                         return -EACCES;
   11645             :         }
   11646             : 
   11647           0 :         switch (opcode) {
   11648             :         case IORING_REGISTER_BUFFERS:
   11649           0 :                 ret = io_sqe_buffers_register(ctx, arg, nr_args, NULL);
   11650           0 :                 break;
   11651             :         case IORING_UNREGISTER_BUFFERS:
   11652           0 :                 ret = -EINVAL;
   11653           0 :                 if (arg || nr_args)
   11654             :                         break;
   11655           0 :                 ret = io_sqe_buffers_unregister(ctx);
   11656           0 :                 break;
   11657             :         case IORING_REGISTER_FILES:
   11658           0 :                 ret = io_sqe_files_register(ctx, arg, nr_args, NULL);
   11659           0 :                 break;
   11660             :         case IORING_UNREGISTER_FILES:
   11661           0 :                 ret = -EINVAL;
   11662           0 :                 if (arg || nr_args)
   11663             :                         break;
   11664           0 :                 ret = io_sqe_files_unregister(ctx);
   11665           0 :                 break;
   11666             :         case IORING_REGISTER_FILES_UPDATE:
   11667           0 :                 ret = io_register_files_update(ctx, arg, nr_args);
   11668           0 :                 break;
   11669             :         case IORING_REGISTER_EVENTFD:
   11670           0 :                 ret = -EINVAL;
   11671           0 :                 if (nr_args != 1)
   11672             :                         break;
   11673           0 :                 ret = io_eventfd_register(ctx, arg, 0);
   11674           0 :                 break;
   11675             :         case IORING_REGISTER_EVENTFD_ASYNC:
   11676           0 :                 ret = -EINVAL;
   11677           0 :                 if (nr_args != 1)
   11678             :                         break;
   11679           0 :                 ret = io_eventfd_register(ctx, arg, 1);
   11680           0 :                 break;
   11681             :         case IORING_UNREGISTER_EVENTFD:
   11682           0 :                 ret = -EINVAL;
   11683           0 :                 if (arg || nr_args)
   11684             :                         break;
   11685             :                 ret = io_eventfd_unregister(ctx);
   11686             :                 break;
   11687             :         case IORING_REGISTER_PROBE:
   11688           0 :                 ret = -EINVAL;
   11689           0 :                 if (!arg || nr_args > 256)
   11690             :                         break;
   11691           0 :                 ret = io_probe(ctx, arg, nr_args);
   11692           0 :                 break;
   11693             :         case IORING_REGISTER_PERSONALITY:
   11694           0 :                 ret = -EINVAL;
   11695           0 :                 if (arg || nr_args)
   11696             :                         break;
   11697           0 :                 ret = io_register_personality(ctx);
   11698           0 :                 break;
   11699             :         case IORING_UNREGISTER_PERSONALITY:
   11700           0 :                 ret = -EINVAL;
   11701           0 :                 if (arg)
   11702             :                         break;
   11703           0 :                 ret = io_unregister_personality(ctx, nr_args);
   11704           0 :                 break;
   11705             :         case IORING_REGISTER_ENABLE_RINGS:
   11706           0 :                 ret = -EINVAL;
   11707           0 :                 if (arg || nr_args)
   11708             :                         break;
   11709           0 :                 ret = io_register_enable_rings(ctx);
   11710           0 :                 break;
   11711             :         case IORING_REGISTER_RESTRICTIONS:
   11712           0 :                 ret = io_register_restrictions(ctx, arg, nr_args);
   11713           0 :                 break;
   11714             :         case IORING_REGISTER_FILES2:
   11715           0 :                 ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_FILE);
   11716           0 :                 break;
   11717             :         case IORING_REGISTER_FILES_UPDATE2:
   11718           0 :                 ret = io_register_rsrc_update(ctx, arg, nr_args,
   11719             :                                               IORING_RSRC_FILE);
   11720           0 :                 break;
   11721             :         case IORING_REGISTER_BUFFERS2:
   11722           0 :                 ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_BUFFER);
   11723           0 :                 break;
   11724             :         case IORING_REGISTER_BUFFERS_UPDATE:
   11725           0 :                 ret = io_register_rsrc_update(ctx, arg, nr_args,
   11726             :                                               IORING_RSRC_BUFFER);
   11727           0 :                 break;
   11728             :         case IORING_REGISTER_IOWQ_AFF:
   11729           0 :                 ret = -EINVAL;
   11730           0 :                 if (!arg || !nr_args)
   11731             :                         break;
   11732           0 :                 ret = io_register_iowq_aff(ctx, arg, nr_args);
   11733           0 :                 break;
   11734             :         case IORING_UNREGISTER_IOWQ_AFF:
   11735           0 :                 ret = -EINVAL;
   11736           0 :                 if (arg || nr_args)
   11737             :                         break;
   11738           0 :                 ret = io_unregister_iowq_aff(ctx);
   11739           0 :                 break;
   11740             :         case IORING_REGISTER_IOWQ_MAX_WORKERS:
   11741           0 :                 ret = -EINVAL;
   11742           0 :                 if (!arg || nr_args != 2)
   11743             :                         break;
   11744           0 :                 ret = io_register_iowq_max_workers(ctx, arg);
   11745           0 :                 break;
   11746             :         case IORING_REGISTER_RING_FDS:
   11747           0 :                 ret = io_ringfd_register(ctx, arg, nr_args);
   11748           0 :                 break;
   11749             :         case IORING_UNREGISTER_RING_FDS:
   11750           0 :                 ret = io_ringfd_unregister(ctx, arg, nr_args);
   11751           0 :                 break;
   11752             :         default:
   11753             :                 ret = -EINVAL;
   11754             :                 break;
   11755             :         }
   11756             : 
   11757             :         return ret;
   11758             : }
   11759             : 
   11760           0 : SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
   11761             :                 void __user *, arg, unsigned int, nr_args)
   11762             : {
   11763             :         struct io_ring_ctx *ctx;
   11764           0 :         long ret = -EBADF;
   11765             :         struct fd f;
   11766             : 
   11767           0 :         f = fdget(fd);
   11768           0 :         if (!f.file)
   11769             :                 return -EBADF;
   11770             : 
   11771           0 :         ret = -EOPNOTSUPP;
   11772           0 :         if (f.file->f_op != &io_uring_fops)
   11773             :                 goto out_fput;
   11774             : 
   11775           0 :         ctx = f.file->private_data;
   11776             : 
   11777           0 :         io_run_task_work();
   11778             : 
   11779           0 :         mutex_lock(&ctx->uring_lock);
   11780           0 :         ret = __io_uring_register(ctx, opcode, arg, nr_args);
   11781           0 :         mutex_unlock(&ctx->uring_lock);
   11782           0 :         trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs, ret);
   11783             : out_fput:
   11784           0 :         fdput(f);
   11785             :         return ret;
   11786             : }
   11787             : 
   11788           1 : static int __init io_uring_init(void)
   11789             : {
   11790             : #define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
   11791             :         BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
   11792             :         BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
   11793             : } while (0)
   11794             : 
   11795             : #define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
   11796             :         __BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
   11797             :         BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
   11798             :         BUILD_BUG_SQE_ELEM(0,  __u8,   opcode);
   11799             :         BUILD_BUG_SQE_ELEM(1,  __u8,   flags);
   11800             :         BUILD_BUG_SQE_ELEM(2,  __u16,  ioprio);
   11801             :         BUILD_BUG_SQE_ELEM(4,  __s32,  fd);
   11802             :         BUILD_BUG_SQE_ELEM(8,  __u64,  off);
   11803             :         BUILD_BUG_SQE_ELEM(8,  __u64,  addr2);
   11804             :         BUILD_BUG_SQE_ELEM(16, __u64,  addr);
   11805             :         BUILD_BUG_SQE_ELEM(16, __u64,  splice_off_in);
   11806             :         BUILD_BUG_SQE_ELEM(24, __u32,  len);
   11807             :         BUILD_BUG_SQE_ELEM(28,     __kernel_rwf_t, rw_flags);
   11808             :         BUILD_BUG_SQE_ELEM(28, /* compat */   int, rw_flags);
   11809             :         BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
   11810             :         BUILD_BUG_SQE_ELEM(28, __u32,  fsync_flags);
   11811             :         BUILD_BUG_SQE_ELEM(28, /* compat */ __u16,  poll_events);
   11812             :         BUILD_BUG_SQE_ELEM(28, __u32,  poll32_events);
   11813             :         BUILD_BUG_SQE_ELEM(28, __u32,  sync_range_flags);
   11814             :         BUILD_BUG_SQE_ELEM(28, __u32,  msg_flags);
   11815             :         BUILD_BUG_SQE_ELEM(28, __u32,  timeout_flags);
   11816             :         BUILD_BUG_SQE_ELEM(28, __u32,  accept_flags);
   11817             :         BUILD_BUG_SQE_ELEM(28, __u32,  cancel_flags);
   11818             :         BUILD_BUG_SQE_ELEM(28, __u32,  open_flags);
   11819             :         BUILD_BUG_SQE_ELEM(28, __u32,  statx_flags);
   11820             :         BUILD_BUG_SQE_ELEM(28, __u32,  fadvise_advice);
   11821             :         BUILD_BUG_SQE_ELEM(28, __u32,  splice_flags);
   11822             :         BUILD_BUG_SQE_ELEM(32, __u64,  user_data);
   11823             :         BUILD_BUG_SQE_ELEM(40, __u16,  buf_index);
   11824             :         BUILD_BUG_SQE_ELEM(40, __u16,  buf_group);
   11825             :         BUILD_BUG_SQE_ELEM(42, __u16,  personality);
   11826             :         BUILD_BUG_SQE_ELEM(44, __s32,  splice_fd_in);
   11827             :         BUILD_BUG_SQE_ELEM(44, __u32,  file_index);
   11828             : 
   11829             :         BUILD_BUG_ON(sizeof(struct io_uring_files_update) !=
   11830             :                      sizeof(struct io_uring_rsrc_update));
   11831             :         BUILD_BUG_ON(sizeof(struct io_uring_rsrc_update) >
   11832             :                      sizeof(struct io_uring_rsrc_update2));
   11833             : 
   11834             :         /* ->buf_index is u16 */
   11835             :         BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));
   11836             : 
   11837             :         /* should fit into one byte */
   11838             :         BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8));
   11839             :         BUILD_BUG_ON(SQE_COMMON_FLAGS >= (1 << 8));
   11840             :         BUILD_BUG_ON((SQE_VALID_FLAGS | SQE_COMMON_FLAGS) != SQE_VALID_FLAGS);
   11841             : 
   11842             :         BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
   11843             :         BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof(int));
   11844             : 
   11845           1 :         req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
   11846             :                                 SLAB_ACCOUNT);
   11847           1 :         return 0;
   11848             : };
   11849             : __initcall(io_uring_init);
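
The BUILD_BUG_SQE_ELEM() checks above freeze the uapi layout at kernel build time; userspace can mirror the same guarantee against its copy of the header with C11 static assertions. A minimal sketch of the pattern, using two of the offsets verified above:

        #include <assert.h>
        #include <stddef.h>
        #include <linux/io_uring.h>

        /* the SQE is a fixed 64-byte ABI object */
        static_assert(sizeof(struct io_uring_sqe) == 64, "sqe size is ABI");
        static_assert(offsetof(struct io_uring_sqe, opcode) == 0, "opcode");
        static_assert(offsetof(struct io_uring_sqe, user_data) == 32, "user_data");
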

Generated by: LCOV version 1.14