LCOV - code coverage report
File: block/blk-sysfs.c (top level - block)
Test: coverage.info, 2022-12-09 01:23:36
Coverage: lines 0 / 311 (0.0 %), functions 0 / 61 (0.0 %)

// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-wbt.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"

struct queue_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct request_queue *, char *);
        ssize_t (*store)(struct request_queue *, const char *, size_t);
};

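/*
 * Each attribute under /sys/block/<disk>/queue is described by one of
 * these entries.  The ->show()/->store() callbacks are dispatched through
 * queue_attr_show()/queue_attr_store() further down, which hold
 * q->sysfs_lock for the duration of the call, so individual callbacks
 * need no extra serialization against one another.
 */
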
static ssize_t
queue_var_show(unsigned long var, char *page)
{
        return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
        int err;
        unsigned long v;

        err = kstrtoul(page, 10, &v);
        if (err || v > UINT_MAX)
                return -EINVAL;

        *var = v;

        return count;
}

static ssize_t queue_var_store64(s64 *var, const char *page)
{
        int err;
        s64 v;

        err = kstrtos64(page, 10, &v);
        if (err < 0)
                return err;

        *var = v;
        return 0;
}

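/*
 * Note the conventions above: queue_var_store() parses an unsigned
 * decimal and rejects anything above UINT_MAX, so callers may safely
 * truncate the result to an unsigned int.  On success it returns the
 * byte count it was handed, which is exactly what a sysfs ->store()
 * method is expected to return.  queue_var_store64() returns 0 on
 * success instead, so its callers return "count" themselves.
 */
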
static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->nr_requests, page);
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long nr;
        int ret, err;

        if (!queue_is_mq(q))
                return -EINVAL;

        ret = queue_var_store(&nr, page, count);
        if (ret < 0)
                return ret;

        if (nr < BLKDEV_MIN_RQ)
                nr = BLKDEV_MIN_RQ;

        err = blk_mq_update_nr_requests(q, nr);
        if (err)
                return err;

        return ret;
}

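/*
 * From userspace this pair backs "nr_requests" (device name and values
 * below are illustrative):
 *
 *   $ cat /sys/block/sda/queue/nr_requests
 *   64
 *   $ echo 128 > /sys/block/sda/queue/nr_requests
 *
 * Values below BLKDEV_MIN_RQ are silently raised to that floor, and a
 * failure from blk_mq_update_nr_requests() is propagated to the writer.
 */
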
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
        unsigned long ra_kb;

        if (!q->disk)
                return -EINVAL;
        ra_kb = q->disk->bdi->ra_pages << (PAGE_SHIFT - 10);
        return queue_var_show(ra_kb, page);
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long ra_kb;
        ssize_t ret;

        if (!q->disk)
                return -EINVAL;
        ret = queue_var_store(&ra_kb, page, count);
        if (ret < 0)
                return ret;
        q->disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
        return ret;
}

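/*
 * PAGE_SHIFT - 10 converts between pages and kibibytes: a page is
 * 2^PAGE_SHIFT bytes and 1 KiB is 2^10 bytes.  With 4 KiB pages,
 * ra_pages << 2 yields read_ahead_kb, and the store path shifts back
 * the other way.
 */
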
static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
        int max_sectors_kb = queue_max_sectors(q) >> 1;

        return queue_var_show(max_sectors_kb, page);
}

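/*
 * Block layer sector counts are in units of 512 bytes (SECTOR_SHIFT == 9)
 * throughout this file: ">> 1" converts sectors to KiB, and "<< 9"
 * converts sectors to bytes, as in the *_show() helpers that follow.
 */
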
static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_max_segments(q), page);
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
                char *page)
{
        return queue_var_show(queue_max_discard_segments(q), page);
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.max_integrity_segments, page);
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_max_segment_size(q), page);
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                (unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                       (unsigned long long)q->limits.max_discard_sectors << 9);
}

static ssize_t queue_discard_max_store(struct request_queue *q,
                                       const char *page, size_t count)
{
        unsigned long max_discard;
        ssize_t ret = queue_var_store(&max_discard, page, count);

        if (ret < 0)
                return ret;

        if (max_discard & (q->limits.discard_granularity - 1))
                return -EINVAL;

        max_discard >>= 9;
        if (max_discard > UINT_MAX)
                return -EINVAL;

        if (max_discard > q->limits.max_hw_discard_sectors)
                max_discard = q->limits.max_hw_discard_sectors;

        q->limits.max_discard_sectors = max_discard;
        return ret;
}

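/*
 * queue_discard_max_store() takes a byte count.  It must be a multiple
 * of the discard granularity (the "x & (granularity - 1)" test assumes
 * discard_granularity is a power of two), is then converted to sectors,
 * and is capped at the hardware limit rather than rejected.
 */
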
static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
        return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
        return queue_var_show(0, page);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                (unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

static ssize_t queue_zone_write_granularity_show(struct request_queue *q,
                                                 char *page)
{
        return queue_var_show(queue_zone_write_granularity(q), page);
}

static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
{
        unsigned long long max_sectors = q->limits.max_zone_append_sectors;

        return sprintf(page, "%llu\n", max_sectors << SECTOR_SHIFT);
}

static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long max_sectors_kb,
                max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
                        page_kb = 1 << (PAGE_SHIFT - 10);
        ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

        if (ret < 0)
                return ret;

        max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
                                         q->limits.max_dev_sectors >> 1);

        if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
                return -EINVAL;

        spin_lock_irq(&q->queue_lock);
        q->limits.max_sectors = max_sectors_kb << 1;
        if (q->disk)
                q->disk->bdi->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
        spin_unlock_irq(&q->queue_lock);

        return ret;
}

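/*
 * max_sectors_kb is only accepted within [page_kb, max_hw_sectors_kb]:
 * the soft limit may never exceed what the hardware (or the device's
 * max_dev_sectors limit) allows, and must cover at least one page.
 * bdi->io_pages is kept in sync so readahead sizing matches the new
 * request size limit.
 */
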
static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
        int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

        return queue_var_show(max_hw_sectors_kb, page);
}

static ssize_t queue_virt_boundary_mask_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.virt_boundary_mask, page);
}

#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)                            \
static ssize_t                                                          \
queue_##name##_show(struct request_queue *q, char *page)                \
{                                                                       \
        int bit;                                                        \
        bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);             \
        return queue_var_show(neg ? !bit : bit, page);                  \
}                                                                       \
static ssize_t                                                          \
queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
{                                                                       \
        unsigned long val;                                              \
        ssize_t ret;                                                    \
        ret = queue_var_store(&val, page, count);                       \
        if (ret < 0)                                                    \
                return ret;                                             \
        if (neg)                                                        \
                val = !val;                                             \
                                                                        \
        if (val)                                                        \
                blk_queue_flag_set(QUEUE_FLAG_##flag, q);               \
        else                                                            \
                blk_queue_flag_clear(QUEUE_FLAG_##flag, q);             \
        return ret;                                                     \
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
#undef QUEUE_SYSFS_BIT_FNS
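
/*
 * For example, QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1) expands to
 * queue_nonrot_show()/queue_nonrot_store() operating on
 * QUEUE_FLAG_NONROT with negated polarity: the "rotational" attribute
 * reads 1 for rotational devices, i.e. when QUEUE_FLAG_NONROT is clear.
 */
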
static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
        switch (blk_queue_zoned_model(q)) {
        case BLK_ZONED_HA:
                return sprintf(page, "host-aware\n");
        case BLK_ZONED_HM:
                return sprintf(page, "host-managed\n");
        default:
                return sprintf(page, "none\n");
        }
}

static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
{
        return queue_var_show(blk_queue_nr_zones(q), page);
}

static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_max_open_zones(q), page);
}

static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_max_active_zones(q), page);
}

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
        return queue_var_show((blk_queue_nomerges(q) << 1) |
                               blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
                                    size_t count)
{
        unsigned long nm;
        ssize_t ret = queue_var_store(&nm, page, count);

        if (ret < 0)
                return ret;

        blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
        blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
        if (nm == 2)
                blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
        else if (nm)
                blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

        return ret;
}

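/*
 * "nomerges" is a two-bit encoding: 0 allows all merges, 1 sets only
 * QUEUE_FLAG_NOXMERGES (no extended back/front merge lookups), and 2
 * sets QUEUE_FLAG_NOMERGES (no merging at all).  The show side packs
 * the two flags back into the same encoding.
 */
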
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
        bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
        bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

        return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
        ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
        unsigned long val;

        ret = queue_var_store(&val, page, count);
        if (ret < 0)
                return ret;

        if (val == 2) {
                blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
                blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
        } else if (val == 1) {
                blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
                blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
        } else if (val == 0) {
                blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
                blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
        }
#endif
        return ret;
}

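/*
 * "rq_affinity" accepts 0 (completions may run on any CPU), 1 (complete
 * on a CPU in the same group as the submitter, QUEUE_FLAG_SAME_COMP),
 * or 2 (force completion on the exact submitting CPU, SAME_COMP plus
 * QUEUE_FLAG_SAME_FORCE).  The show side reports the same 0/1/2 values
 * via "set << force".  On !CONFIG_SMP builds the store is a no-op that
 * returns -EINVAL.
 */
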
static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
        int val;

        if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
                val = BLK_MQ_POLL_CLASSIC;
        else
                val = q->poll_nsec / 1000;

        return sprintf(page, "%d\n", val);
}

static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
                                size_t count)
{
        int err, val;

        if (!q->mq_ops || !q->mq_ops->poll)
                return -EINVAL;

        err = kstrtoint(page, 10, &val);
        if (err < 0)
                return err;

        if (val == BLK_MQ_POLL_CLASSIC)
                q->poll_nsec = BLK_MQ_POLL_CLASSIC;
        else if (val >= 0)
                q->poll_nsec = val * 1000;
        else
                return -EINVAL;

        return count;
}

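/*
 * "io_poll_delay": -1 (BLK_MQ_POLL_CLASSIC) selects classic busy
 * polling, while a value >= 0 enables hybrid polling (0 lets the kernel
 * pick the sleep time; a positive value is a fixed sleep time in
 * microseconds, stored internally in nanoseconds).
 */
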
static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
        return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
                                size_t count)
{
        if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
                return -EINVAL;
        pr_info_ratelimited("writes to the poll attribute are ignored.\n");
        pr_info_ratelimited("please use driver specific parameters instead.\n");
        return count;
}

static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
}

static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
                                  size_t count)
{
        unsigned int val;
        int err;

        err = kstrtou32(page, 10, &val);
        if (err || val == 0)
                return -EINVAL;

        blk_queue_rq_timeout(q, msecs_to_jiffies(val));

        return count;
}

static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
        if (!wbt_rq_qos(q))
                return -EINVAL;

        return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
}

static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
                                  size_t count)
{
        struct rq_qos *rqos;
        ssize_t ret;
        s64 val;

        ret = queue_var_store64(&val, page);
        if (ret < 0)
                return ret;
        if (val < -1)
                return -EINVAL;

        rqos = wbt_rq_qos(q);
        if (!rqos) {
                ret = wbt_init(q);
                if (ret)
                        return ret;
        }

        if (val == -1)
                val = wbt_default_latency_nsec(q);
        else if (val >= 0)
                val *= 1000ULL;

        if (wbt_get_min_lat(q) == val)
                return count;

        /*
         * Ensure that the queue is idled, in case the latency update
         * ends up either enabling or disabling wbt completely. We can't
         * have IO inflight if that happens.
         */
        blk_mq_freeze_queue(q);
        blk_mq_quiesce_queue(q);

        wbt_set_min_lat(q, val);

        blk_mq_unquiesce_queue(q);
        blk_mq_unfreeze_queue(q);

        return count;
}

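/*
 * "wbt_lat_usec" is exposed in microseconds while wbt tracks
 * nanoseconds, hence the div_u64(..., 1000) on read and the
 * "val *= 1000ULL" on write.  Writing -1 re-arms the device-type
 * default via wbt_default_latency_nsec(); writing 0 disables writeback
 * throttling.
 */
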
static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
        if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
                return sprintf(page, "write back\n");

        return sprintf(page, "write through\n");
}

static ssize_t queue_wc_store(struct request_queue *q, const char *page,
                              size_t count)
{
        int set = -1;

        if (!strncmp(page, "write back", 10))
                set = 1;
        else if (!strncmp(page, "write through", 13) ||
                 !strncmp(page, "none", 4))
                set = 0;

        if (set == -1)
                return -EINVAL;

        if (set)
                blk_queue_flag_set(QUEUE_FLAG_WC, q);
        else
                blk_queue_flag_clear(QUEUE_FLAG_WC, q);

        return count;
}

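/*
 * The strncmp() prefix matches above deliberately tolerate a trailing
 * newline, so "echo 'write back' > .../write_cache" works as expected.
 * Writing here only changes how the block layer issues flushes; it does
 * not reconfigure the cache on the device itself.
 */
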
static ssize_t queue_fua_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
        return queue_var_show(blk_queue_dax(q), page);
}

#define QUEUE_RO_ENTRY(_prefix, _name)                  \
static struct queue_sysfs_entry _prefix##_entry = {     \
        .attr   = { .name = _name, .mode = 0444 },      \
        .show   = _prefix##_show,                       \
};

#define QUEUE_RW_ENTRY(_prefix, _name)                  \
static struct queue_sysfs_entry _prefix##_entry = {     \
        .attr   = { .name = _name, .mode = 0644 },      \
        .show   = _prefix##_show,                       \
        .store  = _prefix##_store,                      \
};

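/*
 * For example, QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb") defines
 * queue_ra_entry, wiring the "read_ahead_kb" attribute (mode 0644) to
 * queue_ra_show() and queue_ra_store(); QUEUE_RO_ENTRY omits ->store
 * and uses mode 0444.
 */
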
QUEUE_RW_ENTRY(queue_requests, "nr_requests");
QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
QUEUE_RW_ENTRY(elv_iosched, "scheduler");

QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");

QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes");
QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");

QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");

QUEUE_RO_ENTRY(queue_zoned, "zoned");
QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");

QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
QUEUE_RW_ENTRY(queue_wc, "write_cache");
QUEUE_RO_ENTRY(queue_fua, "fua");
QUEUE_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
QUEUE_RW_ENTRY(blk_throtl_sample_time, "throttle_sample_time");
#endif

/* legacy alias for logical_block_size: */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
        .attr = {.name = "hw_sector_size", .mode = 0444 },
        .show = queue_logical_block_size_show,
};

QUEUE_RW_ENTRY(queue_nonrot, "rotational");
QUEUE_RW_ENTRY(queue_iostats, "iostats");
QUEUE_RW_ENTRY(queue_random, "add_random");
QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");

static struct attribute *queue_attrs[] = {
        &queue_requests_entry.attr,
        &queue_ra_entry.attr,
        &queue_max_hw_sectors_entry.attr,
        &queue_max_sectors_entry.attr,
        &queue_max_segments_entry.attr,
        &queue_max_discard_segments_entry.attr,
        &queue_max_integrity_segments_entry.attr,
        &queue_max_segment_size_entry.attr,
        &elv_iosched_entry.attr,
        &queue_hw_sector_size_entry.attr,
        &queue_logical_block_size_entry.attr,
        &queue_physical_block_size_entry.attr,
        &queue_chunk_sectors_entry.attr,
        &queue_io_min_entry.attr,
        &queue_io_opt_entry.attr,
        &queue_discard_granularity_entry.attr,
        &queue_discard_max_entry.attr,
        &queue_discard_max_hw_entry.attr,
        &queue_discard_zeroes_data_entry.attr,
        &queue_write_same_max_entry.attr,
        &queue_write_zeroes_max_entry.attr,
        &queue_zone_append_max_entry.attr,
        &queue_zone_write_granularity_entry.attr,
        &queue_nonrot_entry.attr,
        &queue_zoned_entry.attr,
        &queue_nr_zones_entry.attr,
        &queue_max_open_zones_entry.attr,
        &queue_max_active_zones_entry.attr,
        &queue_nomerges_entry.attr,
        &queue_rq_affinity_entry.attr,
        &queue_iostats_entry.attr,
        &queue_stable_writes_entry.attr,
        &queue_random_entry.attr,
        &queue_poll_entry.attr,
        &queue_wc_entry.attr,
        &queue_fua_entry.attr,
        &queue_dax_entry.attr,
        &queue_wb_lat_entry.attr,
        &queue_poll_delay_entry.attr,
        &queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
        &blk_throtl_sample_time_entry.attr,
#endif
        &queue_virt_boundary_mask_entry.attr,
        NULL,
};

static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
                                  int n)
{
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);

        if (attr == &queue_io_timeout_entry.attr &&
            (!q->mq_ops || !q->mq_ops->timeout))
                return 0;

        if ((attr == &queue_max_open_zones_entry.attr ||
             attr == &queue_max_active_zones_entry.attr) &&
            !blk_queue_is_zoned(q))
                return 0;

        return attr->mode;
}

static struct attribute_group queue_attr_group = {
        .attrs = queue_attrs,
        .is_visible = queue_attr_visible,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        struct queue_sysfs_entry *entry = to_queue(attr);
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);
        ssize_t res;

        if (!entry->show)
                return -EIO;
        mutex_lock(&q->sysfs_lock);
        res = entry->show(q, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
                 const char *page, size_t length)
{
        struct queue_sysfs_entry *entry = to_queue(attr);
        struct request_queue *q;
        ssize_t res;

        if (!entry->store)
                return -EIO;

        q = container_of(kobj, struct request_queue, kobj);
        mutex_lock(&q->sysfs_lock);
        res = entry->store(q, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
        struct request_queue *q = container_of(rcu_head, struct request_queue,
                                               rcu_head);

        kmem_cache_free(blk_get_queue_kmem_cache(blk_queue_has_srcu(q)), q);
}

/**
 * blk_release_queue - releases all allocated resources of the request_queue
 * @kobj: pointer to a kobject, whose container is a request_queue
 *
 * This function releases all allocated resources of the request queue.
 *
 * The struct request_queue refcount is incremented with blk_get_queue() and
 * decremented with blk_put_queue(). Once the refcount reaches 0 this function
 * is called.
 *
 * For drivers that have a request_queue on a gendisk and added with
 * __device_add_disk() the refcount to request_queue will reach 0 with
 * the last put_disk() called by the driver. For drivers which don't use
 * __device_add_disk() this happens with blk_cleanup_queue().
 *
 * Drivers exist which depend on the release of the request_queue being
 * synchronous; it should not be deferred.
 *
 * Context: can sleep
 */
static void blk_release_queue(struct kobject *kobj)
{
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);

        might_sleep();

        percpu_ref_exit(&q->q_usage_counter);

        if (q->poll_stat)
                blk_stat_remove_callback(q, q->poll_cb);
        blk_stat_free_callback(q->poll_cb);

        blk_free_queue_stats(q->stats);
        kfree(q->poll_stat);

        blk_queue_free_zone_bitmaps(q);

        if (queue_is_mq(q))
                blk_mq_release(q);

        blk_trace_shutdown(q);
        mutex_lock(&q->debugfs_mutex);
        debugfs_remove_recursive(q->debugfs_dir);
        mutex_unlock(&q->debugfs_mutex);

        if (queue_is_mq(q))
                blk_mq_debugfs_unregister(q);

        bioset_exit(&q->bio_split);

        if (blk_queue_has_srcu(q))
                cleanup_srcu_struct(q->srcu);

        ida_simple_remove(&blk_queue_ida, q->id);
        call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

static const struct sysfs_ops queue_sysfs_ops = {
        .show   = queue_attr_show,
        .store  = queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
        .sysfs_ops      = &queue_sysfs_ops,
        .release        = blk_release_queue,
};

/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
        int ret;
        struct device *dev = disk_to_dev(disk);
        struct request_queue *q = disk->queue;

        ret = blk_trace_init_sysfs(dev);
        if (ret)
                return ret;

        mutex_lock(&q->sysfs_dir_lock);

        ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
        if (ret < 0) {
                blk_trace_remove_sysfs(dev);
                goto unlock;
        }

        ret = sysfs_create_group(&q->kobj, &queue_attr_group);
        if (ret) {
                blk_trace_remove_sysfs(dev);
                kobject_del(&q->kobj);
                kobject_put(&dev->kobj);
                goto unlock;
        }

        mutex_lock(&q->debugfs_mutex);
        q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
                                            blk_debugfs_root);
        mutex_unlock(&q->debugfs_mutex);

        if (queue_is_mq(q)) {
                __blk_mq_register_dev(dev, q);
                blk_mq_debugfs_register(q);
        }

        mutex_lock(&q->sysfs_lock);

        ret = disk_register_independent_access_ranges(disk, NULL);
        if (ret)
                goto put_dev;

        if (q->elevator) {
                ret = elv_register_queue(q, false);
                if (ret)
                        goto put_dev;
        }

        ret = blk_crypto_sysfs_register(q);
        if (ret)
                goto put_dev;

        blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
        wbt_enable_default(q);
        blk_throtl_register_queue(q);

        /* Everything is ready; send out the KOBJ_ADD uevent. */
        kobject_uevent(&q->kobj, KOBJ_ADD);
        if (q->elevator)
                kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
        mutex_unlock(&q->sysfs_lock);

unlock:
        mutex_unlock(&q->sysfs_dir_lock);

        /*
         * SCSI probing may synchronously create and destroy a lot of
         * request_queues for non-existent devices.  Shutting down a fully
         * functional queue takes measurable wallclock time as RCU grace
         * periods are involved.  To avoid excessive latency in these
         * cases, a request_queue starts out in a degraded mode which is
         * faster to shut down and is made fully functional here, as
         * request_queues for non-existent devices never get registered.
         */
        if (!blk_queue_init_done(q)) {
                blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
                percpu_ref_switch_to_percpu(&q->q_usage_counter);
        }

        return ret;

put_dev:
        elv_unregister_queue(q);
        disk_unregister_independent_access_ranges(disk);
        mutex_unlock(&q->sysfs_lock);
        mutex_unlock(&q->sysfs_dir_lock);
        kobject_del(&q->kobj);
        blk_trace_remove_sysfs(dev);
        kobject_put(&dev->kobj);

        return ret;
}

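/*
 * blk_register_queue() runs from the device_add_disk() path once the
 * gendisk's device is visible in sysfs.  The KOBJ_ADD uevent is sent
 * only after all attributes have been created, so userspace (e.g. udev
 * rules) never observes a partially populated queue/ directory.
 */
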
/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;

        if (WARN_ON(!q))
                return;

        /* Return early if disk->queue was never registered. */
        if (!blk_queue_registered(q))
                return;

        /*
         * Since sysfs_remove_dir() prevents adding new directory entries
         * before removal of existing entries starts, protect against
         * concurrent elv_iosched_store() calls.
         */
        mutex_lock(&q->sysfs_lock);
        blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
        mutex_unlock(&q->sysfs_lock);

        mutex_lock(&q->sysfs_dir_lock);
        /*
         * Remove the sysfs attributes before unregistering the queue data
         * structures that can be modified through sysfs.
         */
        if (queue_is_mq(q))
                blk_mq_unregister_dev(disk_to_dev(disk), q);
        blk_crypto_sysfs_unregister(q);
        blk_trace_remove_sysfs(disk_to_dev(disk));

        mutex_lock(&q->sysfs_lock);
        elv_unregister_queue(q);
        disk_unregister_independent_access_ranges(disk);
        mutex_unlock(&q->sysfs_lock);

        /* Now that we've deleted all child objects, we can delete the queue. */
        kobject_uevent(&q->kobj, KOBJ_REMOVE);
        kobject_del(&q->kobj);

        mutex_unlock(&q->sysfs_dir_lock);

        kobject_put(&disk_to_dev(disk)->kobj);
}

Generated by: LCOV version 1.14