// SPDX-License-Identifier: GPL-2.0
/*
* Functions related to sysfs handling
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/debugfs.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"
/*
 * One sysfs attribute of a request queue.  Each attribute supplies either
 * the plain show/store callbacks or the *_limit variants; the *_limit
 * store takes a queue_limits pointer so the update can be applied to a
 * limits snapshot (presumably committed atomically by the caller — the
 * commit path is outside this chunk).
 */
struct queue_sysfs_entry {
	struct attribute attr;
	/* plain read: format the value into @page, return bytes written */
	ssize_t (*show)(struct gendisk *disk, char *page);
	/* read variant for limit-backed attributes */
	ssize_t (*show_limit)(struct gendisk *disk, char *page);
	/* plain write: parse @page (@count bytes), return @count or -errno */
	ssize_t (*store)(struct gendisk *disk, const char *page, size_t count);
	/* write variant that updates @lim instead of the live queue */
	int (*store_limit)(struct gendisk *disk, const char *page,
			size_t count, struct queue_limits *lim);
};
/* Format @var as a decimal line into the sysfs @page buffer. */
static ssize_t queue_var_show(unsigned long var, char *page)
{
	return sysfs_emit(page, "%lu\n", var);
}
/*
 * Parse a decimal sysfs write into *@var.  Values above UINT_MAX are
 * rejected so the result always fits an unsigned int even though the
 * destination is an unsigned long.  Returns @count on success, -EINVAL
 * on malformed or out-of-range input.
 */
static ssize_t queue_var_store(unsigned long *var, const char *page,
			       size_t count)
{
	unsigned long val;

	if (kstrtoul(page, 10, &val) || val > UINT_MAX)
		return -EINVAL;

	*var = val;
	return count;
}
static ssize_t queue_requests_show(struct gendisk *disk, char *page)
{
ssize_t ret;
mutex_lock(&disk->queue->elevator_lock);
ret = queue_var_show(disk->queue->nr_requests, page);
mutex_unlock(&disk->queue->elevator_lock);
return ret;
}
/*
 * Update the queue depth (nr_requests) from sysfs.
 *
 * Ordering is deliberate: any tag memory that must grow is allocated
 * *before* the queue is frozen (allocating under freeze could deadlock),
 * and the old/unused tags are freed only *after* unfreezing.  Returns
 * @count on success or a negative errno.
 */
static ssize_t
queue_requests_store(struct gendisk *disk, const char *page, size_t count)
{
	struct request_queue *q = disk->queue;
	struct blk_mq_tag_set *set = q->tag_set;
	struct elevator_tags *et = NULL;
	unsigned int memflags;
	unsigned long nr;
	int ret;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	/*
	 * Serialize updating nr_requests with concurrent queue_requests_store()
	 * and switching elevator.
	 */
	down_write(&set->update_nr_hwq_lock);
	/* No change requested; ret still holds @count from queue_var_store(). */
	if (nr == q->nr_requests)
		goto unlock;

	/* Silently clamp to the minimum supported depth rather than fail. */
	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	/*
	 * Switching elevator is protected by update_nr_hwq_lock:
	 * - read lock is held from elevator sysfs attribute;
	 * - write lock is held from updating nr_hw_queues;
	 * Hence it's safe to access q->elevator here with write lock held.
	 */
	if (nr <= set->reserved_tags ||
	    (q->elevator && nr > MAX_SCHED_RQ) ||
	    (!q->elevator && nr > set->queue_depth)) {
		ret = -EINVAL;
		goto unlock;
	}

	if (!blk_mq_is_shared_tags(set->flags) && q->elevator &&
	    nr > q->elevator->et->nr_requests) {
		/*
		 * Tags will grow, allocate memory before freezing queue to
		 * prevent deadlock.
		 */
		et = blk_mq_alloc_sched_tags(set, q->nr_hw_queues, nr);
		if (!et) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

	memflags = blk_mq_freeze_queue(q);
	mutex_lock(&q->elevator_lock);
	/*
	 * Hand the pre-allocated tags (if any) to the update; the returned
	 * pointer is whatever must now be freed — presumably the replaced
	 * old tags, or the unused new allocation on failure (confirm against
	 * blk_mq_update_nr_requests()).
	 */
	et = blk_mq_update_nr_requests(q, et, nr);
	mutex_unlock(&q->elevator_lock);
	blk_mq_unfreeze_queue(q, memflags);

	/* Free outside the frozen section for the same deadlock reason. */
	if (et)
		blk_mq_free_sched_tags(et, set);

unlock:
	up_write(&set->update_nr_hwq_lock);
	return ret;
}
static ssize_t queue_ra_show(struct gendisk *disk, char *page)
{
ssize_t ret;
mutex_lock(&disk->queue->limits_lock);
ret = queue_var_show(disk->bdi->ra_pages <<