author     Linus Torvalds <torvalds@linux-foundation.org>  2026-05-08 13:18:13 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2026-05-08 13:18:13 -0700
commit     cbf457c584b5cbd0d44e8f05edaf3e189e894a68 (patch)
tree       a66d0b53c9cb283ee90048551f53c99233c59cab
parent     8be01e1280912a84f6bcf963ceed6c9f13ba1986 (diff)
parent     f7700a4415afb3ac1767a556094e4ef8bd440e41 (diff)
Merge tag 'block-7.1-20260508' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux
Pull block fixes from Jens Axboe:

 - Fix for ublk not doing an actual issue from the task_work fallback
   path. Any request hitting that should be canceled automatically

 - Fix for uring_cmd prep side handling, for the block side uring_cmd
   discard handling

 - Fix for missing validation of the io and physical block size shifts

 - Fix for a use-after-free in ublk's cancel command handling

* tag 'block-7.1-20260508' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux:
  ublk: fix use-after-free in ublk_cancel_cmd()
  ublk: validate physical_bs_shift, io_min_shift and io_opt_shift
  block: only read from sqe on initial invocation of blkdev_uring_cmd()
  ublk: don't issue uring_cmd from fallback task work
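The use-after-free fix is the core of the ublk changes below: the cancel path now claims io->cmd under ubq->cancel_lock and clears it, and queue reinit takes the same lock, so no other path can see a command that cancel is about to complete. A minimal userspace sketch of that claim-under-lock pattern, using pthreads and hypothetical names (io_slot, cancel_slot), not kernel API:

#include <errno.h>
#include <pthread.h>
#include <stddef.h>

struct cmd { int res; };

struct io_slot {
	pthread_mutex_t lock;
	struct cmd *cmd;	/* owned by whoever NULLs it under the lock */
	int canceled;
};

/* Cancel path: claim the command under the lock, complete it outside. */
static void cancel_slot(struct io_slot *io)
{
	struct cmd *claimed = NULL;

	pthread_mutex_lock(&io->lock);
	if (!io->canceled) {
		io->canceled = 1;
		claimed = io->cmd;	/* take ownership of the pointer ... */
		io->cmd = NULL;		/* ... so no other path completes or reuses it */
	}
	pthread_mutex_unlock(&io->lock);

	if (claimed)
		claimed->res = -ECANCELED;
}

int main(void)
{
	struct cmd c = { .res = 0 };
	struct io_slot io = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cmd = &c,
		.canceled = 0,
	};

	cancel_slot(&io);	/* completes c with -ECANCELED exactly once */
	cancel_slot(&io);	/* second call is a no-op: already canceled */
	return c.res == -ECANCELED ? 0 : 1;
}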
-rw-r--r--  block/ioctl.c              24
-rw-r--r--  drivers/block/ublk_drv.c   42
2 files changed, 50 insertions(+), 16 deletions(-)
diff --git a/block/ioctl.c b/block/ioctl.c
index fc3be0549aa7..ab2c9ed79946 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -857,6 +857,8 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
#endif
struct blk_iou_cmd {
+ u64 start;
+ u64 len;
int res;
bool nowait;
};
@@ -946,23 +948,27 @@ int blkdev_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
struct block_device *bdev = I_BDEV(cmd->file->f_mapping->host);
struct blk_iou_cmd *bic = io_uring_cmd_to_pdu(cmd, struct blk_iou_cmd);
- const struct io_uring_sqe *sqe = cmd->sqe;
u32 cmd_op = cmd->cmd_op;
- uint64_t start, len;
- if (unlikely(sqe->ioprio || sqe->__pad1 || sqe->len ||
- sqe->rw_flags || sqe->file_index))
- return -EINVAL;
+ /* Read what we need from the SQE on the first issue */
+ if (!(issue_flags & IORING_URING_CMD_REISSUE)) {
+ const struct io_uring_sqe *sqe = cmd->sqe;
+
+ if (unlikely(sqe->ioprio || sqe->__pad1 || sqe->len ||
+ sqe->rw_flags || sqe->file_index))
+ return -EINVAL;
+
+ bic->start = READ_ONCE(sqe->addr);
+ bic->len = READ_ONCE(sqe->addr3);
+ }
bic->res = 0;
bic->nowait = issue_flags & IO_URING_F_NONBLOCK;
- start = READ_ONCE(sqe->addr);
- len = READ_ONCE(sqe->addr3);
-
switch (cmd_op) {
case BLOCK_URING_CMD_DISCARD:
- return blkdev_cmd_discard(cmd, bdev, start, len, bic->nowait);
+ return blkdev_cmd_discard(cmd, bdev, bic->start, bic->len,
+ bic->nowait);
}
return -EINVAL;
}
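For context, a minimal userspace sketch of driving BLOCK_URING_CMD_DISCARD, assuming liburing and uapi headers that export the command; it is not part of this commit, and it really discards data on the target device. The SQE fields mirror what blkdev_uring_cmd() reads above: addr carries the byte offset of the range, addr3 its byte length.

/* build: cc -O2 discard_cmd.c -o discard_cmd -luring */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <liburing.h>
#include <linux/blkdev.h>	/* BLOCK_URING_CMD_DISCARD (uapi) */

int main(int argc, char **argv)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int fd, ret;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <block-device>\n", argv[0]);
		return 1;
	}
	/* WARNING: discards (destroys) data on this device */
	fd = open(argv[1], O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_URING_CMD;
	sqe->fd = fd;
	sqe->cmd_op = BLOCK_URING_CMD_DISCARD;
	sqe->addr = 0;			/* byte offset of range to discard */
	sqe->addr3 = 1 << 20;		/* byte length (1 MiB here) */

	io_uring_submit(&ring);
	ret = io_uring_wait_cqe(&ring, &cqe);
	if (!ret) {
		printf("discard result: %d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}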
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 8e5f3738c203..6d13f1481de0 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -900,6 +900,20 @@ static int ublk_validate_params(const struct ublk_device *ub)
if (p->logical_bs_shift > PAGE_SHIFT || p->logical_bs_shift < 9)
return -EINVAL;
+ /*
+ * 256M is a reasonable upper bound for physical block size,
+ * io_min and io_opt; it aligns with the maximum physical
+ * block size possible in NVMe.
+ */
+ if (p->physical_bs_shift > ilog2(SZ_256M))
+ return -EINVAL;
+
+ if (p->io_min_shift > ilog2(SZ_256M))
+ return -EINVAL;
+
+ if (p->io_opt_shift > ilog2(SZ_256M))
+ return -EINVAL;
+
if (p->logical_bs_shift > p->physical_bs_shift)
return -EINVAL;
@@ -2397,8 +2411,14 @@ static void ublk_reset_ch_dev(struct ublk_device *ub)
{
int i;
- for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
- ublk_queue_reinit(ub, ublk_get_queue(ub, i));
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+ struct ublk_queue *ubq = ublk_get_queue(ub, i);
+
+ /* Sync with ublk_cancel_cmd() */
+ spin_lock(&ubq->cancel_lock);
+ ublk_queue_reinit(ub, ubq);
+ spin_unlock(&ubq->cancel_lock);
+ }
/* set to NULL, otherwise new tasks cannot mmap io_cmd_buf */
ub->mm = NULL;
@@ -2739,6 +2759,7 @@ static void ublk_cancel_cmd(struct ublk_queue *ubq, unsigned tag,
{
struct ublk_io *io = &ubq->ios[tag];
struct ublk_device *ub = ubq->dev;
+ struct io_uring_cmd *cmd = NULL;
struct request *req;
bool done;
@@ -2761,12 +2782,15 @@ static void ublk_cancel_cmd(struct ublk_queue *ubq, unsigned tag,
spin_lock(&ubq->cancel_lock);
done = !!(io->flags & UBLK_IO_FLAG_CANCELED);
- if (!done)
+ if (!done) {
io->flags |= UBLK_IO_FLAG_CANCELED;
+ cmd = io->cmd;
+ io->cmd = NULL;
+ }
spin_unlock(&ubq->cancel_lock);
- if (!done)
- io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, issue_flags);
+ if (!done && cmd)
+ io_uring_cmd_done(cmd, UBLK_IO_RES_ABORT, issue_flags);
}
/*
@@ -3496,8 +3520,10 @@ static void ublk_ch_uring_cmd_cb(struct io_tw_req tw_req, io_tw_token_t tw)
{
unsigned int issue_flags = IO_URING_CMD_TASK_WORK_ISSUE_FLAGS;
struct io_uring_cmd *cmd = io_uring_cmd_from_tw(tw_req);
- int ret = ublk_ch_uring_cmd_local(cmd, issue_flags);
+ int ret = -ECANCELED;
+ if (!tw.cancel)
+ ret = ublk_ch_uring_cmd_local(cmd, issue_flags);
if (ret != -EIOCBQUEUED)
io_uring_cmd_done(cmd, ret, issue_flags);
}
@@ -4990,13 +5016,15 @@ static int ublk_ctrl_set_params(struct ublk_device *ub,
*/
ret = -EACCES;
} else if (copy_from_user(&ub->params, argp, ph.len)) {
+ /* zero out partial copy so no stale params survive */
+ memset(&ub->params, 0, sizeof(ub->params));
ret = -EFAULT;
} else {
/* clear all we don't support yet */
ub->params.types &= UBLK_PARAM_TYPE_ALL;
ret = ublk_validate_params(ub);
if (ret)
- ub->params.types = 0;
+ memset(&ub->params, 0, sizeof(ub->params));
}
mutex_unlock(&ub->mutex);
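For reference, the new shift limits in ublk_validate_params() written out as a standalone userspace check: SZ_256M is 2^28 bytes, so ilog2(SZ_256M) is 28. A minimal sketch assuming 4 KiB pages; validate_shifts() and MAX_BS_SHIFT are illustrative names, not kernel code.

#include <errno.h>
#include <stdio.h>

#define PAGE_SHIFT   12		/* assumption: 4 KiB pages */
#define MAX_BS_SHIFT 28		/* ilog2(SZ_256M): 256 MiB == 1 << 28 */

/* Mirrors the bounds the patch enforces on ublk device parameters. */
static int validate_shifts(unsigned lbs, unsigned pbs,
			   unsigned io_min, unsigned io_opt)
{
	if (lbs > PAGE_SHIFT || lbs < 9)
		return -EINVAL;
	if (pbs > MAX_BS_SHIFT || io_min > MAX_BS_SHIFT || io_opt > MAX_BS_SHIFT)
		return -EINVAL;
	if (lbs > pbs)
		return -EINVAL;
	return 0;
}

int main(void)
{
	printf("%d\n", validate_shifts(9, 12, 12, 12));	/* 0: 512B logical, 4K physical */
	printf("%d\n", validate_shifts(9, 63, 12, 12));	/* -22: bogus 2^63 physical block */
	return 0;
}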