aboutsummaryrefslogtreecommitdiff
path: root/tools/testing/selftests/ublk/null.c
diff options
context:
space:
mode:
authorMing Lei <ming.lei@redhat.com>2025-07-13 22:34:07 +0800
committerJens Axboe <axboe@kernel.dk>2025-07-15 08:04:17 -0600
commite0054835bf6850245e17417fdbe80e232737e537 (patch)
treeb0b56606994d6a0201f3962e4cf56d26bd171dd9 /tools/testing/selftests/ublk/null.c
parentb36c73251aaec6c9941b5493637a9007d0a56616 (diff)
selftests: ublk: pass 'ublk_thread *' to ->queue_io() and ->tgt_io_done()
'struct ublk_thread' is a task-local structure, and the related code will become more readable if we pass it via a parameter. Meantime pass 'ublk_thread *' to ublk_io_alloc_sqes(), and this way is natural since we use a per-thread io_uring for handling IO. More importantly it helps much for removing the current ubq_daemon or per-io-task limit. Signed-off-by: Ming Lei <ming.lei@redhat.com> Link: https://lore.kernel.org/r/20250713143415.2857561-13-ming.lei@redhat.com Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'tools/testing/selftests/ublk/null.c')
-rw-r--r--tools/testing/selftests/ublk/null.c21
1 file changed, 12 insertions, 9 deletions
diff --git a/tools/testing/selftests/ublk/null.c b/tools/testing/selftests/ublk/null.c
index ea3da53437e9..e29a005fc1cc 100644
--- a/tools/testing/selftests/ublk/null.c
+++ b/tools/testing/selftests/ublk/null.c
@@ -55,12 +55,13 @@ static void __setup_nop_io(int tag, const struct ublksrv_io_desc *iod,
sqe->user_data = build_user_data(tag, ublk_op, 0, q_id, 1);
}
-static int null_queue_zc_io(struct ublk_queue *q, int tag)
+static int null_queue_zc_io(struct ublk_thread *t, struct ublk_queue *q,
+ int tag)
{
const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
struct io_uring_sqe *sqe[3];
- ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, 3);
+ ublk_io_alloc_sqes(t, sqe, 3);
io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, ublk_get_io(q, tag)->buf_index);
sqe[0]->user_data = build_user_data(tag,
@@ -77,18 +78,19 @@ static int null_queue_zc_io(struct ublk_queue *q, int tag)
return 2;
}
-static int null_queue_auto_zc_io(struct ublk_queue *q, int tag)
+static int null_queue_auto_zc_io(struct ublk_thread *t, struct ublk_queue *q,
+ int tag)
{
const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
struct io_uring_sqe *sqe[1];
- ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, 1);
+ ublk_io_alloc_sqes(t, sqe, 1);
__setup_nop_io(tag, iod, sqe[0], q->q_id);
return 1;
}
-static void ublk_null_io_done(struct ublk_queue *q,
- const struct io_uring_cqe *cqe)
+static void ublk_null_io_done(struct ublk_thread *t, struct ublk_queue *q,
+ const struct io_uring_cqe *cqe)
{
unsigned tag = user_data_to_tag(cqe->user_data);
unsigned op = user_data_to_op(cqe->user_data);
@@ -110,7 +112,8 @@ static void ublk_null_io_done(struct ublk_queue *q,
ublk_complete_io(q, tag, io->result);
}
-static int ublk_null_queue_io(struct ublk_queue *q, int tag)
+static int ublk_null_queue_io(struct ublk_thread *t, struct ublk_queue *q,
+ int tag)
{
const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
unsigned auto_zc = ublk_queue_use_auto_zc(q);
@@ -118,9 +121,9 @@ static int ublk_null_queue_io(struct ublk_queue *q, int tag)
int queued;
if (auto_zc && !ublk_io_auto_zc_fallback(iod))
- queued = null_queue_auto_zc_io(q, tag);
+ queued = null_queue_auto_zc_io(t, q, tag);
else if (zc)
- queued = null_queue_zc_io(q, tag);
+ queued = null_queue_zc_io(t, q, tag);
else {
ublk_complete_io(q, tag, iod->nr_sectors << 9);
return 0;