|  |  |  |
|---|---|---|
| author | Jens Axboe <axboe@kernel.dk> | 2025-12-11 03:25:41 -0700 |
| committer | Jens Axboe <axboe@kernel.dk> | 2025-12-28 15:54:45 -0700 |
| commit | 3c7d76d6128a0fef68e6540754bf85a44a29bb59 (patch) | |
| tree | 1fed05d9c7c6ba8891b7a0517c725ea899be5844 /io_uring/rw.c | |
| parent | f8f9c1f4d0c7a64600e2ca312dec824a0bc2f1da (diff) | |
io_uring: IOPOLL polling improvements
io_uring manages issued and pending IOPOLL read/write requests in a
singly linked list. One downside of that is that individual entries
cannot easily be removed from the list: io_uring can only complete a
finished request N in that list once requests 0..N-1 have completed as
well. For homogeneous IO this isn't necessarily an issue, but if
different devices are polled on the same ring, or if disparate IO from
the same device is being polled for, this can needlessly defer the
completion of requests that have already finished.
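To make that constraint concrete, here's a minimal userspace sketch
(illustrative names, not the kernel's wq_list implementation). With
forward-only links, the reap step can only cut a completed prefix off
the head of the list, so a finished request parked behind an unfinished
one stays queued:

/* Sketch only: singly linked list with prefix-only reaping. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct sreq {
        int id;
        bool completed;
        struct sreq *next;      /* forward link only, no back pointer */
};

/* Reap the completed prefix; returns the new list head. Request N is
 * only reachable once requests 0..N-1 ahead of it have completed. */
static struct sreq *reap_prefix(struct sreq *head, int *nr_events)
{
        while (head && head->completed) {
                printf("completed req %d\n", head->id);
                (*nr_events)++;
                head = head->next;
        }
        return head;
}

int main(void)
{
        struct sreq c = { 2, true, NULL };      /* done, but behind b */
        struct sreq b = { 1, false, &c };       /* still in flight */
        struct sreq a = { 0, true, &b };        /* done */
        int nr_events = 0;

        struct sreq *head = reap_prefix(&a, &nr_events);

        /* Only req 0 is reaped: req 2 is done yet stays behind req 1. */
        printf("reaped %d, head now req %d\n", nr_events,
               head ? head->id : -1);
        return 0;
}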
Move to a doubly linked list for iopoll completions instead, making it
possible to complete any request as soon as it has been polled done,
regardless of its position in the list.
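The doubly linked counterpart, again as a self-contained sketch rather
than the kernel's list_head machinery: once every node knows its
predecessor, any completed entry can be unlinked in O(1), so an
in-flight request no longer holds up completed ones behind it:

/* Sketch only: circular doubly linked list with a sentinel head. */
#include <stdbool.h>
#include <stdio.h>

struct dreq {
        int id;
        bool completed;
        struct dreq *prev, *next;
};

/* Unlink req in O(1); possible only because of the back pointer. */
static void unlink_req(struct dreq *req)
{
        req->prev->next = req->next;
        req->next->prev = req->prev;
}

int main(void)
{
        struct dreq head = { -1, false, &head, &head }; /* sentinel */
        struct dreq reqs[3] = {
                { 0, true }, { 1, false }, { 2, true },
        };
        int nr_events = 0;

        /* Build head -> 0 -> 1 -> 2 by inserting at the tail. */
        for (int i = 0; i < 3; i++) {
                reqs[i].prev = head.prev;
                reqs[i].next = &head;
                head.prev->next = &reqs[i];
                head.prev = &reqs[i];
        }

        /* Safe traversal: cache ->next before a possible unlink, in
         * the same spirit as list_for_each_entry_safe(). */
        for (struct dreq *req = head.next, *tmp = req->next;
             req != &head; req = tmp, tmp = req->next) {
                if (!req->completed)
                        continue;       /* skip, don't stop */
                unlink_req(req);
                printf("completed req %d\n", req->id);
                nr_events++;
        }
        /* Reqs 0 and 2 complete right away; req 1 stays queued. */
        printf("nr_events=%d\n", nr_events);
        return 0;
}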
Co-developed-by: Fengnan Chang <fengnanchang@gmail.com>
Link: https://lore.kernel.org/io-uring/20251210085501.84261-1-changfengnan@bytedance.com/
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'io_uring/rw.c')
| -rw-r--r-- | io_uring/rw.c | 28 |
1 file changed, 8 insertions(+), 20 deletions(-)
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 70ca88cc1f54..307f1f39d9f3 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -1315,9 +1315,9 @@ static int io_uring_hybrid_poll(struct io_kiocb *req,
 
 int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 {
-        struct io_wq_work_node *pos, *start, *prev;
         unsigned int poll_flags = 0;
         DEFINE_IO_COMP_BATCH(iob);
+        struct io_kiocb *req, *tmp;
         int nr_events = 0;
 
         /*
@@ -1327,8 +1327,7 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
         if (ctx->poll_multi_queue || force_nonspin)
                 poll_flags |= BLK_POLL_ONESHOT;
 
-        wq_list_for_each(pos, start, &ctx->iopoll_list) {
-                struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
+        list_for_each_entry(req, &ctx->iopoll_list, iopoll_node) {
                 int ret;
 
                 /*
@@ -1357,31 +1356,20 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 
         if (!rq_list_empty(&iob.req_list))
                 iob.complete(&iob);
-        else if (!pos)
-                return 0;
-
-        prev = start;
-        wq_list_for_each_resume(pos, prev) {
-                struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
+        list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, iopoll_node) {
 
                 /* order with io_complete_rw_iopoll(), e.g. ->result updates */
                 if (!smp_load_acquire(&req->iopoll_completed))
-                        break;
+                        continue;
+                list_del(&req->iopoll_node);
+                wq_list_add_tail(&req->comp_list, &ctx->submit_state.compl_reqs);
                 nr_events++;
                 req->cqe.flags = io_put_kbuf(req, req->cqe.res, NULL);
                 if (req->opcode != IORING_OP_URING_CMD)
                         io_req_rw_cleanup(req, 0);
         }
-        if (unlikely(!nr_events))
-                return 0;
-
-        pos = start ? start->next : ctx->iopoll_list.first;
-        wq_list_cut(&ctx->iopoll_list, prev, start);
-
-        if (WARN_ON_ONCE(!wq_list_empty(&ctx->submit_state.compl_reqs)))
-                return 0;
-        ctx->submit_state.compl_reqs.first = pos;
-        __io_submit_flush_completions(ctx);
+        if (nr_events)
+                __io_submit_flush_completions(ctx);
 
         return nr_events;
 }
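One subtlety the diff preserves: the smp_load_acquire() in the reap
loop pairs with a release store on the completion side (per the
comment, io_complete_rw_iopoll()), so observing iopoll_completed == 1
also guarantees visibility of the request's result. In C11-atomics
terms the idiom looks roughly like the sketch below; this is the
generic publish/consume pattern, not the kernel's actual code:

#include <stdatomic.h>
#include <stdio.h>

struct req {
        int result;                     /* plain data, written first */
        atomic_int iopoll_completed;    /* publish flag */
};

/* Completion side, in spirit: write ->result, then release-store the
 * flag so the write is visible to whoever sees the flag as set. */
static void complete_req(struct req *req, int res)
{
        req->result = res;
        atomic_store_explicit(&req->iopoll_completed, 1,
                              memory_order_release);
}

/* Reap side, in spirit: an acquire load guarantees that if the flag
 * reads as 1, the ->result store is visible too; otherwise skip. */
static int try_reap(struct req *req)
{
        if (!atomic_load_explicit(&req->iopoll_completed,
                                  memory_order_acquire))
                return -1;
        return req->result;
}

int main(void)
{
        struct req r = { 0, 0 };

        printf("before: %d\n", try_reap(&r));   /* -1, not completed */
        complete_req(&r, 4096);
        printf("after: %d\n", try_reap(&r));    /* 4096 */
        return 0;
}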
