
Commit b6b2bb5

isilence authored and axboe committed
io_uring: never overflow io_aux_cqe
Now that all callers of io_aux_cqe() set allow_overflow to false, remove the parameter and never allow overflowing auxiliary multishot CQEs.

When the CQ is full, the callers, and multishot requests in general, are expected to complete the request. That prevents indefinite background growth of the overflow list and lets userspace handle the backlog at its own pace.

Resubmitting a request should also be faster than accounting a bunch of overflows, so it should be better for performance when it happens, though a well-behaving userspace should try to avoid overflows in any case.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/bb20d14d708ea174721e58bb53786b0521e4dd6d.1691757663.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 056695b commit b6b2bb5
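
A minimal userspace sketch of the behaviour the message describes, using liburing (not part of this patch; handle_conn() and listen_fd are assumed names): when a multishot accept stops, the final CQE arrives without IORING_CQE_F_MORE, and after this change that also covers the kernel declining to post an auxiliary CQE because the CQ was full, so the application drains its completions and re-arms the request at its own pace.

#include <liburing.h>

void handle_conn(int fd);	/* assumed application handler, defined elsewhere */

static void accept_loop(struct io_uring *ring, int listen_fd)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;

	/* Arm a multishot accept: one SQE, many CQEs. */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_multishot_accept(sqe, listen_fd, NULL, NULL, 0);
	io_uring_submit(ring);

	for (;;) {
		if (io_uring_wait_cqe(ring, &cqe))
			break;
		if (cqe->res >= 0)
			handle_conn(cqe->res);
		if (!(cqe->flags & IORING_CQE_F_MORE)) {
			/* Multishot terminated (error, or no room for an aux CQE):
			 * re-arm once the backlog has been handled. */
			sqe = io_uring_get_sqe(ring);
			io_uring_prep_multishot_accept(sqe, listen_fd, NULL, NULL, 0);
			io_uring_submit(ring);
		}
		io_uring_cqe_seen(ring, cqe);
	}
}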

File tree

5 files changed: +16 −14 lines

io_uring/io_uring.c

Lines changed: 7 additions & 4 deletions
@@ -939,15 +939,18 @@ bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags
 	return __io_post_aux_cqe(ctx, user_data, res, cflags, true);
 }
 
-bool io_aux_cqe(const struct io_kiocb *req, bool defer, s32 res, u32 cflags,
-		bool allow_overflow)
+/*
+ * A helper for multishot requests posting additional CQEs.
+ * Should only be used from a task_work including IO_URING_F_MULTISHOT.
+ */
+bool io_fill_cqe_req_aux(struct io_kiocb *req, bool defer, s32 res, u32 cflags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	u64 user_data = req->cqe.user_data;
 	struct io_uring_cqe *cqe;
 
 	if (!defer)
-		return __io_post_aux_cqe(ctx, user_data, res, cflags, allow_overflow);
+		return __io_post_aux_cqe(ctx, user_data, res, cflags, false);
 
 	lockdep_assert_held(&ctx->uring_lock);
 
@@ -962,7 +965,7 @@ bool io_aux_cqe(const struct io_kiocb *req, bool defer, s32 res, u32 cflags,
 	 * however it's main job is to prevent unbounded posted completions,
 	 * and in that it works just as well.
 	 */
-	if (!allow_overflow && test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))
+	if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))
 		return false;
 
 	cqe = &ctx->submit_state.cqes[ctx->submit_state.cqes_count++];

io_uring/io_uring.h

Lines changed: 1 addition & 2 deletions
@@ -44,8 +44,7 @@ int io_run_task_work_sig(struct io_ring_ctx *ctx);
 void io_req_defer_failed(struct io_kiocb *req, s32 res);
 void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags);
 bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
-bool io_aux_cqe(const struct io_kiocb *req, bool defer, s32 res, u32 cflags,
-		bool allow_overflow);
+bool io_fill_cqe_req_aux(struct io_kiocb *req, bool defer, s32 res, u32 cflags);
 void __io_commit_cqring_flush(struct io_ring_ctx *ctx);
 
 struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);

io_uring/net.c

Lines changed: 4 additions & 4 deletions
@@ -641,8 +641,8 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 	}
 
 	if (!mshot_finished) {
-		if (io_aux_cqe(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
-			       *ret, cflags | IORING_CQE_F_MORE, false)) {
+		if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
+					*ret, cflags | IORING_CQE_F_MORE)) {
 			io_recv_prep_retry(req);
 			/* Known not-empty or unknown state, retry */
 			if (cflags & IORING_CQE_F_SOCK_NONEMPTY ||
@@ -1366,8 +1366,8 @@ int io_accept(struct io_kiocb *req, unsigned int issue_flags)
 
 	if (ret < 0)
 		return ret;
-	if (io_aux_cqe(req, issue_flags & IO_URING_F_COMPLETE_DEFER, ret,
-		       IORING_CQE_F_MORE, false))
+	if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
+				ret, IORING_CQE_F_MORE))
 		goto retry;
 
 	return -ECANCELED;

io_uring/poll.c

Lines changed: 2 additions & 2 deletions
@@ -300,8 +300,8 @@ static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
 			__poll_t mask = mangle_poll(req->cqe.res &
 						    req->apoll_events);
 
-			if (!io_aux_cqe(req, ts->locked, mask,
-					IORING_CQE_F_MORE, false)) {
+			if (!io_fill_cqe_req_aux(req, ts->locked, mask,
+						 IORING_CQE_F_MORE)) {
 				io_req_set_res(req, mask, 0);
 				return IOU_POLL_REMOVE_POLL_USE_RES;
 			}

io_uring/timeout.c

Lines changed: 2 additions & 2 deletions
@@ -73,8 +73,8 @@ static void io_timeout_complete(struct io_kiocb *req, struct io_tw_state *ts)
 
 	if (!io_timeout_finish(timeout, data)) {
 		bool filled;
-		filled = io_aux_cqe(req, ts->locked, -ETIME, IORING_CQE_F_MORE,
-				    false);
+		filled = io_fill_cqe_req_aux(req, ts->locked, -ETIME,
+					     IORING_CQE_F_MORE);
 		if (filled) {
 			/* re-arm timer */
 			spin_lock_irq(&ctx->timeout_lock);
