author:    Pavel Begunkov <asml.silence@gmail.com>  2025-09-16 15:27:44 +0100
committer: Jens Axboe <axboe@kernel.dk>             2025-09-16 12:37:20 -0600
commit:    9eb3c571787d1ef7e2c3393c153b1a6b103a26e3 (patch)
tree:      8203aeed17bc68322065716ecf4a4ed88a5bc22c
parent:    1b3aa3900782707ec2f4cc1651bc82c628f25d2b (diff)
io_uring/zcrx: improve rqe cache alignment
Refill queue entries are 16B structures, but because of the ring header
placement they are only 8B aligned rather than naturally (16B) aligned,
which means some of them span two cache lines. Push the rqes onto a new
cache line.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
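
The arithmetic behind the change: struct io_uring is just the 8B head/tail
pair, so previously the rqe array started at offset 8, and with 16B entries
every fourth rqe straddled a 64B cache line boundary. Aligning the start of
the array to L1_CACHE_BYTES keeps each entry within a single line. Below is
a minimal standalone sketch (not kernel code; stand-in struct names, and a
64B L1 cache line is assumed) that demonstrates the effect:

```c
/*
 * Standalone demo: count how many 16B refill-queue entries straddle a
 * cache line when the array starts right after an 8B ring header versus
 * after a cache-line-aligned offset. Struct names are stand-ins.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define L1_CACHE_BYTES	64
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

struct ring_hdr {		/* stand-in for struct io_uring */
	uint32_t head;
	uint32_t tail;
};

struct zcrx_rqe {		/* stand-in for struct io_uring_zcrx_rqe */
	uint64_t off;
	uint32_t len;
	uint32_t pad;
};

static unsigned int count_straddling(size_t base, unsigned int nr)
{
	unsigned int straddle = 0;

	for (unsigned int i = 0; i < nr; i++) {
		size_t start = base + i * sizeof(struct zcrx_rqe);
		size_t end = start + sizeof(struct zcrx_rqe) - 1;

		/* entry crosses a boundary if start and end fall in
		 * different cache lines */
		if (start / L1_CACHE_BYTES != end / L1_CACHE_BYTES)
			straddle++;
	}
	return straddle;
}

int main(void)
{
	unsigned int nr = 64;
	size_t off_old = sizeof(struct ring_hdr);			 /* 8  */
	size_t off_new = ALIGN(sizeof(struct ring_hdr), L1_CACHE_BYTES); /* 64 */

	printf("offset %zu: %u/%u rqes straddle a cache line\n",
	       off_old, count_straddling(off_old, nr), nr);
	printf("offset %zu: %u/%u rqes straddle a cache line\n",
	       off_new, count_straddling(off_new, nr), nr);
	return 0;
}
```

With the old offset the sketch reports 16 of 64 entries straddling a line;
with the aligned offset none do.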
-rw-r--r-- | io_uring/zcrx.c | 9 |
1 file changed, 5 insertions, 4 deletions
```diff
diff --git a/io_uring/zcrx.c b/io_uring/zcrx.c
index 51fd2350dbe9..c02045e4c1b6 100644
--- a/io_uring/zcrx.c
+++ b/io_uring/zcrx.c
@@ -352,7 +352,7 @@ static int io_allocate_rbuf_ring(struct io_zcrx_ifq *ifq,
 	void *ptr;
 	int ret;
 
-	off = sizeof(struct io_uring);
+	off = ALIGN(sizeof(struct io_uring), L1_CACHE_BYTES);
 	size = off + sizeof(struct io_uring_zcrx_rqe) * reg->rq_entries;
 	if (size > rd->size)
 		return -EINVAL;
@@ -367,6 +367,10 @@ static int io_allocate_rbuf_ring(struct io_zcrx_ifq *ifq,
 	ptr = io_region_get_ptr(&ifq->region);
 	ifq->rq_ring = (struct io_uring *)ptr;
 	ifq->rqes = (struct io_uring_zcrx_rqe *)(ptr + off);
+
+	reg->offsets.head = offsetof(struct io_uring, head);
+	reg->offsets.tail = offsetof(struct io_uring, tail);
+	reg->offsets.rqes = off;
 	return 0;
 }
 
@@ -618,9 +622,6 @@ int io_register_zcrx_ifq(struct io_ring_ctx *ctx,
 		goto err;
 	ifq->if_rxq = reg.if_rxq;
 
-	reg.offsets.rqes = sizeof(struct io_uring);
-	reg.offsets.head = offsetof(struct io_uring, head);
-	reg.offsets.tail = offsetof(struct io_uring, tail);
 	reg.zcrx_id = id;
 
 	scoped_guard(mutex, &ctx->mmap_lock) {
```
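
Since the rqe array no longer sits immediately after the 8B ring header,
userspace must not hardcode the old sizeof(struct io_uring) offset; it
should use the offsets the kernel now fills into reg->offsets during
registration (the same fields the patch moves into io_allocate_rbuf_ring()).
A hypothetical userspace sketch, assuming the zcrx registration structs are
available from the uapi header and that `rq_ring_ptr` is the refill-queue
region the application has already mmap'ed; the helper name and struct
`zcrx_rq` are illustrative, not part of the patch:

```c
#include <stdint.h>
#include <linux/io_uring.h>	/* assumed to provide the zcrx uapi structs */

/* Illustrative holder for the resolved refill-queue pointers. */
struct zcrx_rq {
	uint32_t *head;
	uint32_t *tail;
	struct io_uring_zcrx_rqe *rqes;
};

/* Hypothetical helper: derive all refill-ring pointers from the offsets
 * reported back by the kernel instead of assuming the rqes start right
 * after the head/tail header. */
static void zcrx_rq_init(struct zcrx_rq *rq,
			 const struct io_uring_zcrx_ifq_reg *reg,
			 void *rq_ring_ptr)
{
	rq->head = (uint32_t *)((char *)rq_ring_ptr + reg->offsets.head);
	rq->tail = (uint32_t *)((char *)rq_ring_ptr + reg->offsets.tail);
	rq->rqes = (struct io_uring_zcrx_rqe *)
			((char *)rq_ring_ptr + reg->offsets.rqes);
}
```

Code written this way keeps working regardless of where the kernel chooses
to place the rqe array inside the region.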