io_uring/rsrc: separate kbuf offset adjustments

Kernel registered buffers are special because segments are not uniform
in size, and we have a bunch of optimisations based on that uniformity
for normal buffers. Handle kbuf separately; it'll be cleaner this way.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/4e9e5990b0ab5aee723c0be5cd9b5bcf810375f9.1744882081.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Pavel Begunkov 2025-04-17 10:32:32 +01:00 committed by Jens Axboe
parent 1ac5712888
commit 50169d0754

View File

@ -1048,11 +1048,14 @@ static int io_import_fixed(int ddir, struct iov_iter *iter,
if (!(imu->dir & (1 << ddir)))
return -EFAULT;
/*
* Might not be a start of buffer, set size appropriately
* and advance us to the beginning.
*/
offset = buf_addr - imu->ubuf;
if (imu->is_kbuf) {
iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, offset + len);
iov_iter_advance(iter, offset);
return 0;
}
iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, offset + len);
/*
@ -1072,17 +1075,9 @@ static int io_import_fixed(int ddir, struct iov_iter *iter,
* be folio_size aligned.
*/
bvec = imu->bvec;
/*
* Kernel buffer bvecs, on the other hand, don't necessarily
* have the size property of user registered ones, so we have
* to use the slow iter advance.
*/
if (offset < bvec->bv_len) {
iter->count -= offset;
iter->iov_offset = offset;
} else if (imu->is_kbuf) {
iov_iter_advance(iter, offset);
} else {
unsigned long seg_skip;