Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/ (synced 2025-04-19 20:58:31 +09:00)

There is a problem with page pools not dma-unmapping immediately when the device is going down, and delaying it until the page pool is destroyed, which is not allowed (see links). That just got fixed for normal page pools, and we need to address memory providers as well. Unmap pages in the memory provider uninstall callback, and protect it with a new lock.

There is also a gap between when a dma mapping is created and the mp is installed, so if the device is killed in between, io_uring would be holding on to dma mappings to a dead device with no one to call ->uninstall. Move it to page pool init and rely on ->is_mapped to make sure it's only done once.

Link: https://lore.kernel.org/lkml/8067f204-1380-4d37-8ffd-007fc6f26738@kernel.org/T/
Link: https://lore.kernel.org/all/20250409-page-pool-track-dma-v9-0-6a9ef2e0cba8@redhat.com/
Fixes: 34a3e60821ab9 ("io_uring/zcrx: implement zerocopy receive pp memory provider")
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/ef9b7db249b14f6e0b570a1bb77ff177389f881c.1744965853.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
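A minimal sketch of the scheme described above, using only the dma_lock and is_mapped fields declared in the header below. io_zcrx_map_area() and io_zcrx_unmap_area() are assumed stand-ins for the real DMA (un)mapping helpers, and the function names here are illustrative rather than the actual zcrx.c entry points:

/* Sketch only: helper names and signatures are assumptions. */
static int io_zcrx_map_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area);
static void io_zcrx_unmap_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area);

/* Called from page pool init: create the DMA mapping eagerly, but only once. */
static int io_zcrx_init_mapping(struct io_zcrx_ifq *ifq)
{
        struct io_zcrx_area *area = ifq->area;
        int ret = 0;

        mutex_lock(&ifq->dma_lock);
        if (!area->is_mapped) {
                ret = io_zcrx_map_area(ifq, area);
                if (!ret)
                        area->is_mapped = true;
        }
        mutex_unlock(&ifq->dma_lock);
        return ret;
}

/*
 * Called from the memory provider ->uninstall callback, while the netdev is
 * still alive, so pages are not left DMA-mapped to a dead device.
 */
static void io_zcrx_drop_mapping(struct io_zcrx_ifq *ifq)
{
        struct io_zcrx_area *area = ifq->area;

        mutex_lock(&ifq->dma_lock);
        if (area->is_mapped)
                io_zcrx_unmap_area(ifq, area);
        area->is_mapped = false;
        mutex_unlock(&ifq->dma_lock);
}

Either path can run first; dma_lock serialises them and is_mapped keeps the mapping idempotent, which closes the window between creating the mapping and installing the memory provider.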
76 lines, 1.9 KiB, C
// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_ZC_RX_H
#define IOU_ZC_RX_H

#include <linux/io_uring_types.h>
#include <linux/socket.h>
#include <net/page_pool/types.h>
#include <net/net_trackers.h>

struct io_zcrx_area {
        struct net_iov_area     nia;
        struct io_zcrx_ifq      *ifq;
        atomic_t                *user_refs;

        bool                    is_mapped;
        u16                     area_id;
        struct page             **pages;
        unsigned long           nr_folios;

        /* freelist */
        spinlock_t              freelist_lock ____cacheline_aligned_in_smp;
        u32                     free_count;
        u32                     *freelist;
};

struct io_zcrx_ifq {
        struct io_ring_ctx              *ctx;
        struct io_zcrx_area             *area;

        spinlock_t                      rq_lock ____cacheline_aligned_in_smp;
        struct io_uring                 *rq_ring;
        struct io_uring_zcrx_rqe        *rqes;
        u32                             cached_rq_head;
        u32                             rq_entries;

        u32                             if_rxq;
        struct device                   *dev;
        struct net_device               *netdev;
        netdevice_tracker               netdev_tracker;
        spinlock_t                      lock;
        struct mutex                    dma_lock;
};

#if defined(CONFIG_IO_URING_ZCRX)
int io_register_zcrx_ifq(struct io_ring_ctx *ctx,
                         struct io_uring_zcrx_ifq_reg __user *arg);
void io_unregister_zcrx_ifqs(struct io_ring_ctx *ctx);
void io_shutdown_zcrx_ifqs(struct io_ring_ctx *ctx);
int io_zcrx_recv(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
                 struct socket *sock, unsigned int flags,
                 unsigned issue_flags, unsigned int *len);
#else
static inline int io_register_zcrx_ifq(struct io_ring_ctx *ctx,
                                       struct io_uring_zcrx_ifq_reg __user *arg)
{
        return -EOPNOTSUPP;
}
static inline void io_unregister_zcrx_ifqs(struct io_ring_ctx *ctx)
{
}
static inline void io_shutdown_zcrx_ifqs(struct io_ring_ctx *ctx)
{
}
static inline int io_zcrx_recv(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
                               struct socket *sock, unsigned int flags,
                               unsigned issue_flags, unsigned int *len)
{
        return -EOPNOTSUPP;
}
#endif

int io_recvzc(struct io_kiocb *req, unsigned int issue_flags);
int io_recvzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);

#endif
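The stub variants under the #else branch mean callers need no CONFIG_IO_URING_ZCRX ifdefs of their own: with the feature compiled out, registration simply fails with -EOPNOTSUPP. A schematic call site follows; the dispatcher function is illustrative, only io_register_zcrx_ifq() and the IORING_REGISTER_ZCRX_IFQ opcode come from io_uring itself:

/* Illustrative dispatcher; the real one lives in io_uring's register path. */
static int handle_register_op(struct io_ring_ctx *ctx, unsigned int opcode,
                              void __user *arg)
{
        switch (opcode) {
        case IORING_REGISTER_ZCRX_IFQ:
                /* -EOPNOTSUPP when CONFIG_IO_URING_ZCRX is not set */
                return io_register_zcrx_ifq(ctx, arg);
        default:
                return -EINVAL;
        }
}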