net: generalise net_iov chunk owners

Currently net_iov stores a pointer to struct dmabuf_genpool_chunk_owner,
which serves as a useful abstraction to share data and provide a
context. However, it is too devmem-specific, and we want to reuse it for
other memory providers; for that we need to decouple net_iov from
devmem. Make net_iov point to a new base structure called
net_iov_area, which dmabuf_genpool_chunk_owner extends.

Reviewed-by: Mina Almasry <almasrymina@google.com>
Acked-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: David Wei <dw@davidwei.uk>
Link: https://patch.msgid.link/20250204215622.695511-4-dw@davidwei.uk
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
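
To make the intent concrete, here is a minimal sketch (illustration only, not part of the patch) of how a hypothetical non-devmem memory provider could reuse the new base structure: it embeds struct net_iov_area and recovers its private owner with container_of(), the same pattern dmabuf_genpool_chunk_owner follows in the hunks below. The my_provider_* names are made up for this example.

    #include <linux/container_of.h>
    #include <net/netmem.h>	/* net_iov, net_iov_area, net_iov_owner() after this patch */

    /* Illustration only: a hypothetical provider embedding the generic area. */
    struct my_provider_area {
            struct net_iov_area area;	/* generic part: niovs, num_niovs, base_virtual */
            /* provider-private state would go here */
    };

    static inline struct my_provider_area *
    my_provider_iov_to_area(const struct net_iov *niov)
    {
            struct net_iov_area *owner = net_iov_owner(niov);

            return container_of(owner, struct my_provider_area, area);
    }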

include/net/netmem.h

@@ -24,11 +24,20 @@ struct net_iov {
 	unsigned long __unused_padding;
 	unsigned long pp_magic;
 	struct page_pool *pp;
-	struct dmabuf_genpool_chunk_owner *owner;
+	struct net_iov_area *owner;
 	unsigned long dma_addr;
 	atomic_long_t pp_ref_count;
 };
 
+struct net_iov_area {
+	/* Array of net_iovs for this area. */
+	struct net_iov *niovs;
+	size_t num_niovs;
+
+	/* Offset into the dma-buf where this chunk starts. */
+	unsigned long base_virtual;
+};
+
 /* These fields in struct page are used by the page_pool and net stack:
  *
  *	struct {
@@ -54,6 +63,16 @@ NET_IOV_ASSERT_OFFSET(dma_addr, dma_addr);
 NET_IOV_ASSERT_OFFSET(pp_ref_count, pp_ref_count);
 #undef NET_IOV_ASSERT_OFFSET
 
+static inline struct net_iov_area *net_iov_owner(const struct net_iov *niov)
+{
+	return niov->owner;
+}
+
+static inline unsigned int net_iov_idx(const struct net_iov *niov)
+{
+	return niov - net_iov_owner(niov)->niovs;
+}
+
 /* netmem */
 
 /**

net/core/devmem.c

@@ -33,14 +33,15 @@ static void net_devmem_dmabuf_free_chunk_owner(struct gen_pool *genpool,
 {
 	struct dmabuf_genpool_chunk_owner *owner = chunk->owner;
 
-	kvfree(owner->niovs);
+	kvfree(owner->area.niovs);
 	kfree(owner);
 }
 
 static dma_addr_t net_devmem_get_dma_addr(const struct net_iov *niov)
 {
-	struct dmabuf_genpool_chunk_owner *owner = net_iov_owner(niov);
+	struct dmabuf_genpool_chunk_owner *owner;
 
+	owner = net_devmem_iov_to_chunk_owner(niov);
 	return owner->base_dma_addr +
 	       ((dma_addr_t)net_iov_idx(niov) << PAGE_SHIFT);
 }
@@ -83,7 +84,7 @@ net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
 
 	offset = dma_addr - owner->base_dma_addr;
 	index = offset / PAGE_SIZE;
-	niov = &owner->niovs[index];
+	niov = &owner->area.niovs[index];
 
 	niov->pp_magic = 0;
 	niov->pp = NULL;
@@ -261,9 +262,9 @@ net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
 		goto err_free_chunks;
 	}
 
-	owner->base_virtual = virtual;
+	owner->area.base_virtual = virtual;
 	owner->base_dma_addr = dma_addr;
-	owner->num_niovs = len / PAGE_SIZE;
+	owner->area.num_niovs = len / PAGE_SIZE;
 	owner->binding = binding;
 
 	err = gen_pool_add_owner(binding->chunk_pool, dma_addr,
@@ -275,17 +276,17 @@ net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
 		goto err_free_chunks;
 	}
 
-	owner->niovs = kvmalloc_array(owner->num_niovs,
-				      sizeof(*owner->niovs),
-				      GFP_KERNEL);
-	if (!owner->niovs) {
+	owner->area.niovs = kvmalloc_array(owner->area.num_niovs,
+					   sizeof(*owner->area.niovs),
+					   GFP_KERNEL);
+	if (!owner->area.niovs) {
 		err = -ENOMEM;
 		goto err_free_chunks;
 	}
 
-	for (i = 0; i < owner->num_niovs; i++) {
-		niov = &owner->niovs[i];
-		niov->owner = owner;
+	for (i = 0; i < owner->area.num_niovs; i++) {
+		niov = &owner->area.niovs[i];
+		niov->owner = &owner->area;
 		page_pool_set_dma_addr_netmem(net_iov_to_netmem(niov),
 					      net_devmem_get_dma_addr(niov));
 	}

net/core/devmem.h

@@ -10,6 +10,8 @@
 #ifndef _NET_DEVMEM_H
 #define _NET_DEVMEM_H
 
+#include <net/netmem.h>
+
 struct netlink_ext_ack;
 
 struct net_devmem_dmabuf_binding {
@@ -51,17 +53,11 @@ struct net_devmem_dmabuf_binding {
  * allocations from this chunk.
  */
 struct dmabuf_genpool_chunk_owner {
-	/* Offset into the dma-buf where this chunk starts. */
-	unsigned long base_virtual;
+	struct net_iov_area area;
+	struct net_devmem_dmabuf_binding *binding;
 
 	/* dma_addr of the start of the chunk. */
 	dma_addr_t base_dma_addr;
-
-	/* Array of net_iovs for this chunk. */
-	struct net_iov *niovs;
-	size_t num_niovs;
-
-	struct net_devmem_dmabuf_binding *binding;
 };
 
 void __net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding);
@@ -75,20 +71,17 @@ int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
 void dev_dmabuf_uninstall(struct net_device *dev);
 
 static inline struct dmabuf_genpool_chunk_owner *
-net_iov_owner(const struct net_iov *niov)
+net_devmem_iov_to_chunk_owner(const struct net_iov *niov)
 {
-	return niov->owner;
-}
+	struct net_iov_area *owner = net_iov_owner(niov);
 
-static inline unsigned int net_iov_idx(const struct net_iov *niov)
-{
-	return niov - net_iov_owner(niov)->niovs;
+	return container_of(owner, struct dmabuf_genpool_chunk_owner, area);
 }
 
 static inline struct net_devmem_dmabuf_binding *
 net_devmem_iov_binding(const struct net_iov *niov)
 {
-	return net_iov_owner(niov)->binding;
+	return net_devmem_iov_to_chunk_owner(niov)->binding;
 }
 
 static inline u32 net_devmem_iov_binding_id(const struct net_iov *niov)
@@ -98,7 +91,7 @@ static inline u32 net_devmem_iov_binding_id(const struct net_iov *niov)
 
 static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
 {
-	struct dmabuf_genpool_chunk_owner *owner = net_iov_owner(niov);
+	struct net_iov_area *owner = net_iov_owner(niov);
 
 	return owner->base_virtual +
 	       ((unsigned long)net_iov_idx(niov) << PAGE_SHIFT);