net: gro: expose GRO init/cleanup to use outside of NAPI

Make the GRO init and cleanup functions global so that GRO can be used
without a NAPI instance. Together with the already global gro_flush(),
GRO is now fully usable standalone.
The new functions are not exported, since they are not meant to be used
outside of the core kernel code.

Tested-by: Daniel Xu <dxu@dxuuu.xyz>
Reviewed-by: Jakub Kicinski <kuba@kernel.org>
Reviewed-by: Toke Høiland-Jørgensen <toke@redhat.com>
Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
commit 388d31417c (parent 291515c764)
Author:    Alexander Lobakin <aleksander.lobakin@intel.com>, 2025-02-25 18:17:44 +01:00
Committer: Paolo Abeni <pabeni@redhat.com>
3 changed files with 40 additions and 34 deletions
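
For illustration only (not part of this commit): a minimal sketch of how a
kernel-internal caller might drive a standalone struct gro_node with these
helpers. The my_rx_ctx wrapper and its functions are hypothetical names, and
the bool "flush old only" argument of gro_flush() is assumed from the
pre-existing NAPI flush semantics; gro_init(), gro_cleanup(), gro_flush() and
gro_normal_list() are the helpers referenced by this commit.

/*
 * Hypothetical example, not from this commit: a context that owns its own
 * GRO state without embedding a full NAPI instance. The my_rx_ctx names are
 * made up for illustration; gro_flush()'s second argument is assumed to keep
 * the old "flush_old" meaning from the NAPI-based flush helpers.
 */
#include <net/gro.h>

struct my_rx_ctx {
	struct gro_node gro;
};

static void my_rx_ctx_init(struct my_rx_ctx *ctx)
{
	/* Set up the GRO hash buckets and the rx_list of coalesced skbs. */
	gro_init(&ctx->gro);
}

static void my_rx_ctx_flush(struct my_rx_ctx *ctx)
{
	/* Flush segments still held in the GRO hash buckets... */
	gro_flush(&ctx->gro, false);
	/* ...then hand the batched skbs queued on rx_list to the stack. */
	gro_normal_list(&ctx->gro);
}

static void my_rx_ctx_free(struct my_rx_ctx *ctx)
{
	/* Drop anything still sitting in the hash buckets or on rx_list. */
	gro_cleanup(&ctx->gro);
}

The flush step roughly mirrors what the NAPI completion path does for
NAPI-backed GRO: first flush the hash buckets, then deliver the skbs queued on
rx_list.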

include/net/gro.h

@@ -546,6 +546,9 @@ static inline void gro_normal_one(struct gro_node *gro, struct sk_buff *skb,
 	gro_normal_list(gro);
 }
 
+void gro_init(struct gro_node *gro);
+void gro_cleanup(struct gro_node *gro);
+
 /* This function is the alternative of 'inet_iif' and 'inet_sdif'
  * functions in case we can not rely on fields of IPCB.
  *

net/core/dev.c

@@ -6850,19 +6850,6 @@ static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
 	return HRTIMER_NORESTART;
 }
 
-static void init_gro_hash(struct napi_struct *napi)
-{
-	int i;
-
-	for (i = 0; i < GRO_HASH_BUCKETS; i++) {
-		INIT_LIST_HEAD(&napi->gro.hash[i].list);
-		napi->gro.hash[i].count = 0;
-	}
-	napi->gro.bitmask = 0;
-	napi->gro.cached_napi_id = 0;
-}
-
 int dev_set_threaded(struct net_device *dev, bool threaded)
 {
 	struct napi_struct *napi;
@@ -7190,10 +7177,8 @@ void netif_napi_add_weight_locked(struct net_device *dev,
 	INIT_HLIST_NODE(&napi->napi_hash_node);
 	hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
 	napi->timer.function = napi_watchdog;
-	init_gro_hash(napi);
+	gro_init(&napi->gro);
 	napi->skb = NULL;
-	INIT_LIST_HEAD(&napi->gro.rx_list);
-	napi->gro.rx_count = 0;
 	napi->poll = poll;
 	if (weight > NAPI_POLL_WEIGHT)
 		netdev_err_once(dev, "%s() called with weight %d\n", __func__,
@@ -7307,22 +7292,6 @@ void napi_enable(struct napi_struct *n)
 }
 EXPORT_SYMBOL(napi_enable);
 
-static void flush_gro_hash(struct napi_struct *napi)
-{
-	int i;
-
-	for (i = 0; i < GRO_HASH_BUCKETS; i++) {
-		struct sk_buff *skb, *n;
-
-		list_for_each_entry_safe(skb, n, &napi->gro.hash[i].list, list)
-			kfree_skb(skb);
-		napi->gro.hash[i].count = 0;
-	}
-	napi->gro.bitmask = 0;
-	napi->gro.cached_napi_id = 0;
-}
-
 /* Must be called in process context */
 void __netif_napi_del_locked(struct napi_struct *napi)
 {
@@ -7345,7 +7314,7 @@ void __netif_napi_del_locked(struct napi_struct *napi)
 	list_del_rcu(&napi->dev_list);
 	napi_free_frags(napi);
 
-	flush_gro_hash(napi);
+	gro_cleanup(&napi->gro);
 
 	if (napi->thread) {
 		kthread_stop(napi->thread);
@@ -12800,7 +12769,7 @@ static int __init net_dev_init(void)
 		INIT_CSD(&sd->defer_csd, trigger_rx_softirq, sd);
 		spin_lock_init(&sd->defer_lock);
 
-		init_gro_hash(&sd->backlog);
+		gro_init(&sd->backlog.gro);
 		sd->backlog.poll = process_backlog;
 		sd->backlog.weight = weight_p;
 		INIT_LIST_HEAD(&sd->backlog.poll_list);

net/core/gro.c

@@ -790,3 +790,37 @@ __sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
 	return sum;
 }
 EXPORT_SYMBOL(__skb_gro_checksum_complete);
+
+void gro_init(struct gro_node *gro)
+{
+	for (u32 i = 0; i < GRO_HASH_BUCKETS; i++) {
+		INIT_LIST_HEAD(&gro->hash[i].list);
+		gro->hash[i].count = 0;
+	}
+
+	gro->bitmask = 0;
+	gro->cached_napi_id = 0;
+
+	INIT_LIST_HEAD(&gro->rx_list);
+	gro->rx_count = 0;
+}
+
+void gro_cleanup(struct gro_node *gro)
+{
+	struct sk_buff *skb, *n;
+
+	for (u32 i = 0; i < GRO_HASH_BUCKETS; i++) {
+		list_for_each_entry_safe(skb, n, &gro->hash[i].list, list)
+			kfree_skb(skb);
+		gro->hash[i].count = 0;
+	}
+
+	gro->bitmask = 0;
+	gro->cached_napi_id = 0;
+
+	list_for_each_entry_safe(skb, n, &gro->rx_list, list)
+		kfree_skb(skb);
+	gro->rx_count = 0;
+}