Merge tag 'net-6.15-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Paolo Abeni:
 "Including fixes from netfilter.

  Current release - regressions:
   - core: hold instance lock during NETDEV_CHANGE
   - rtnetlink: fix bad unlock balance in do_setlink()
   - ipv6:
      - fix null-ptr-deref in addrconf_add_ifaddr()
      - align behavior across nexthops during path selection

  Previous releases - regressions:
   - sctp: prevent transport UaF in sendmsg
   - mptcp: only inc MPJoinAckHMacFailure for HMAC failures

  Previous releases - always broken:
   - sched:
      - make ->qlen_notify() idempotent
      - ensure sufficient space when sending filter netlink notifications
      - sch_sfq: really don't allow 1 packet limit
   - netfilter: fix incorrect avx2 match of 5th field octet
   - tls: explicitly disallow disconnect
   - eth: octeontx2-pf: fix VF root node parent queue priority"

Signed-off-by: Paolo Abeni <pabeni@redhat.com>

* tag 'net-6.15-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (38 commits)
  ethtool: cmis_cdb: Fix incorrect read / write length extension
  selftests: netfilter: add test case for recent mismatch bug
  nft_set_pipapo: fix incorrect avx2 match of 5th field octet
  net: ppp: Add bound checking for skb data on ppp_sync_txmung
  net: Fix null-ptr-deref by sock_lock_init_class_and_name() and rmmod.
  ipv6: Align behavior across nexthops during path selection
  net: phy: allow MDIO bus PM ops to start/stop state machine for phylink-controlled PHY
  net: phy: move phy_link_change() prior to mdio_bus_phy_may_suspend()
  selftests/tc-testing: sfq: check that a derived limit of 1 is rejected
  net_sched: sch_sfq: move the limit validation
  net_sched: sch_sfq: use a temporary work area for validating configuration
  net: libwx: handle page_pool_dev_alloc_pages error
  selftests: mptcp: validate MPJoin HMacFailure counters
  mptcp: only inc MPJoinAckHMacFailure for HMAC failures
  rtnetlink: Fix bad unlock balance in do_setlink().
  net: ethtool: Don't call .cleanup_data when prepare_data fails
  tc: Ensure we have enough buffer space when sending filter netlink notifications
  net: libwx: Fix the wrong Rx descriptor field
  octeontx2-pf: qos: fix VF root node parent queue index
  selftests: tls: check that disconnect does nothing
  ...
@@ -338,10 +338,11 @@ operations directly under the netdev instance lock.
Devices drivers are encouraged to rely on the instance lock where possible.

For the (mostly software) drivers that need to interact with the core stack,
there are two sets of interfaces: ``dev_xxx`` and ``netif_xxx`` (e.g.,
``dev_set_mtu`` and ``netif_set_mtu``). The ``dev_xxx`` functions handle
acquiring the instance lock themselves, while the ``netif_xxx`` functions
assume that the driver has already acquired the instance lock.
there are two sets of interfaces: ``dev_xxx``/``netdev_xxx`` and ``netif_xxx``
(e.g., ``dev_set_mtu`` and ``netif_set_mtu``). The ``dev_xxx``/``netdev_xxx``
functions handle acquiring the instance lock themselves, while the
``netif_xxx`` functions assume that the driver has already acquired
the instance lock.

Notifiers and netdev instance lock
==================================
@@ -354,6 +355,7 @@ For devices with locked ops, currently only the following notifiers are
running under the lock:
* ``NETDEV_REGISTER``
* ``NETDEV_UP``
* ``NETDEV_CHANGE``

The following notifiers are running without the lock:
* ``NETDEV_UNREGISTER``
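[Illustrative aside, not part of the patch: a minimal sketch of the calling convention the documentation above describes. The my_drv_* helpers are hypothetical; dev_set_mtu(), netif_set_mtu() and netdev_ops_assert_locked() are the interfaces named by the documentation and used elsewhere in this series.]

    #include <linux/netdevice.h>

    /* Caller does NOT hold the instance lock: the dev_xxx variant
     * acquires and releases the instance lock internally. */
    static int my_drv_change_mtu_unlocked(struct net_device *dev, int new_mtu)
    {
            return dev_set_mtu(dev, new_mtu);
    }

    /* Caller already holds the instance lock (e.g. inside a locked ndo
     * callback or notifier): use the netif_xxx variant, which assumes
     * the lock is held. */
    static int my_drv_change_mtu_locked(struct net_device *dev, int new_mtu)
    {
            netdev_ops_assert_locked(dev);
            return netif_set_mtu(dev, new_mtu);
    }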
@@ -165,6 +165,11 @@ static void __otx2_qos_txschq_cfg(struct otx2_nic *pfvf,

otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
} else if (level == NIX_TXSCH_LVL_TL2) {
/* configure parent txschq */
cfg->reg[num_regs] = NIX_AF_TL2X_PARENT(node->schq);
cfg->regval[num_regs] = (u64)hw->tx_link << 16;
num_regs++;

/* configure link cfg */
if (level == pfvf->qos.link_cfg_lvl) {
cfg->reg[num_regs] = NIX_AF_TL3_TL2X_LINKX_CFG(node->schq, hw->tx_link);
@@ -310,7 +310,8 @@ static bool wx_alloc_mapped_page(struct wx_ring *rx_ring,
return true;

page = page_pool_dev_alloc_pages(rx_ring->page_pool);
WARN_ON(!page);
if (unlikely(!page))
return false;
dma = page_pool_get_dma_addr(page);

bi->page_dma = dma;
@@ -546,7 +547,8 @@ static void wx_rx_checksum(struct wx_ring *ring,
return;

/* Hardware can't guarantee csum if IPv6 Dest Header found */
if (dptype.prot != WX_DEC_PTYPE_PROT_SCTP && WX_RXD_IPV6EX(rx_desc))
if (dptype.prot != WX_DEC_PTYPE_PROT_SCTP &&
wx_test_staterr(rx_desc, WX_RXD_STAT_IPV6EX))
return;

/* if L4 checksum error */
@@ -513,6 +513,7 @@ enum WX_MSCA_CMD_value {
#define WX_RXD_STAT_L4CS BIT(7) /* L4 xsum calculated */
#define WX_RXD_STAT_IPCS BIT(8) /* IP xsum calculated */
#define WX_RXD_STAT_OUTERIPCS BIT(10) /* Cloud IP xsum calculated*/
#define WX_RXD_STAT_IPV6EX BIT(12) /* IPv6 Dest Header */
#define WX_RXD_STAT_TS BIT(14) /* IEEE1588 Time Stamp */

#define WX_RXD_ERR_OUTERIPER BIT(26) /* CRC IP Header error */
@@ -589,8 +590,6 @@ enum wx_l2_ptypes {

#define WX_RXD_PKTTYPE(_rxd) \
((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 9) & 0xFF)
#define WX_RXD_IPV6EX(_rxd) \
((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 6) & 0x1)
/*********************** Transmit Descriptor Config Masks ****************/
#define WX_TXD_STAT_DD BIT(0) /* Descriptor Done */
#define WX_TXD_DTYP_DATA 0 /* Adv Data Descriptor */
@@ -244,6 +244,46 @@ static bool phy_drv_wol_enabled(struct phy_device *phydev)
return wol.wolopts != 0;
}

static void phy_link_change(struct phy_device *phydev, bool up)
{
struct net_device *netdev = phydev->attached_dev;

if (up)
netif_carrier_on(netdev);
else
netif_carrier_off(netdev);
phydev->adjust_link(netdev);
if (phydev->mii_ts && phydev->mii_ts->link_state)
phydev->mii_ts->link_state(phydev->mii_ts, phydev);
}

/**
 * phy_uses_state_machine - test whether consumer driver uses PAL state machine
 * @phydev: the target PHY device structure
 *
 * Ultimately, this aims to indirectly determine whether the PHY is attached
 * to a consumer which uses the state machine by calling phy_start() and
 * phy_stop().
 *
 * When the PHY driver consumer uses phylib, it must have previously called
 * phy_connect_direct() or one of its derivatives, so that phy_prepare_link()
 * has set up a hook for monitoring state changes.
 *
 * When the PHY driver is used by the MAC driver consumer through phylink (the
 * only other provider of a phy_link_change() method), using the PHY state
 * machine is not optional.
 *
 * Return: true if consumer calls phy_start() and phy_stop(), false otherwise.
 */
static bool phy_uses_state_machine(struct phy_device *phydev)
{
if (phydev->phy_link_change == phy_link_change)
return phydev->attached_dev && phydev->adjust_link;

/* phydev->phy_link_change is implicitly phylink_phy_change() */
return true;
}

static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
{
struct device_driver *drv = phydev->mdio.dev.driver;
@@ -310,7 +350,7 @@ static __maybe_unused int mdio_bus_phy_suspend(struct device *dev)
 * may call phy routines that try to grab the same lock, and that may
 * lead to a deadlock.
 */
if (phydev->attached_dev && phydev->adjust_link)
if (phy_uses_state_machine(phydev))
phy_stop_machine(phydev);

if (!mdio_bus_phy_may_suspend(phydev))
@@ -364,7 +404,7 @@ no_resume:
}
}

if (phydev->attached_dev && phydev->adjust_link)
if (phy_uses_state_machine(phydev))
phy_start_machine(phydev);

return 0;
@@ -1055,19 +1095,6 @@ struct phy_device *phy_find_first(struct mii_bus *bus)
}
EXPORT_SYMBOL(phy_find_first);

static void phy_link_change(struct phy_device *phydev, bool up)
{
struct net_device *netdev = phydev->attached_dev;

if (up)
netif_carrier_on(netdev);
else
netif_carrier_off(netdev);
phydev->adjust_link(netdev);
if (phydev->mii_ts && phydev->mii_ts->link_state)
phydev->mii_ts->link_state(phydev->mii_ts, phydev);
}

/**
 * phy_prepare_link - prepares the PHY layer to monitor link status
 * @phydev: target phy_device struct
@@ -506,6 +506,11 @@ ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *skb)
unsigned char *data;
int islcp;

/* Ensure we can safely access protocol field and LCP code */
if (!pskb_may_pull(skb, 3)) {
kfree_skb(skb);
return NULL;
}
data = skb->data;
proto = get_unaligned_be16(data);
@@ -4429,6 +4429,7 @@ void linkwatch_fire_event(struct net_device *dev);
 * pending work list (if queued).
 */
void linkwatch_sync_dev(struct net_device *dev);
void __linkwatch_sync_dev(struct net_device *dev);

/**
 * netif_carrier_ok - test if carrier present
@@ -4974,6 +4975,7 @@ void dev_set_rx_mode(struct net_device *dev);
int dev_set_promiscuity(struct net_device *dev, int inc);
int netif_set_allmulti(struct net_device *dev, int inc, bool notify);
int dev_set_allmulti(struct net_device *dev, int inc);
void netif_state_change(struct net_device *dev);
void netdev_state_change(struct net_device *dev);
void __netdev_notify_peers(struct net_device *dev);
void netdev_notify_peers(struct net_device *dev);
@@ -240,6 +240,6 @@ rtnl_notify_needed(const struct net *net, u16 nlflags, u32 group)
return (nlflags & NLM_F_ECHO) || rtnl_has_listeners(net, group);
}

void netdev_set_operstate(struct net_device *dev, int newstate);
void netif_set_operstate(struct net_device *dev, int newstate);

#endif /* __LINUX_RTNETLINK_H */
@@ -775,6 +775,7 @@ struct sctp_transport {

/* Reference counting. */
refcount_t refcnt;
__u32 dead:1,
/* RTO-Pending : A flag used to track if one of the DATA
 * chunks sent to this address is currently being
 * used to compute a RTT. If this flag is 0,
@@ -784,7 +785,7 @@ struct sctp_transport {
 * calculation completes (i.e. the DATA chunk
 * is SACK'd) clear this flag.
 */
__u32 rto_pending:1,
rto_pending:1,

/*
 * hb_sent : a flag that signals that we have a pending
@@ -339,6 +339,8 @@ struct sk_filter;
 * @sk_txtime_unused: unused txtime flags
 * @ns_tracker: tracker for netns reference
 * @sk_user_frags: xarray of pages the user is holding a reference on.
 * @sk_owner: reference to the real owner of the socket that calls
 *	      sock_lock_init_class_and_name().
 */
struct sock {
/*
@@ -547,6 +549,10 @@ struct sock {
struct rcu_head sk_rcu;
netns_tracker ns_tracker;
struct xarray sk_user_frags;

#if IS_ENABLED(CONFIG_PROVE_LOCKING) && IS_ENABLED(CONFIG_MODULES)
struct module *sk_owner;
#endif
};

struct sock_bh_locked {
@@ -1583,6 +1589,35 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
sk_mem_reclaim(sk);
}

#if IS_ENABLED(CONFIG_PROVE_LOCKING) && IS_ENABLED(CONFIG_MODULES)
static inline void sk_owner_set(struct sock *sk, struct module *owner)
{
__module_get(owner);
sk->sk_owner = owner;
}

static inline void sk_owner_clear(struct sock *sk)
{
sk->sk_owner = NULL;
}

static inline void sk_owner_put(struct sock *sk)
{
module_put(sk->sk_owner);
}
#else
static inline void sk_owner_set(struct sock *sk, struct module *owner)
{
}

static inline void sk_owner_clear(struct sock *sk)
{
}

static inline void sk_owner_put(struct sock *sk)
{
}
#endif
/*
 * Macro so as to not evaluate some arguments when
 * lockdep is not enabled.
@@ -1592,13 +1627,14 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
 */
#define sock_lock_init_class_and_name(sk, sname, skey, name, key) \
do { \
sk_owner_set(sk, THIS_MODULE); \
sk->sk_lock.owned = 0; \
init_waitqueue_head(&sk->sk_lock.wq); \
spin_lock_init(&(sk)->sk_lock.slock); \
debug_check_no_locks_freed((void *)&(sk)->sk_lock, \
sizeof((sk)->sk_lock)); \
sizeof((sk)->sk_lock)); \
lockdep_set_class_and_name(&(sk)->sk_lock.slock, \
(skey), (sname)); \
(skey), (sname)); \
lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \
} while (0)
@@ -1518,15 +1518,7 @@ void netdev_features_change(struct net_device *dev)
}
EXPORT_SYMBOL(netdev_features_change);

/**
 * netdev_state_change - device changes state
 * @dev: device to cause notification
 *
 * Called to indicate a device has changed state. This function calls
 * the notifier chains for netdev_chain and sends a NEWLINK message
 * to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
void netif_state_change(struct net_device *dev)
{
if (dev->flags & IFF_UP) {
struct netdev_notifier_change_info change_info = {
@@ -1538,7 +1530,6 @@ void netdev_state_change(struct net_device *dev)
rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL, 0, NULL);
}
}
EXPORT_SYMBOL(netdev_state_change);

/**
 * __netdev_notify_peers - notify network peers about existence of @dev,
@@ -327,3 +327,19 @@ int dev_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf)
return ret;
}
EXPORT_SYMBOL_GPL(dev_xdp_propagate);

/**
 * netdev_state_change() - device changes state
 * @dev: device to cause notification
 *
 * Called to indicate a device has changed state. This function calls
 * the notifier chains for netdev_chain and sends a NEWLINK message
 * to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
netdev_lock_ops(dev);
netif_state_change(dev);
netdev_unlock_ops(dev);
}
EXPORT_SYMBOL(netdev_state_change);
@@ -183,7 +183,7 @@ static void linkwatch_do_dev(struct net_device *dev)
else
dev_deactivate(dev);

netdev_state_change(dev);
netif_state_change(dev);
}
/* Note: our callers are responsible for calling netdev_tracker_free().
 * This is the reason we use __dev_put() instead of dev_put().
@@ -240,7 +240,9 @@ static void __linkwatch_run_queue(int urgent_only)
 */
netdev_tracker_free(dev, &dev->linkwatch_dev_tracker);
spin_unlock_irq(&lweventlist_lock);
netdev_lock_ops(dev);
linkwatch_do_dev(dev);
netdev_unlock_ops(dev);
do_dev--;
spin_lock_irq(&lweventlist_lock);
}
@@ -253,25 +255,41 @@ static void __linkwatch_run_queue(int urgent_only)
spin_unlock_irq(&lweventlist_lock);
}

void linkwatch_sync_dev(struct net_device *dev)
static bool linkwatch_clean_dev(struct net_device *dev)
{
unsigned long flags;
int clean = 0;
bool clean = false;

spin_lock_irqsave(&lweventlist_lock, flags);
if (!list_empty(&dev->link_watch_list)) {
list_del_init(&dev->link_watch_list);
clean = 1;
clean = true;
/* We must release netdev tracker under
 * the spinlock protection.
 */
netdev_tracker_free(dev, &dev->linkwatch_dev_tracker);
}
spin_unlock_irqrestore(&lweventlist_lock, flags);
if (clean)

return clean;
}

void __linkwatch_sync_dev(struct net_device *dev)
{
netdev_ops_assert_locked(dev);

if (linkwatch_clean_dev(dev))
linkwatch_do_dev(dev);
}

void linkwatch_sync_dev(struct net_device *dev)
{
if (linkwatch_clean_dev(dev)) {
netdev_lock_ops(dev);
linkwatch_do_dev(dev);
netdev_unlock_ops(dev);
}
}

/* Must be called with the rtnl semaphore held */
void linkwatch_run_queue(void)
@@ -20,11 +20,11 @@ int netdev_debug_event(struct notifier_block *nb, unsigned long event,
switch (cmd) {
case NETDEV_REGISTER:
case NETDEV_UP:
case NETDEV_CHANGE:
netdev_ops_assert_locked(dev);
fallthrough;
case NETDEV_DOWN:
case NETDEV_REBOOT:
case NETDEV_CHANGE:
case NETDEV_UNREGISTER:
case NETDEV_CHANGEMTU:
case NETDEV_CHANGEADDR:
@@ -1043,7 +1043,7 @@ int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
}
EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);

void netdev_set_operstate(struct net_device *dev, int newstate)
void netif_set_operstate(struct net_device *dev, int newstate)
{
unsigned int old = READ_ONCE(dev->operstate);

@@ -1052,9 +1052,9 @@ void netdev_set_operstate(struct net_device *dev, int newstate)
return;
} while (!try_cmpxchg(&dev->operstate, &old, newstate));

netdev_state_change(dev);
netif_state_change(dev);
}
EXPORT_SYMBOL(netdev_set_operstate);
EXPORT_SYMBOL(netif_set_operstate);

static void set_operstate(struct net_device *dev, unsigned char transition)
{
@@ -1080,7 +1080,7 @@ static void set_operstate(struct net_device *dev, unsigned char transition)
break;
}

netdev_set_operstate(dev, operstate);
netif_set_operstate(dev, operstate);
}

static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
@@ -3027,7 +3027,7 @@ static int do_setlink(const struct sk_buff *skb, struct net_device *dev,

err = validate_linkmsg(dev, tb, extack);
if (err < 0)
goto errout;
return err;

if (tb[IFLA_IFNAME])
nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
@@ -3396,7 +3396,7 @@ static int do_setlink(const struct sk_buff *skb, struct net_device *dev,
errout:
if (status & DO_SETLINK_MODIFIED) {
if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY)
netdev_state_change(dev);
netif_state_change(dev);

if (err < 0)
net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
@@ -3676,8 +3676,11 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname,
nla_len(tb[IFLA_BROADCAST]));
if (tb[IFLA_TXQLEN])
dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
if (tb[IFLA_OPERSTATE])
if (tb[IFLA_OPERSTATE]) {
netdev_lock_ops(dev);
set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
netdev_unlock_ops(dev);
}
if (tb[IFLA_LINKMODE])
dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
if (tb[IFLA_GROUP])
@@ -2130,6 +2130,8 @@ lenout:
 */
static inline void sock_lock_init(struct sock *sk)
{
sk_owner_clear(sk);

if (sk->sk_kern_sock)
sock_lock_init_class_and_name(
sk,
@@ -2226,6 +2228,9 @@ static void sk_prot_free(struct proto *prot, struct sock *sk)
cgroup_sk_free(&sk->sk_cgrp_data);
mem_cgroup_sk_free(sk);
security_sk_free(sk);

sk_owner_put(sk);

if (slab != NULL)
kmem_cache_free(slab, sk);
else
@@ -101,7 +101,6 @@ struct ethtool_cmis_cdb_rpl {
};

u32 ethtool_cmis_get_max_lpl_size(u8 num_of_byte_octs);
u32 ethtool_cmis_get_max_epl_size(u8 num_of_byte_octs);

void ethtool_cmis_cdb_compose_args(struct ethtool_cmis_cdb_cmd_args *args,
enum ethtool_cmis_cdb_cmd_id cmd, u8 *lpl,
@@ -16,15 +16,6 @@ u32 ethtool_cmis_get_max_lpl_size(u8 num_of_byte_octs)
return 8 * (1 + min_t(u8, num_of_byte_octs, 15));
}

/* For accessing the EPL field on page 9Fh, the allowable length extension is
 * min(i, 255) byte octets where i specifies the allowable additional number of
 * byte octets in a READ or a WRITE.
 */
u32 ethtool_cmis_get_max_epl_size(u8 num_of_byte_octs)
{
return 8 * (1 + min_t(u8, num_of_byte_octs, 255));
}

void ethtool_cmis_cdb_compose_args(struct ethtool_cmis_cdb_cmd_args *args,
enum ethtool_cmis_cdb_cmd_id cmd, u8 *lpl,
u8 lpl_len, u8 *epl, u16 epl_len,
@@ -33,19 +24,16 @@ void ethtool_cmis_cdb_compose_args(struct ethtool_cmis_cdb_cmd_args *args,
{
args->req.id = cpu_to_be16(cmd);
args->req.lpl_len = lpl_len;
if (lpl) {
if (lpl)
memcpy(args->req.payload, lpl, args->req.lpl_len);
args->read_write_len_ext =
ethtool_cmis_get_max_lpl_size(read_write_len_ext);
}
if (epl) {
args->req.epl_len = cpu_to_be16(epl_len);
args->req.epl = epl;
args->read_write_len_ext =
ethtool_cmis_get_max_epl_size(read_write_len_ext);
}

args->max_duration = max_duration;
args->read_write_len_ext =
ethtool_cmis_get_max_lpl_size(read_write_len_ext);
args->msleep_pre_rpl = msleep_pre_rpl;
args->rpl_exp_len = rpl_exp_len;
args->flags = flags;
@@ -830,6 +830,7 @@ void ethtool_ringparam_get_cfg(struct net_device *dev,

/* Driver gives us current state, we want to return current config */
kparam->tcp_data_split = dev->cfg->hds_config;
kparam->hds_thresh = dev->cfg->hds_thresh;
}

static void ethtool_init_tsinfo(struct kernel_ethtool_ts_info *info)
@@ -60,7 +60,7 @@ static struct devlink *netdev_to_devlink_get(struct net_device *dev)
u32 ethtool_op_get_link(struct net_device *dev)
{
/* Synchronize carrier state with link watch, see also rtnl_getlink() */
linkwatch_sync_dev(dev);
__linkwatch_sync_dev(dev);

return netif_carrier_ok(dev) ? 1 : 0;
}
@@ -500,7 +500,7 @@ static int ethnl_default_doit(struct sk_buff *skb, struct genl_info *info)
netdev_unlock_ops(req_info->dev);
rtnl_unlock();
if (ret < 0)
goto err_cleanup;
goto err_dev;
ret = ops->reply_size(req_info, reply_data);
if (ret < 0)
goto err_cleanup;
@@ -560,7 +560,7 @@ static int ethnl_default_dump_one(struct sk_buff *skb, struct net_device *dev,
netdev_unlock_ops(dev);
rtnl_unlock();
if (ret < 0)
goto out;
goto out_cancel;
ret = ethnl_fill_reply_header(skb, dev, ctx->ops->hdr_attr);
if (ret < 0)
goto out;
@@ -569,6 +569,7 @@ static int ethnl_default_dump_one(struct sk_buff *skb, struct net_device *dev,
out:
if (ctx->ops->cleanup_data)
ctx->ops->cleanup_data(ctx->reply_data);
out_cancel:
ctx->reply_data->dev = NULL;
if (ret < 0)
genlmsg_cancel(skb, ehdr);
@@ -793,7 +794,7 @@ static void ethnl_default_notify(struct net_device *dev, unsigned int cmd,
ethnl_init_reply_data(reply_data, ops, dev);
ret = ops->prepare_data(req_info, reply_data, &info);
if (ret < 0)
goto err_cleanup;
goto err_rep;
ret = ops->reply_size(req_info, reply_data);
if (ret < 0)
goto err_cleanup;
@@ -828,6 +829,7 @@ err_skb:
err_cleanup:
if (ops->cleanup_data)
ops->cleanup_data(reply_data);
err_rep:
kfree(reply_data);
kfree(req_info);
return;
@@ -33,14 +33,14 @@ static void hsr_set_operstate(struct hsr_port *master, bool has_carrier)
struct net_device *dev = master->dev;

if (!is_admin_up(dev)) {
netdev_set_operstate(dev, IF_OPER_DOWN);
netif_set_operstate(dev, IF_OPER_DOWN);
return;
}

if (has_carrier)
netdev_set_operstate(dev, IF_OPER_UP);
netif_set_operstate(dev, IF_OPER_UP);
else
netdev_set_operstate(dev, IF_OPER_LOWERLAYERDOWN);
netif_set_operstate(dev, IF_OPER_LOWERLAYERDOWN);
}

static bool hsr_check_carrier(struct hsr_port *master)
@@ -3154,12 +3154,13 @@ int addrconf_add_ifaddr(struct net *net, void __user *arg)

rtnl_net_lock(net);
dev = __dev_get_by_index(net, ireq.ifr6_ifindex);
netdev_lock_ops(dev);
if (dev)
if (dev) {
netdev_lock_ops(dev);
err = inet6_addr_add(net, dev, &cfg, 0, 0, NULL);
else
netdev_unlock_ops(dev);
} else {
err = -ENODEV;
netdev_unlock_ops(dev);
}
rtnl_net_unlock(net);
return err;
}
@@ -470,10 +470,10 @@ void fib6_select_path(const struct net *net, struct fib6_result *res,
goto out;

hash = fl6->mp_hash;
if (hash <= atomic_read(&first->fib6_nh->fib_nh_upper_bound) &&
rt6_score_route(first->fib6_nh, first->fib6_flags, oif,
strict) >= 0) {
match = first;
if (hash <= atomic_read(&first->fib6_nh->fib_nh_upper_bound)) {
if (rt6_score_route(first->fib6_nh, first->fib6_flags, oif,
strict) >= 0)
match = first;
goto out;
}
@@ -899,13 +899,17 @@ create_child:
goto dispose_child;
}

if (!subflow_hmac_valid(req, &mp_opt) ||
!mptcp_can_accept_new_subflow(subflow_req->msk)) {
if (!subflow_hmac_valid(req, &mp_opt)) {
SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
goto dispose_child;
}

if (!mptcp_can_accept_new_subflow(owner)) {
subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
goto dispose_child;
}

/* move the msk reference ownership to the subflow */
subflow_req->msk = NULL;
ctx->conn = (struct sock *)owner;
@@ -994,8 +994,9 @@ static int nft_pipapo_avx2_lookup_8b_16(unsigned long *map, unsigned long *fill,
NFT_PIPAPO_AVX2_BUCKET_LOAD8(5, lt, 8, pkt[8], bsize);

NFT_PIPAPO_AVX2_AND(6, 2, 3);
NFT_PIPAPO_AVX2_AND(3, 4, 7);
NFT_PIPAPO_AVX2_BUCKET_LOAD8(7, lt, 9, pkt[9], bsize);
NFT_PIPAPO_AVX2_AND(0, 4, 5);
NFT_PIPAPO_AVX2_AND(0, 3, 5);
NFT_PIPAPO_AVX2_BUCKET_LOAD8(1, lt, 10, pkt[10], bsize);
NFT_PIPAPO_AVX2_AND(2, 6, 7);
NFT_PIPAPO_AVX2_BUCKET_LOAD8(3, lt, 11, pkt[11], bsize);
@@ -2057,6 +2057,7 @@ static int tcf_fill_node(struct net *net, struct sk_buff *skb,
struct tcmsg *tcm;
struct nlmsghdr *nlh;
unsigned char *b = skb_tail_pointer(skb);
int ret = -EMSGSIZE;

nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
if (!nlh)
@@ -2101,11 +2102,45 @@ static int tcf_fill_node(struct net *net, struct sk_buff *skb,

return skb->len;

cls_op_not_supp:
ret = -EOPNOTSUPP;
out_nlmsg_trim:
nla_put_failure:
cls_op_not_supp:
nlmsg_trim(skb, b);
return -1;
return ret;
}

static struct sk_buff *tfilter_notify_prep(struct net *net,
struct sk_buff *oskb,
struct nlmsghdr *n,
struct tcf_proto *tp,
struct tcf_block *block,
struct Qdisc *q, u32 parent,
void *fh, int event,
u32 portid, bool rtnl_held,
struct netlink_ext_ack *extack)
{
unsigned int size = oskb ? max(NLMSG_GOODSIZE, oskb->len) : NLMSG_GOODSIZE;
struct sk_buff *skb;
int ret;

retry:
skb = alloc_skb(size, GFP_KERNEL);
if (!skb)
return ERR_PTR(-ENOBUFS);

ret = tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
n->nlmsg_seq, n->nlmsg_flags, event, false,
rtnl_held, extack);
if (ret <= 0) {
kfree_skb(skb);
if (ret == -EMSGSIZE) {
size += NLMSG_GOODSIZE;
goto retry;
}
return ERR_PTR(-EINVAL);
}
return skb;
}

static int tfilter_notify(struct net *net, struct sk_buff *oskb,
@@ -2121,16 +2156,10 @@ static int tfilter_notify(struct net *net, struct sk_buff *oskb,
if (!unicast && !rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC))
return 0;

skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
if (!skb)
return -ENOBUFS;

if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
n->nlmsg_seq, n->nlmsg_flags, event,
false, rtnl_held, extack) <= 0) {
kfree_skb(skb);
return -EINVAL;
}
skb = tfilter_notify_prep(net, oskb, n, tp, block, q, parent, fh, event,
portid, rtnl_held, extack);
if (IS_ERR(skb))
return PTR_ERR(skb);

if (unicast)
err = rtnl_unicast(skb, net, portid);
@@ -2153,16 +2182,11 @@ static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
if (!rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC))
return tp->ops->delete(tp, fh, last, rtnl_held, extack);

skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
if (!skb)
return -ENOBUFS;

if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
false, rtnl_held, extack) <= 0) {
skb = tfilter_notify_prep(net, oskb, n, tp, block, q, parent, fh,
RTM_DELTFILTER, portid, rtnl_held, extack);
if (IS_ERR(skb)) {
NL_SET_ERR_MSG(extack, "Failed to build del event notification");
kfree_skb(skb);
return -EINVAL;
return PTR_ERR(skb);
}

err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
|
||||
&q->stats, qdisc_pkt_len, codel_get_enqueue_time,
|
||||
drop_func, dequeue_func);
|
||||
|
||||
/* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
|
||||
* or HTB crashes. Defer it for next round.
|
||||
*/
|
||||
if (q->stats.drop_count && sch->q.qlen) {
|
||||
if (q->stats.drop_count) {
|
||||
qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
|
||||
q->stats.drop_count = 0;
|
||||
q->stats.drop_len = 0;
|
||||
|
@@ -105,6 +105,7 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
return -ENOBUFS;

gnet_stats_basic_sync_init(&cl->bstats);
INIT_LIST_HEAD(&cl->alist);
cl->common.classid = classid;
cl->quantum = quantum;
cl->qdisc = qdisc_create_dflt(sch->dev_queue,
@@ -229,7 +230,7 @@ static void drr_qlen_notify(struct Qdisc *csh, unsigned long arg)
{
struct drr_class *cl = (struct drr_class *)arg;

list_del(&cl->alist);
list_del_init(&cl->alist);
}

static int drr_dump_class(struct Qdisc *sch, unsigned long arg,
@@ -390,7 +391,7 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch)
if (unlikely(skb == NULL))
goto out;
if (cl->qdisc->q.qlen == 0)
list_del(&cl->alist);
list_del_init(&cl->alist);

bstats_update(&cl->bstats, skb);
qdisc_bstats_update(sch, skb);
@@ -431,7 +432,7 @@ static void drr_reset_qdisc(struct Qdisc *sch)
for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
if (cl->qdisc->q.qlen)
list_del(&cl->alist);
list_del_init(&cl->alist);
qdisc_reset(cl->qdisc);
}
}
@@ -293,7 +293,7 @@ static void ets_class_qlen_notify(struct Qdisc *sch, unsigned long arg)
 * to remove them.
 */
if (!ets_class_is_strict(q, cl) && sch->q.qlen)
list_del(&cl->alist);
list_del_init(&cl->alist);
}

static int ets_class_dump(struct Qdisc *sch, unsigned long arg,
@@ -488,7 +488,7 @@ static struct sk_buff *ets_qdisc_dequeue(struct Qdisc *sch)
if (unlikely(!skb))
goto out;
if (cl->qdisc->q.qlen == 0)
list_del(&cl->alist);
list_del_init(&cl->alist);
return ets_qdisc_dequeue_skb(sch, skb);
}

@@ -657,7 +657,7 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
}
for (i = q->nbands; i < oldbands; i++) {
if (i >= q->nstrict && q->classes[i].qdisc->q.qlen)
list_del(&q->classes[i].alist);
list_del_init(&q->classes[i].alist);
qdisc_tree_flush_backlog(q->classes[i].qdisc);
}
WRITE_ONCE(q->nstrict, nstrict);
@@ -713,7 +713,7 @@ static void ets_qdisc_reset(struct Qdisc *sch)

for (band = q->nstrict; band < q->nbands; band++) {
if (q->classes[band].qdisc->q.qlen)
list_del(&q->classes[band].alist);
list_del_init(&q->classes[band].alist);
}
for (band = 0; band < q->nbands; band++)
qdisc_reset(q->classes[band].qdisc);
@@ -315,10 +315,8 @@ begin:
}
qdisc_bstats_update(sch, skb);
flow->deficit -= qdisc_pkt_len(skb);
/* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
 * or HTB crashes. Defer it for next round.
 */
if (q->cstats.drop_count && sch->q.qlen) {

if (q->cstats.drop_count) {
qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
q->cstats.drop_len);
q->cstats.drop_count = 0;
@@ -203,7 +203,10 @@ eltree_insert(struct hfsc_class *cl)
static inline void
eltree_remove(struct hfsc_class *cl)
{
rb_erase(&cl->el_node, &cl->sched->eligible);
if (!RB_EMPTY_NODE(&cl->el_node)) {
rb_erase(&cl->el_node, &cl->sched->eligible);
RB_CLEAR_NODE(&cl->el_node);
}
}

static inline void
@@ -1220,7 +1223,8 @@ hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg)
/* vttree is now handled in update_vf() so that update_vf(cl, 0, 0)
 * needs to be called explicitly to remove a class from vttree.
 */
update_vf(cl, 0, 0);
if (cl->cl_nactive)
update_vf(cl, 0, 0);
if (cl->cl_flags & HFSC_RSC)
eltree_remove(cl);
}
@@ -1485,6 +1485,8 @@ static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
struct htb_class *cl = (struct htb_class *)arg;

if (!cl->prio_activity)
return;
htb_deactivate(qdisc_priv(sch), cl);
}
@@ -347,7 +347,7 @@ static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl)
struct qfq_aggregate *agg = cl->agg;

list_del(&cl->alist); /* remove from RR queue of the aggregate */
list_del_init(&cl->alist); /* remove from RR queue of the aggregate */
if (list_empty(&agg->active)) /* agg is now inactive */
qfq_deactivate_agg(q, agg);
}
@@ -474,6 +474,7 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
gnet_stats_basic_sync_init(&cl->bstats);
cl->common.classid = classid;
cl->deficit = lmax;
INIT_LIST_HEAD(&cl->alist);

cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
classid, NULL);
@@ -982,7 +983,7 @@ static struct sk_buff *agg_dequeue(struct qfq_aggregate *agg,
cl->deficit -= (int) len;

if (cl->qdisc->q.qlen == 0) /* no more packets, remove from list */
list_del(&cl->alist);
list_del_init(&cl->alist);
else if (cl->deficit < qdisc_pkt_len(cl->qdisc->ops->peek(cl->qdisc))) {
cl->deficit += agg->lmax;
list_move_tail(&cl->alist, &agg->active);
@@ -1415,6 +1416,8 @@ static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)
struct qfq_sched *q = qdisc_priv(sch);
struct qfq_class *cl = (struct qfq_class *)arg;

if (list_empty(&cl->alist))
return;
qfq_deactivate_class(q, cl);
}
@@ -631,6 +631,15 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt,
struct red_parms *p = NULL;
struct sk_buff *to_free = NULL;
struct sk_buff *tail = NULL;
unsigned int maxflows;
unsigned int quantum;
unsigned int divisor;
int perturb_period;
u8 headdrop;
u8 maxdepth;
int limit;
u8 flags;

if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
return -EINVAL;
@@ -652,39 +661,64 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt,
if (!p)
return -ENOMEM;
}
if (ctl->limit == 1) {
NL_SET_ERR_MSG_MOD(extack, "invalid limit");
return -EINVAL;
}

sch_tree_lock(sch);

limit = q->limit;
divisor = q->divisor;
headdrop = q->headdrop;
maxdepth = q->maxdepth;
maxflows = q->maxflows;
perturb_period = q->perturb_period;
quantum = q->quantum;
flags = q->flags;

/* update and validate configuration */
if (ctl->quantum)
q->quantum = ctl->quantum;
WRITE_ONCE(q->perturb_period, ctl->perturb_period * HZ);
quantum = ctl->quantum;
perturb_period = ctl->perturb_period * HZ;
if (ctl->flows)
q->maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS);
maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS);
if (ctl->divisor) {
q->divisor = ctl->divisor;
q->maxflows = min_t(u32, q->maxflows, q->divisor);
divisor = ctl->divisor;
maxflows = min_t(u32, maxflows, divisor);
}
if (ctl_v1) {
if (ctl_v1->depth)
q->maxdepth = min_t(u32, ctl_v1->depth, SFQ_MAX_DEPTH);
maxdepth = min_t(u32, ctl_v1->depth, SFQ_MAX_DEPTH);
if (p) {
swap(q->red_parms, p);
red_set_parms(q->red_parms,
red_set_parms(p,
ctl_v1->qth_min, ctl_v1->qth_max,
ctl_v1->Wlog,
ctl_v1->Plog, ctl_v1->Scell_log,
NULL,
ctl_v1->max_P);
}
q->flags = ctl_v1->flags;
q->headdrop = ctl_v1->headdrop;
flags = ctl_v1->flags;
headdrop = ctl_v1->headdrop;
}
if (ctl->limit) {
q->limit = min_t(u32, ctl->limit, q->maxdepth * q->maxflows);
q->maxflows = min_t(u32, q->maxflows, q->limit);
limit = min_t(u32, ctl->limit, maxdepth * maxflows);
maxflows = min_t(u32, maxflows, limit);
}
if (limit == 1) {
sch_tree_unlock(sch);
kfree(p);
NL_SET_ERR_MSG_MOD(extack, "invalid limit");
return -EINVAL;
}

/* commit configuration */
q->limit = limit;
q->divisor = divisor;
q->headdrop = headdrop;
q->maxdepth = maxdepth;
q->maxflows = maxflows;
WRITE_ONCE(q->perturb_period, perturb_period);
q->quantum = quantum;
q->flags = flags;
if (p)
swap(q->red_parms, p);

qlen = sch->q.qlen;
while (sch->q.qlen > q->limit) {
@@ -72,8 +72,9 @@
/* Forward declarations for internal helper functions. */
static bool sctp_writeable(const struct sock *sk);
static void sctp_wfree(struct sk_buff *skb);
static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
size_t msg_len);
static int sctp_wait_for_sndbuf(struct sctp_association *asoc,
struct sctp_transport *transport,
long *timeo_p, size_t msg_len);
static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
static int sctp_wait_for_accept(struct sock *sk, long timeo);
@@ -1828,7 +1829,7 @@ static int sctp_sendmsg_to_asoc(struct sctp_association *asoc,

if (sctp_wspace(asoc) <= 0 || !sk_wmem_schedule(sk, msg_len)) {
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
err = sctp_wait_for_sndbuf(asoc, transport, &timeo, msg_len);
if (err)
goto err;
if (unlikely(sinfo->sinfo_stream >= asoc->stream.outcnt)) {
@@ -9214,8 +9215,9 @@ void sctp_sock_rfree(struct sk_buff *skb)

/* Helper function to wait for space in the sndbuf. */
static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
size_t msg_len)
static int sctp_wait_for_sndbuf(struct sctp_association *asoc,
struct sctp_transport *transport,
long *timeo_p, size_t msg_len)
{
struct sock *sk = asoc->base.sk;
long current_timeo = *timeo_p;
@@ -9225,7 +9227,9 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc,
*timeo_p, msg_len);

/* Increment the association's refcnt. */
/* Increment the transport and association's refcnt. */
if (transport)
sctp_transport_hold(transport);
sctp_association_hold(asoc);

/* Wait on the association specific sndbuf space. */
@@ -9234,7 +9238,7 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
TASK_INTERRUPTIBLE);
if (asoc->base.dead)
goto do_dead;
if (!*timeo_p)
if ((!*timeo_p) || (transport && transport->dead))
goto do_nonblock;
if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING)
goto do_error;
@@ -9259,7 +9263,9 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
out:
finish_wait(&asoc->wait, &wait);

/* Release the association's refcnt. */
/* Release the transport and association's refcnt. */
if (transport)
sctp_transport_put(transport);
sctp_association_put(asoc);

return err;
@@ -117,6 +117,8 @@ fail:
 */
void sctp_transport_free(struct sctp_transport *transport)
{
transport->dead = 1;

/* Try to delete the heartbeat timer. */
if (timer_delete(&transport->hb_timer))
sctp_transport_put(transport);
@@ -1046,6 +1046,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
if (imp == TIPC_SYSTEM_IMPORTANCE) {
pr_warn("%s<%s>, link overflow", link_rst_msg, l->name);
__skb_queue_purge(list);
return -ENOBUFS;
}
rc = link_schedule_user(l, hdr);
@@ -852,6 +852,11 @@ static int tls_setsockopt(struct sock *sk, int level, int optname,
return do_tls_setsockopt(sk, optname, optval, optlen);
}

static int tls_disconnect(struct sock *sk, int flags)
{
return -EOPNOTSUPP;
}

struct tls_context *tls_ctx_create(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
@@ -947,6 +952,7 @@ static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
prot[TLS_BASE][TLS_BASE] = *base;
prot[TLS_BASE][TLS_BASE].setsockopt = tls_setsockopt;
prot[TLS_BASE][TLS_BASE].getsockopt = tls_getsockopt;
prot[TLS_BASE][TLS_BASE].disconnect = tls_disconnect;
prot[TLS_BASE][TLS_BASE].close = tls_sk_proto_close;

prot[TLS_SW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
@@ -6,7 +6,7 @@ import os
from lib.py import ksft_run, ksft_exit, ksft_eq, ksft_raises, KsftSkipEx
from lib.py import CmdExitFailure, EthtoolFamily, NlError
from lib.py import NetDrvEnv
from lib.py import defer, ethtool, ip
from lib.py import defer, ethtool, ip, random

def _get_hds_mode(cfg, netnl) -> str:
@@ -109,6 +109,36 @@ def set_hds_thresh_zero(cfg, netnl) -> None:

ksft_eq(0, rings['hds-thresh'])

def set_hds_thresh_random(cfg, netnl) -> None:
try:
rings = netnl.rings_get({'header': {'dev-index': cfg.ifindex}})
except NlError as e:
raise KsftSkipEx('ring-get not supported by device')
if 'hds-thresh' not in rings:
raise KsftSkipEx('hds-thresh not supported by device')
if 'hds-thresh-max' not in rings:
raise KsftSkipEx('hds-thresh-max not defined by device')

if rings['hds-thresh-max'] < 2:
raise KsftSkipEx('hds-thresh-max is too small')
elif rings['hds-thresh-max'] == 2:
hds_thresh = 1
else:
while True:
hds_thresh = random.randint(1, rings['hds-thresh-max'] - 1)
if hds_thresh != rings['hds-thresh']:
break

try:
netnl.rings_set({'header': {'dev-index': cfg.ifindex}, 'hds-thresh': hds_thresh})
except NlError as e:
if e.error == errno.EINVAL:
raise KsftSkipEx("hds-thresh-set not supported by the device")
elif e.error == errno.EOPNOTSUPP:
raise KsftSkipEx("ring-set not supported by the device")
rings = netnl.rings_get({'header': {'dev-index': cfg.ifindex}})
ksft_eq(hds_thresh, rings['hds-thresh'])

def set_hds_thresh_max(cfg, netnl) -> None:
try:
rings = netnl.rings_get({'header': {'dev-index': cfg.ifindex}})
@@ -243,6 +273,7 @@ def main() -> None:
get_hds_thresh,
set_hds_disable,
set_hds_enable,
set_hds_thresh_random,
set_hds_thresh_zero,
set_hds_thresh_max,
set_hds_thresh_gt,
@@ -1441,6 +1441,15 @@ chk_join_nr()
fi
fi

count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtMPJoinSynAckHMacFailure")
if [ -z "$count" ]; then
rc=${KSFT_SKIP}
elif [ "$count" != "0" ]; then
rc=${KSFT_FAIL}
print_check "synack HMAC"
fail_test "got $count JOIN[s] synack HMAC failure expected 0"
fi

count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMPJoinAckRx")
if [ -z "$count" ]; then
rc=${KSFT_SKIP}
@@ -1450,6 +1459,15 @@ chk_join_nr()
fail_test "got $count JOIN[s] ack rx expected $ack_nr"
fi

count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMPJoinAckHMacFailure")
if [ -z "$count" ]; then
rc=${KSFT_SKIP}
elif [ "$count" != "0" ]; then
rc=${KSFT_FAIL}
print_check "ack HMAC"
fail_test "got $count JOIN[s] ack HMAC failure expected 0"
fi

print_results "join Rx" ${rc}

join_syn_tx="${join_syn_tx:-${syn_nr}}" \
@@ -27,7 +27,7 @@ TYPES="net_port port_net net6_port port_proto net6_port_mac net6_port_mac_proto
net6_port_net6_port net_port_mac_proto_net"

# Reported bugs, also described by TYPE_ variables below
BUGS="flush_remove_add reload net_port_proto_match"
BUGS="flush_remove_add reload net_port_proto_match avx2_mismatch"

# List of possible paths to pktgen script from kernel tree for performance tests
PKTGEN_SCRIPT_PATHS="
@@ -387,6 +387,25 @@ race_repeat 0

perf_duration 0
"

TYPE_avx2_mismatch="
display avx2 false match
type_spec inet_proto . ipv6_addr
chain_spec meta l4proto . ip6 daddr
dst proto addr6
src
start 1
count 1
src_delta 1
tools ping
proto icmp6

race_repeat 0

perf_duration 0
"

# Set template for all tests, types and rules are filled in depending on test
set_template='
flush ruleset
@@ -1629,6 +1648,24 @@ test_bug_net_port_proto_match() {
nft flush ruleset
}

test_bug_avx2_mismatch()
{
setup veth send_"${proto}" set || return ${ksft_skip}

local a1="fe80:dead:01ff:0a02:0b03:6007:8009:a001"
local a2="fe80:dead:01fe:0a02:0b03:6007:8009:a001"

nft "add element inet filter test { icmpv6 . $a1 }"

dst_addr6="$a2"
send_icmp6

if [ "$(count_packets)" -gt "0" ]; then
err "False match for $a2"
return 1
fi
}

test_reported_issues() {
eval test_bug_"${subtest}"
}
@@ -1753,6 +1753,42 @@ TEST_F(tls_basic, rekey_tx)
EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
}

TEST_F(tls_basic, disconnect)
{
char const *test_str = "test_message";
int send_len = strlen(test_str) + 1;
struct tls_crypto_info_keys key;
struct sockaddr_in addr;
char buf[20];
int ret;

if (self->notls)
return;

tls_crypto_info_init(TLS_1_3_VERSION, TLS_CIPHER_AES_GCM_128,
&key, 0);

ret = setsockopt(self->fd, SOL_TLS, TLS_TX, &key, key.len);
ASSERT_EQ(ret, 0);

/* Pre-queue the data so that setsockopt parses it but doesn't
 * dequeue it from the TCP socket. recvmsg would dequeue.
 */
EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);

ret = setsockopt(self->cfd, SOL_TLS, TLS_RX, &key, key.len);
ASSERT_EQ(ret, 0);

addr.sin_family = AF_UNSPEC;
addr.sin_addr.s_addr = htonl(INADDR_ANY);
addr.sin_port = 0;
ret = connect(self->cfd, &addr, sizeof(addr));
EXPECT_EQ(ret, -1);
EXPECT_EQ(errno, EOPNOTSUPP);

EXPECT_EQ(recv(self->cfd, buf, send_len, 0), send_len);
}

TEST_F(tls, rekey)
{
char const *test_str_1 = "test_message_before_rekey";
@@ -158,5 +158,160 @@
"$TC qdisc del dev $DUMMY handle 1: root",
"$IP addr del 10.10.10.10/24 dev $DUMMY || true"
]
},
{
"id": "a4bb",
"name": "Test FQ_CODEL with HTB parent - force packet drop with empty queue",
"category": [
"qdisc",
"fq_codel",
"htb"
],
"plugins": {
"requires": "nsPlugin"
},
"setup": [
"$IP link set dev $DUMMY up || true",
"$IP addr add 10.10.10.10/24 dev $DUMMY || true",
"$TC qdisc add dev $DUMMY handle 1: root htb default 10",
"$TC class add dev $DUMMY parent 1: classid 1:10 htb rate 1kbit",
"$TC qdisc add dev $DUMMY parent 1:10 handle 10: fq_codel memory_limit 1 flows 1 target 0.1ms interval 1ms",
"$TC filter add dev $DUMMY parent 1: protocol ip prio 1 u32 match ip protocol 1 0xff flowid 1:10",
"ping -c 5 -f -I $DUMMY 10.10.10.1 > /dev/null || true",
"sleep 0.1"
],
"cmdUnderTest": "$TC -s qdisc show dev $DUMMY",
"expExitCode": "0",
"verifyCmd": "$TC -s qdisc show dev $DUMMY | grep -A 5 'qdisc fq_codel'",
"matchPattern": "dropped [1-9][0-9]*",
"matchCount": "1",
"teardown": [
"$TC qdisc del dev $DUMMY handle 1: root",
"$IP addr del 10.10.10.10/24 dev $DUMMY || true"
]
},
{
"id": "a4be",
"name": "Test FQ_CODEL with QFQ parent - force packet drop with empty queue",
"category": [
"qdisc",
"fq_codel",
"qfq"
],
"plugins": {
"requires": "nsPlugin"
},
"setup": [
"$IP link set dev $DUMMY up || true",
"$IP addr add 10.10.10.10/24 dev $DUMMY || true",
"$TC qdisc add dev $DUMMY handle 1: root qfq",
"$TC class add dev $DUMMY parent 1: classid 1:10 qfq weight 1 maxpkt 1000",
"$TC qdisc add dev $DUMMY parent 1:10 handle 10: fq_codel memory_limit 1 flows 1 target 0.1ms interval 1ms",
"$TC filter add dev $DUMMY parent 1: protocol ip prio 1 u32 match ip protocol 1 0xff flowid 1:10",
"ping -c 10 -s 1000 -f -I $DUMMY 10.10.10.1 > /dev/null || true",
"sleep 0.1"
],
"cmdUnderTest": "$TC -s qdisc show dev $DUMMY",
"expExitCode": "0",
"verifyCmd": "$TC -s qdisc show dev $DUMMY | grep -A 5 'qdisc fq_codel'",
"matchPattern": "dropped [1-9][0-9]*",
"matchCount": "1",
"teardown": [
"$TC qdisc del dev $DUMMY handle 1: root",
"$IP addr del 10.10.10.10/24 dev $DUMMY || true"
]
},
{
"id": "a4bf",
"name": "Test FQ_CODEL with HFSC parent - force packet drop with empty queue",
"category": [
"qdisc",
"fq_codel",
"hfsc"
],
"plugins": {
"requires": "nsPlugin"
},
"setup": [
"$IP link set dev $DUMMY up || true",
"$IP addr add 10.10.10.10/24 dev $DUMMY || true",
"$TC qdisc add dev $DUMMY handle 1: root hfsc default 10",
"$TC class add dev $DUMMY parent 1: classid 1:10 hfsc sc rate 1kbit ul rate 1kbit",
"$TC qdisc add dev $DUMMY parent 1:10 handle 10: fq_codel memory_limit 1 flows 1 target 0.1ms interval 1ms",
"$TC filter add dev $DUMMY parent 1: protocol ip prio 1 u32 match ip protocol 1 0xff flowid 1:10",
"ping -c 5 -f -I $DUMMY 10.10.10.1 > /dev/null || true",
"sleep 0.1"
],
"cmdUnderTest": "$TC -s qdisc show dev $DUMMY",
"expExitCode": "0",
"verifyCmd": "$TC -s qdisc show dev $DUMMY | grep -A 5 'qdisc fq_codel'",
"matchPattern": "dropped [1-9][0-9]*",
"matchCount": "1",
"teardown": [
"$TC qdisc del dev $DUMMY handle 1: root",
"$IP addr del 10.10.10.10/24 dev $DUMMY || true"
]
},
{
"id": "a4c0",
"name": "Test FQ_CODEL with DRR parent - force packet drop with empty queue",
"category": [
"qdisc",
"fq_codel",
"drr"
],
"plugins": {
"requires": "nsPlugin"
},
"setup": [
"$IP link set dev $DUMMY up || true",
"$IP addr add 10.10.10.10/24 dev $DUMMY || true",
"$TC qdisc add dev $DUMMY handle 1: root drr",
"$TC class add dev $DUMMY parent 1: classid 1:10 drr quantum 1500",
"$TC qdisc add dev $DUMMY parent 1:10 handle 10: fq_codel memory_limit 1 flows 1 target 0.1ms interval 1ms",
"$TC filter add dev $DUMMY parent 1: protocol ip prio 1 u32 match ip protocol 1 0xff flowid 1:10",
"ping -c 5 -f -I $DUMMY 10.10.10.1 > /dev/null || true",
"sleep 0.1"
],
"cmdUnderTest": "$TC -s qdisc show dev $DUMMY",
"expExitCode": "0",
"verifyCmd": "$TC -s qdisc show dev $DUMMY | grep -A 5 'qdisc fq_codel'",
"matchPattern": "dropped [1-9][0-9]*",
"matchCount": "1",
"teardown": [
"$TC qdisc del dev $DUMMY handle 1: root",
"$IP addr del 10.10.10.10/24 dev $DUMMY || true"
]
},
{
"id": "a4c1",
"name": "Test FQ_CODEL with ETS parent - force packet drop with empty queue",
"category": [
"qdisc",
"fq_codel",
"ets"
],
"plugins": {
"requires": "nsPlugin"
},
"setup": [
"$IP link set dev $DUMMY up || true",
"$IP addr add 10.10.10.10/24 dev $DUMMY || true",
"$TC qdisc add dev $DUMMY handle 1: root ets bands 2 strict 1",
"$TC class change dev $DUMMY parent 1: classid 1:1 ets",
"$TC qdisc add dev $DUMMY parent 1:1 handle 10: fq_codel memory_limit 1 flows 1 target 0.1ms interval 1ms",
"$TC filter add dev $DUMMY parent 1: protocol ip prio 1 u32 match ip protocol 1 0xff flowid 1:1",
"ping -c 5 -f -I $DUMMY 10.10.10.1 > /dev/null || true",
"sleep 0.1"
],
"cmdUnderTest": "$TC -s qdisc show dev $DUMMY",
"expExitCode": "0",
"verifyCmd": "$TC -s qdisc show dev $DUMMY | grep -A 5 'qdisc fq_codel'",
"matchPattern": "dropped [1-9][0-9]*",
"matchCount": "1",
"teardown": [
"$TC qdisc del dev $DUMMY handle 1: root",
"$IP addr del 10.10.10.10/24 dev $DUMMY || true"
]
}
]
@@ -228,5 +228,41 @@
"matchCount": "0",
"teardown": [
]
},
{
"id": "7f8f",
"name": "Check that a derived limit of 1 is rejected (limit 2 depth 1 flows 1)",
"category": [
"qdisc",
"sfq"
],
"plugins": {
"requires": "nsPlugin"
},
"setup": [],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root sfq limit 2 depth 1 flows 1",
"expExitCode": "2",
"verifyCmd": "$TC qdisc show dev $DUMMY",
"matchPattern": "sfq",
"matchCount": "0",
"teardown": []
},
{
"id": "5168",
"name": "Check that a derived limit of 1 is rejected (limit 2 depth 1 divisor 1)",
"category": [
"qdisc",
"sfq"
],
"plugins": {
"requires": "nsPlugin"
},
"setup": [],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root sfq limit 2 depth 1 divisor 1",
"expExitCode": "2",
"verifyCmd": "$TC qdisc show dev $DUMMY",
"matchPattern": "sfq",
"matchCount": "0",
"teardown": []
}
]