Merge tag 'net-6.19-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Pull networking fixes from Paolo Abeni:
"Including fixes from bluetooth, can and IPsec.
Current release - regressions:
- net: add net.core.qdisc_max_burst
- can: propagate CAN device capabilities via ml_priv

Previous releases - regressions:
- dst: fix races in rt6_uncached_list_del() and
rt_del_uncached_list()
- ipv6: fix use-after-free in inet6_addr_del()
- xfrm: fix inner mode lookup in tunnel mode GSO segmentation
- ip_tunnel: spread netdev_lockdep_set_classes()
- ip6_tunnel: use skb_vlan_inet_prepare() in __ip6_tnl_rcv()
- bluetooth: hci_sync: enable PA sync lost event
- eth: virtio-net:
- fix the deadlock when disabling rx NAPI
- fix misalignment bug in struct virtnet_info

Previous releases - always broken:
- ipv4: ip_gre: make ipgre_header() robust against insufficient headroom
- can: fix SSP_SRC when the bit rate is higher than 1 Mbit/s
- eth:
- mlx5e: fix crashes on profile change rollback failure
- octeon_ep_vf: fix free_irq dev_id mismatch in IRQ rollback
- macvlan: fix possible UAF in macvlan_forward_source()"

* tag 'net-6.19-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (37 commits)
virtio_net: Fix misalignment bug in struct virtnet_info
net: can: j1939: j1939_xtp_rx_rts_session_active(): deactivate session upon receiving the second rts
can: raw: instantly reject disabled CAN frames
can: propagate CAN device capabilities via ml_priv
Revert "can: raw: instantly reject unsupported CAN frames"
net/sched: sch_qfq: do not free existing class in qfq_change_class()
selftests: drv-net: fix RPS mask handling for high CPU numbers
selftests: drv-net: fix RPS mask handling in toeplitz test
ipv6: Fix use-after-free in inet6_addr_del().
dst: fix races in rt6_uncached_list_del() and rt_del_uncached_list()
net: hv_netvsc: reject RSS hash key programming without RX indirection table
tools: ynl: render event op docs correctly
net: add net.core.qdisc_max_burst
net: airoha: Fix typo in airoha_ppe_setup_tc_block_cb definition
net: phy: motorcomm: fix duplex setting error for phy leds
net: octeon_ep_vf: fix free_irq dev_id mismatch in IRQ rollback
net/mlx5e: Restore destroying state bit after profile cleanup
net/mlx5e: Pass netdev to mlx5e_destroy_netdev instead of priv
net/mlx5e: Don't store mlx5e_priv in mlx5e_dev devlink priv
net/mlx5e: Fix crash on profile change rollback failure
...
@@ -303,6 +303,14 @@ netdev_max_backlog
Maximum number of packets, queued on the INPUT side, when the interface
receives packets faster than kernel can process them.

qdisc_max_burst
---------------

Maximum number of packets that can be temporarily stored before
reaching qdisc.

Default: 1000

netdev_rss_key
--------------

@@ -18424,9 +18424,11 @@ M: Jakub Kicinski <kuba@kernel.org>
M: Sabrina Dubroca <sd@queasysnail.net>
L: netdev@vger.kernel.org
S: Maintained
F: Documentation/networking/tls*
F: include/net/tls.h
F: include/uapi/linux/tls.h
F: net/tls/*
F: net/tls/
F: tools/testing/selftests/net/tls.c

NETWORKING [SOCKETS]
M: Eric Dumazet <edumazet@google.com>

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only

menuconfig CAN_DEV
bool "CAN Device Drivers"
tristate "CAN Device Drivers"
default y
depends on CAN
help
@@ -17,7 +17,10 @@ menuconfig CAN_DEV
virtual ones. If you own such devices or plan to use the virtual CAN
interfaces to develop applications, say Y here.

if CAN_DEV && CAN
To compile as a module, choose M here: the module will be called
can-dev.

if CAN_DEV

config CAN_VCAN
tristate "Virtual Local CAN Interface (vcan)"

@@ -7,7 +7,7 @@ obj-$(CONFIG_CAN_VCAN) += vcan.o
obj-$(CONFIG_CAN_VXCAN) += vxcan.o
obj-$(CONFIG_CAN_SLCAN) += slcan/

obj-$(CONFIG_CAN_DEV) += dev/
obj-y += dev/
obj-y += esd/
obj-y += rcar/
obj-y += rockchip/

@@ -310,7 +310,7 @@ static int ctucan_set_secondary_sample_point(struct net_device *ndev)
}

ssp_cfg = FIELD_PREP(REG_TRV_DELAY_SSP_OFFSET, ssp_offset);
ssp_cfg |= FIELD_PREP(REG_TRV_DELAY_SSP_SRC, 0x1);
ssp_cfg |= FIELD_PREP(REG_TRV_DELAY_SSP_SRC, 0x0);
}

ctucan_write32(priv, CTUCANFD_TRV_DELAY, ssp_cfg);

@@ -1,8 +1,9 @@
# SPDX-License-Identifier: GPL-2.0

obj-$(CONFIG_CAN) += can-dev.o
obj-$(CONFIG_CAN_DEV) += can-dev.o

can-dev-y += skb.o

can-dev-$(CONFIG_CAN_DEV) += skb.o
can-dev-$(CONFIG_CAN_CALC_BITTIMING) += calc_bittiming.o
can-dev-$(CONFIG_CAN_NETLINK) += bittiming.o
can-dev-$(CONFIG_CAN_NETLINK) += dev.o

@@ -375,6 +375,32 @@ void can_set_default_mtu(struct net_device *dev)
}
}

void can_set_cap_info(struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
u32 can_cap;

if (can_dev_in_xl_only_mode(priv)) {
/* XL only mode => no CC/FD capability */
can_cap = CAN_CAP_XL;
} else {
/* mixed mode => CC + FD/XL capability */
can_cap = CAN_CAP_CC;

if (priv->ctrlmode & CAN_CTRLMODE_FD)
can_cap |= CAN_CAP_FD;

if (priv->ctrlmode & CAN_CTRLMODE_XL)
can_cap |= CAN_CAP_XL;
}

if (priv->ctrlmode & (CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_RESTRICTED))
can_cap |= CAN_CAP_RO;

can_set_cap(dev, can_cap);
}

/* helper to define static CAN controller features at device creation time */
int can_set_static_ctrlmode(struct net_device *dev, u32 static_mode)
{
@@ -390,6 +416,7 @@ int can_set_static_ctrlmode(struct net_device *dev, u32 static_mode)

/* override MTU which was set by default in can_setup()? */
can_set_default_mtu(dev);
can_set_cap_info(dev);

return 0;
}

@@ -377,6 +377,7 @@ static int can_ctrlmode_changelink(struct net_device *dev,
}

can_set_default_mtu(dev);
can_set_cap_info(dev);

return 0;
}

@@ -1736,7 +1736,7 @@ static int es58x_alloc_rx_urbs(struct es58x_device *es58x_dev)
dev_dbg(dev, "%s: Allocated %d rx URBs each of size %u\n",
__func__, i, rx_buf_len);

return ret;
return 0;
}

/**

@@ -751,6 +751,8 @@ resubmit_urb:
hf, parent->hf_size_rx,
gs_usb_receive_bulk_callback, parent);

usb_anchor_urb(urb, &parent->rx_submitted);

rc = usb_submit_urb(urb, GFP_ATOMIC);

/* USB failure take down all interfaces */

@@ -130,6 +130,19 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}

static void vcan_set_cap_info(struct net_device *dev)
{
u32 can_cap = CAN_CAP_CC;

if (dev->mtu > CAN_MTU)
can_cap |= CAN_CAP_FD;

if (dev->mtu >= CANXL_MIN_MTU)
can_cap |= CAN_CAP_XL;

can_set_cap(dev, can_cap);
}

static int vcan_change_mtu(struct net_device *dev, int new_mtu)
{
/* Do not allow changing the MTU while running */
@@ -141,6 +154,7 @@ static int vcan_change_mtu(struct net_device *dev, int new_mtu)
return -EINVAL;

WRITE_ONCE(dev->mtu, new_mtu);
vcan_set_cap_info(dev);
return 0;
}

@@ -162,6 +176,7 @@ static void vcan_setup(struct net_device *dev)
dev->tx_queue_len = 0;
dev->flags = IFF_NOARP;
can_set_ml_priv(dev, netdev_priv(dev));
vcan_set_cap_info(dev);

/* set flags according to driver capabilities */
if (echo)

@@ -125,6 +125,19 @@ static int vxcan_get_iflink(const struct net_device *dev)
return iflink;
}

static void vxcan_set_cap_info(struct net_device *dev)
{
u32 can_cap = CAN_CAP_CC;

if (dev->mtu > CAN_MTU)
can_cap |= CAN_CAP_FD;

if (dev->mtu >= CANXL_MIN_MTU)
can_cap |= CAN_CAP_XL;

can_set_cap(dev, can_cap);
}

static int vxcan_change_mtu(struct net_device *dev, int new_mtu)
{
/* Do not allow changing the MTU while running */
@@ -136,6 +149,7 @@ static int vxcan_change_mtu(struct net_device *dev, int new_mtu)
return -EINVAL;

WRITE_ONCE(dev->mtu, new_mtu);
vxcan_set_cap_info(dev);
return 0;
}

@@ -167,6 +181,7 @@ static void vxcan_setup(struct net_device *dev)

can_ml = netdev_priv(dev) + ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN);
can_set_ml_priv(dev, can_ml);
vxcan_set_cap_info(dev);
}

/* forward declaration for rtnl_create_link() */

@@ -218,7 +218,7 @@ static int octep_vf_request_irqs(struct octep_vf_device *oct)
ioq_irq_err:
while (i) {
--i;
free_irq(oct->msix_entries[i].vector, oct);
free_irq(oct->msix_entries[i].vector, oct->ioq_vector[i]);
}
return -1;
}

@@ -962,7 +962,7 @@ struct mlx5e_priv {
|
||||
};
|
||||
|
||||
struct mlx5e_dev {
|
||||
struct mlx5e_priv *priv;
|
||||
struct net_device *netdev;
|
||||
struct devlink_port dl_port;
|
||||
};
|
||||
|
||||
@@ -1242,10 +1242,13 @@ struct net_device *
|
||||
mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile);
|
||||
int mlx5e_attach_netdev(struct mlx5e_priv *priv);
|
||||
void mlx5e_detach_netdev(struct mlx5e_priv *priv);
|
||||
void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
|
||||
int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
|
||||
const struct mlx5e_profile *new_profile, void *new_ppriv);
|
||||
void mlx5e_netdev_attach_nic_profile(struct mlx5e_priv *priv);
|
||||
void mlx5e_destroy_netdev(struct net_device *netdev);
|
||||
int mlx5e_netdev_change_profile(struct net_device *netdev,
|
||||
struct mlx5_core_dev *mdev,
|
||||
const struct mlx5e_profile *new_profile,
|
||||
void *new_ppriv);
|
||||
void mlx5e_netdev_attach_nic_profile(struct net_device *netdev,
|
||||
struct mlx5_core_dev *mdev);
|
||||
void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv);
|
||||
void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu);
|
||||
|
||||
|
||||
@@ -6325,6 +6325,7 @@ err_free_cpumask:
|
||||
|
||||
void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
|
||||
{
|
||||
bool destroying = test_bit(MLX5E_STATE_DESTROYING, &priv->state);
|
||||
int i;
|
||||
|
||||
/* bail if change profile failed and also rollback failed */
|
||||
@@ -6352,6 +6353,8 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
|
||||
}
|
||||
|
||||
memset(priv, 0, sizeof(*priv));
|
||||
if (destroying) /* restore destroying bit, to allow unload */
|
||||
set_bit(MLX5E_STATE_DESTROYING, &priv->state);
|
||||
}
|
||||
|
||||
static unsigned int mlx5e_get_max_num_txqs(struct mlx5_core_dev *mdev,
|
||||
@@ -6584,19 +6587,28 @@ profile_cleanup:
|
||||
return err;
|
||||
}
|
||||
|
||||
int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
|
||||
const struct mlx5e_profile *new_profile, void *new_ppriv)
|
||||
int mlx5e_netdev_change_profile(struct net_device *netdev,
|
||||
struct mlx5_core_dev *mdev,
|
||||
const struct mlx5e_profile *new_profile,
|
||||
void *new_ppriv)
|
||||
{
|
||||
const struct mlx5e_profile *orig_profile = priv->profile;
|
||||
struct net_device *netdev = priv->netdev;
|
||||
struct mlx5_core_dev *mdev = priv->mdev;
|
||||
void *orig_ppriv = priv->ppriv;
|
||||
struct mlx5e_priv *priv = netdev_priv(netdev);
|
||||
const struct mlx5e_profile *orig_profile;
|
||||
int err, rollback_err;
|
||||
void *orig_ppriv;
|
||||
|
||||
/* cleanup old profile */
|
||||
mlx5e_detach_netdev(priv);
|
||||
priv->profile->cleanup(priv);
|
||||
mlx5e_priv_cleanup(priv);
|
||||
orig_profile = priv->profile;
|
||||
orig_ppriv = priv->ppriv;
|
||||
|
||||
/* NULL could happen if previous change_profile failed to rollback */
|
||||
if (priv->profile) {
|
||||
WARN_ON_ONCE(priv->mdev != mdev);
|
||||
/* cleanup old profile */
|
||||
mlx5e_detach_netdev(priv);
|
||||
priv->profile->cleanup(priv);
|
||||
mlx5e_priv_cleanup(priv);
|
||||
}
|
||||
/* priv members are not valid from this point ... */
|
||||
|
||||
if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
|
||||
mlx5e_netdev_init_profile(netdev, mdev, new_profile, new_ppriv);
|
||||
@@ -6613,23 +6625,33 @@ int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
|
||||
return 0;
|
||||
|
||||
rollback:
|
||||
if (!orig_profile) {
|
||||
netdev_warn(netdev, "no original profile to rollback to\n");
|
||||
priv->profile = NULL;
|
||||
return err;
|
||||
}
|
||||
|
||||
rollback_err = mlx5e_netdev_attach_profile(netdev, mdev, orig_profile, orig_ppriv);
|
||||
if (rollback_err)
|
||||
netdev_err(netdev, "%s: failed to rollback to orig profile, %d\n",
|
||||
__func__, rollback_err);
|
||||
if (rollback_err) {
|
||||
netdev_err(netdev, "failed to rollback to orig profile, %d\n",
|
||||
rollback_err);
|
||||
priv->profile = NULL;
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
void mlx5e_netdev_attach_nic_profile(struct mlx5e_priv *priv)
|
||||
void mlx5e_netdev_attach_nic_profile(struct net_device *netdev,
|
||||
struct mlx5_core_dev *mdev)
|
||||
{
|
||||
mlx5e_netdev_change_profile(priv, &mlx5e_nic_profile, NULL);
|
||||
mlx5e_netdev_change_profile(netdev, mdev, &mlx5e_nic_profile, NULL);
|
||||
}
|
||||
|
||||
void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
|
||||
void mlx5e_destroy_netdev(struct net_device *netdev)
|
||||
{
|
||||
struct net_device *netdev = priv->netdev;
|
||||
struct mlx5e_priv *priv = netdev_priv(netdev);
|
||||
|
||||
mlx5e_priv_cleanup(priv);
|
||||
if (priv->profile)
|
||||
mlx5e_priv_cleanup(priv);
|
||||
free_netdev(netdev);
|
||||
}
|
||||
|
||||
@@ -6637,8 +6659,8 @@ static int _mlx5e_resume(struct auxiliary_device *adev)
|
||||
{
|
||||
struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
|
||||
struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
|
||||
struct mlx5e_priv *priv = mlx5e_dev->priv;
|
||||
struct net_device *netdev = priv->netdev;
|
||||
struct mlx5e_priv *priv = netdev_priv(mlx5e_dev->netdev);
|
||||
struct net_device *netdev = mlx5e_dev->netdev;
|
||||
struct mlx5_core_dev *mdev = edev->mdev;
|
||||
struct mlx5_core_dev *pos, *to;
|
||||
int err, i;
|
||||
@@ -6684,10 +6706,11 @@ static int mlx5e_resume(struct auxiliary_device *adev)
|
||||
|
||||
static int _mlx5e_suspend(struct auxiliary_device *adev, bool pre_netdev_reg)
|
||||
{
|
||||
struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
|
||||
struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
|
||||
struct mlx5e_priv *priv = mlx5e_dev->priv;
|
||||
struct net_device *netdev = priv->netdev;
|
||||
struct mlx5_core_dev *mdev = priv->mdev;
|
||||
struct mlx5e_priv *priv = netdev_priv(mlx5e_dev->netdev);
|
||||
struct net_device *netdev = mlx5e_dev->netdev;
|
||||
struct mlx5_core_dev *mdev = edev->mdev;
|
||||
struct mlx5_core_dev *pos;
|
||||
int i;
|
||||
|
||||
@@ -6748,11 +6771,11 @@ static int _mlx5e_probe(struct auxiliary_device *adev)
|
||||
goto err_devlink_port_unregister;
|
||||
}
|
||||
SET_NETDEV_DEVLINK_PORT(netdev, &mlx5e_dev->dl_port);
|
||||
mlx5e_dev->netdev = netdev;
|
||||
|
||||
mlx5e_build_nic_netdev(netdev);
|
||||
|
||||
priv = netdev_priv(netdev);
|
||||
mlx5e_dev->priv = priv;
|
||||
|
||||
priv->profile = profile;
|
||||
priv->ppriv = NULL;
|
||||
@@ -6785,7 +6808,7 @@ err_resume:
|
||||
err_profile_cleanup:
|
||||
profile->cleanup(priv);
|
||||
err_destroy_netdev:
|
||||
mlx5e_destroy_netdev(priv);
|
||||
mlx5e_destroy_netdev(netdev);
|
||||
err_devlink_port_unregister:
|
||||
mlx5e_devlink_port_unregister(mlx5e_dev);
|
||||
err_devlink_unregister:
|
||||
@@ -6815,17 +6838,20 @@ static void _mlx5e_remove(struct auxiliary_device *adev)
|
||||
{
|
||||
struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
|
||||
struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
|
||||
struct mlx5e_priv *priv = mlx5e_dev->priv;
|
||||
struct net_device *netdev = mlx5e_dev->netdev;
|
||||
struct mlx5e_priv *priv = netdev_priv(netdev);
|
||||
struct mlx5_core_dev *mdev = edev->mdev;
|
||||
|
||||
mlx5_core_uplink_netdev_set(mdev, NULL);
|
||||
mlx5e_dcbnl_delete_app(priv);
|
||||
|
||||
if (priv->profile)
|
||||
mlx5e_dcbnl_delete_app(priv);
|
||||
/* When unload driver, the netdev is in registered state
|
||||
* if it's from legacy mode. If from switchdev mode, it
|
||||
* is already unregistered before changing to NIC profile.
|
||||
*/
|
||||
if (priv->netdev->reg_state == NETREG_REGISTERED) {
|
||||
unregister_netdev(priv->netdev);
|
||||
if (netdev->reg_state == NETREG_REGISTERED) {
|
||||
unregister_netdev(netdev);
|
||||
_mlx5e_suspend(adev, false);
|
||||
} else {
|
||||
struct mlx5_core_dev *pos;
|
||||
@@ -6840,7 +6866,7 @@ static void _mlx5e_remove(struct auxiliary_device *adev)
|
||||
/* Avoid cleanup if profile rollback failed. */
|
||||
if (priv->profile)
|
||||
priv->profile->cleanup(priv);
|
||||
mlx5e_destroy_netdev(priv);
|
||||
mlx5e_destroy_netdev(netdev);
|
||||
mlx5e_devlink_port_unregister(mlx5e_dev);
|
||||
mlx5e_destroy_devlink(mlx5e_dev);
|
||||
}
|
||||
|
||||
@@ -1508,17 +1508,16 @@ mlx5e_vport_uplink_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *
|
||||
{
|
||||
struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
|
||||
struct net_device *netdev;
|
||||
struct mlx5e_priv *priv;
|
||||
int err;
|
||||
|
||||
netdev = mlx5_uplink_netdev_get(dev);
|
||||
if (!netdev)
|
||||
return 0;
|
||||
|
||||
priv = netdev_priv(netdev);
|
||||
rpriv->netdev = priv->netdev;
|
||||
err = mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile,
|
||||
rpriv);
|
||||
/* must not use netdev_priv(netdev), it might not be initialized yet */
|
||||
rpriv->netdev = netdev;
|
||||
err = mlx5e_netdev_change_profile(netdev, dev,
|
||||
&mlx5e_uplink_rep_profile, rpriv);
|
||||
mlx5_uplink_netdev_put(dev, netdev);
|
||||
return err;
|
||||
}
|
||||
@@ -1546,7 +1545,7 @@ mlx5e_vport_uplink_rep_unload(struct mlx5e_rep_priv *rpriv)
|
||||
if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_SWITCH_LEGACY))
|
||||
unregister_netdev(netdev);
|
||||
|
||||
mlx5e_netdev_attach_nic_profile(priv);
|
||||
mlx5e_netdev_attach_nic_profile(netdev, priv->mdev);
|
||||
}
|
||||
|
||||
static int
|
||||
@@ -1612,7 +1611,7 @@ err_cleanup_profile:
|
||||
priv->profile->cleanup(priv);
|
||||
|
||||
err_destroy_netdev:
|
||||
mlx5e_destroy_netdev(netdev_priv(netdev));
|
||||
mlx5e_destroy_netdev(netdev);
|
||||
return err;
|
||||
}
|
||||
|
||||
@@ -1667,7 +1666,7 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
|
||||
mlx5e_rep_vnic_reporter_destroy(priv);
|
||||
mlx5e_detach_netdev(priv);
|
||||
priv->profile->cleanup(priv);
|
||||
mlx5e_destroy_netdev(priv);
|
||||
mlx5e_destroy_netdev(netdev);
|
||||
free_ppriv:
|
||||
kvfree(ppriv); /* mlx5e_rep_priv */
|
||||
}
|
||||
|
||||
@@ -1750,6 +1750,9 @@ static int netvsc_set_rxfh(struct net_device *dev,
|
||||
rxfh->hfunc != ETH_RSS_HASH_TOP)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (!ndc->rx_table_sz)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
rndis_dev = ndev->extension;
|
||||
if (rxfh->indir) {
|
||||
for (i = 0; i < ndc->rx_table_sz; i++)
|
||||
|
||||
@@ -59,7 +59,7 @@ struct macvlan_port {
|
||||
|
||||
struct macvlan_source_entry {
|
||||
struct hlist_node hlist;
|
||||
struct macvlan_dev *vlan;
|
||||
struct macvlan_dev __rcu *vlan;
|
||||
unsigned char addr[6+2] __aligned(sizeof(u16));
|
||||
struct rcu_head rcu;
|
||||
};
|
||||
@@ -146,7 +146,7 @@ static struct macvlan_source_entry *macvlan_hash_lookup_source(
|
||||
|
||||
hlist_for_each_entry_rcu(entry, h, hlist, lockdep_rtnl_is_held()) {
|
||||
if (ether_addr_equal_64bits(entry->addr, addr) &&
|
||||
entry->vlan == vlan)
|
||||
rcu_access_pointer(entry->vlan) == vlan)
|
||||
return entry;
|
||||
}
|
||||
return NULL;
|
||||
@@ -168,7 +168,7 @@ static int macvlan_hash_add_source(struct macvlan_dev *vlan,
|
||||
return -ENOMEM;
|
||||
|
||||
ether_addr_copy(entry->addr, addr);
|
||||
entry->vlan = vlan;
|
||||
RCU_INIT_POINTER(entry->vlan, vlan);
|
||||
h = &port->vlan_source_hash[macvlan_eth_hash(addr)];
|
||||
hlist_add_head_rcu(&entry->hlist, h);
|
||||
vlan->macaddr_count++;
|
||||
@@ -187,6 +187,7 @@ static void macvlan_hash_add(struct macvlan_dev *vlan)
|
||||
|
||||
static void macvlan_hash_del_source(struct macvlan_source_entry *entry)
|
||||
{
|
||||
RCU_INIT_POINTER(entry->vlan, NULL);
|
||||
hlist_del_rcu(&entry->hlist);
|
||||
kfree_rcu(entry, rcu);
|
||||
}
|
||||
@@ -390,7 +391,7 @@ static void macvlan_flush_sources(struct macvlan_port *port,
|
||||
int i;
|
||||
|
||||
hash_for_each_safe(port->vlan_source_hash, i, next, entry, hlist)
|
||||
if (entry->vlan == vlan)
|
||||
if (rcu_access_pointer(entry->vlan) == vlan)
|
||||
macvlan_hash_del_source(entry);
|
||||
|
||||
vlan->macaddr_count = 0;
|
||||
@@ -433,9 +434,14 @@ static bool macvlan_forward_source(struct sk_buff *skb,
|
||||
|
||||
hlist_for_each_entry_rcu(entry, h, hlist) {
|
||||
if (ether_addr_equal_64bits(entry->addr, addr)) {
|
||||
if (entry->vlan->flags & MACVLAN_FLAG_NODST)
|
||||
struct macvlan_dev *vlan = rcu_dereference(entry->vlan);
|
||||
|
||||
if (!vlan)
|
||||
continue;
|
||||
|
||||
if (vlan->flags & MACVLAN_FLAG_NODST)
|
||||
consume = true;
|
||||
macvlan_forward_source_one(skb, entry->vlan);
|
||||
macvlan_forward_source_one(skb, vlan);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1680,7 +1686,7 @@ static int macvlan_fill_info_macaddr(struct sk_buff *skb,
|
||||
struct macvlan_source_entry *entry;
|
||||
|
||||
hlist_for_each_entry_rcu(entry, h, hlist, lockdep_rtnl_is_held()) {
|
||||
if (entry->vlan != vlan)
|
||||
if (rcu_access_pointer(entry->vlan) != vlan)
|
||||
continue;
|
||||
if (nla_put(skb, IFLA_MACVLAN_MACADDR, ETH_ALEN, entry->addr))
|
||||
return 1;
|
||||
|
||||
@@ -1741,10 +1741,10 @@ static int yt8521_led_hw_control_set(struct phy_device *phydev, u8 index,
|
||||
val |= YT8521_LED_1000_ON_EN;
|
||||
|
||||
if (test_bit(TRIGGER_NETDEV_FULL_DUPLEX, &rules))
|
||||
val |= YT8521_LED_HDX_ON_EN;
|
||||
val |= YT8521_LED_FDX_ON_EN;
|
||||
|
||||
if (test_bit(TRIGGER_NETDEV_HALF_DUPLEX, &rules))
|
||||
val |= YT8521_LED_FDX_ON_EN;
|
||||
val |= YT8521_LED_HDX_ON_EN;
|
||||
|
||||
if (test_bit(TRIGGER_NETDEV_TX, &rules) ||
|
||||
test_bit(TRIGGER_NETDEV_RX, &rules))
|
||||
|
||||
@@ -425,9 +425,6 @@ struct virtnet_info {
|
||||
u16 rss_indir_table_size;
|
||||
u32 rss_hash_types_supported;
|
||||
u32 rss_hash_types_saved;
|
||||
struct virtio_net_rss_config_hdr *rss_hdr;
|
||||
struct virtio_net_rss_config_trailer rss_trailer;
|
||||
u8 rss_hash_key_data[VIRTIO_NET_RSS_MAX_KEY_SIZE];
|
||||
|
||||
/* Has control virtqueue */
|
||||
bool has_cvq;
|
||||
@@ -441,9 +438,6 @@ struct virtnet_info {
|
||||
/* Packet virtio header size */
|
||||
u8 hdr_len;
|
||||
|
||||
/* Work struct for delayed refilling if we run low on memory. */
|
||||
struct delayed_work refill;
|
||||
|
||||
/* UDP tunnel support */
|
||||
bool tx_tnl;
|
||||
|
||||
@@ -451,12 +445,6 @@ struct virtnet_info {
|
||||
|
||||
bool rx_tnl_csum;
|
||||
|
||||
/* Is delayed refill enabled? */
|
||||
bool refill_enabled;
|
||||
|
||||
/* The lock to synchronize the access to refill_enabled */
|
||||
spinlock_t refill_lock;
|
||||
|
||||
/* Work struct for config space updates */
|
||||
struct work_struct config_work;
|
||||
|
||||
@@ -493,7 +481,16 @@ struct virtnet_info {
|
||||
struct failover *failover;
|
||||
|
||||
u64 device_stats_cap;
|
||||
|
||||
struct virtio_net_rss_config_hdr *rss_hdr;
|
||||
|
||||
/* Must be last as it ends in a flexible-array member. */
|
||||
TRAILING_OVERLAP(struct virtio_net_rss_config_trailer, rss_trailer, hash_key_data,
|
||||
u8 rss_hash_key_data[VIRTIO_NET_RSS_MAX_KEY_SIZE];
|
||||
);
|
||||
};
|
||||
static_assert(offsetof(struct virtnet_info, rss_trailer.hash_key_data) ==
|
||||
offsetof(struct virtnet_info, rss_hash_key_data));
|
||||
|
||||
struct padded_vnet_hdr {
|
||||
struct virtio_net_hdr_v1_hash hdr;
|
||||
@@ -720,20 +717,6 @@ static void virtnet_rq_free_buf(struct virtnet_info *vi,
|
||||
put_page(virt_to_head_page(buf));
|
||||
}
|
||||
|
||||
static void enable_delayed_refill(struct virtnet_info *vi)
|
||||
{
|
||||
spin_lock_bh(&vi->refill_lock);
|
||||
vi->refill_enabled = true;
|
||||
spin_unlock_bh(&vi->refill_lock);
|
||||
}
|
||||
|
||||
static void disable_delayed_refill(struct virtnet_info *vi)
|
||||
{
|
||||
spin_lock_bh(&vi->refill_lock);
|
||||
vi->refill_enabled = false;
|
||||
spin_unlock_bh(&vi->refill_lock);
|
||||
}
|
||||
|
||||
static void enable_rx_mode_work(struct virtnet_info *vi)
|
||||
{
|
||||
rtnl_lock();
|
||||
@@ -2948,42 +2931,6 @@ static void virtnet_napi_disable(struct receive_queue *rq)
|
||||
napi_disable(napi);
|
||||
}
|
||||
|
||||
static void refill_work(struct work_struct *work)
|
||||
{
|
||||
struct virtnet_info *vi =
|
||||
container_of(work, struct virtnet_info, refill.work);
|
||||
bool still_empty;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < vi->curr_queue_pairs; i++) {
|
||||
struct receive_queue *rq = &vi->rq[i];
|
||||
|
||||
/*
|
||||
* When queue API support is added in the future and the call
|
||||
* below becomes napi_disable_locked, this driver will need to
|
||||
* be refactored.
|
||||
*
|
||||
* One possible solution would be to:
|
||||
* - cancel refill_work with cancel_delayed_work (note:
|
||||
* non-sync)
|
||||
* - cancel refill_work with cancel_delayed_work_sync in
|
||||
* virtnet_remove after the netdev is unregistered
|
||||
* - wrap all of the work in a lock (perhaps the netdev
|
||||
* instance lock)
|
||||
* - check netif_running() and return early to avoid a race
|
||||
*/
|
||||
napi_disable(&rq->napi);
|
||||
still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
|
||||
virtnet_napi_do_enable(rq->vq, &rq->napi);
|
||||
|
||||
/* In theory, this can happen: if we don't get any buffers in
|
||||
* we will *never* try to fill again.
|
||||
*/
|
||||
if (still_empty)
|
||||
schedule_delayed_work(&vi->refill, HZ/2);
|
||||
}
|
||||
}
|
||||
|
||||
static int virtnet_receive_xsk_bufs(struct virtnet_info *vi,
|
||||
struct receive_queue *rq,
|
||||
int budget,
|
||||
@@ -3046,16 +2993,16 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
|
||||
else
|
||||
packets = virtnet_receive_packets(vi, rq, budget, xdp_xmit, &stats);
|
||||
|
||||
u64_stats_set(&stats.packets, packets);
|
||||
if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
|
||||
if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
|
||||
spin_lock(&vi->refill_lock);
|
||||
if (vi->refill_enabled)
|
||||
schedule_delayed_work(&vi->refill, 0);
|
||||
spin_unlock(&vi->refill_lock);
|
||||
}
|
||||
if (!try_fill_recv(vi, rq, GFP_ATOMIC))
|
||||
/* We need to retry refilling in the next NAPI poll so
|
||||
* we must return budget to make sure the NAPI is
|
||||
* repolled.
|
||||
*/
|
||||
packets = budget;
|
||||
}
|
||||
|
||||
u64_stats_set(&stats.packets, packets);
|
||||
u64_stats_update_begin(&rq->stats.syncp);
|
||||
for (i = 0; i < ARRAY_SIZE(virtnet_rq_stats_desc); i++) {
|
||||
size_t offset = virtnet_rq_stats_desc[i].offset;
|
||||
@@ -3226,13 +3173,12 @@ static int virtnet_open(struct net_device *dev)
|
||||
struct virtnet_info *vi = netdev_priv(dev);
|
||||
int i, err;
|
||||
|
||||
enable_delayed_refill(vi);
|
||||
|
||||
for (i = 0; i < vi->max_queue_pairs; i++) {
|
||||
if (i < vi->curr_queue_pairs)
|
||||
/* Make sure we have some buffers: if oom use wq. */
|
||||
if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
|
||||
schedule_delayed_work(&vi->refill, 0);
|
||||
/* Pre-fill rq agressively, to make sure we are ready to
|
||||
* get packets immediately.
|
||||
*/
|
||||
try_fill_recv(vi, &vi->rq[i], GFP_KERNEL);
|
||||
|
||||
err = virtnet_enable_queue_pair(vi, i);
|
||||
if (err < 0)
|
||||
@@ -3251,9 +3197,6 @@ static int virtnet_open(struct net_device *dev)
|
||||
return 0;
|
||||
|
||||
err_enable_qp:
|
||||
disable_delayed_refill(vi);
|
||||
cancel_delayed_work_sync(&vi->refill);
|
||||
|
||||
for (i--; i >= 0; i--) {
|
||||
virtnet_disable_queue_pair(vi, i);
|
||||
virtnet_cancel_dim(vi, &vi->rq[i].dim);
|
||||
@@ -3432,8 +3375,8 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
|
||||
static void __virtnet_rx_pause(struct virtnet_info *vi,
|
||||
struct receive_queue *rq)
|
||||
static void virtnet_rx_pause(struct virtnet_info *vi,
|
||||
struct receive_queue *rq)
|
||||
{
|
||||
bool running = netif_running(vi->dev);
|
||||
|
||||
@@ -3447,62 +3390,37 @@ static void virtnet_rx_pause_all(struct virtnet_info *vi)
|
||||
{
|
||||
int i;
|
||||
|
||||
/*
|
||||
* Make sure refill_work does not run concurrently to
|
||||
* avoid napi_disable race which leads to deadlock.
|
||||
*/
|
||||
disable_delayed_refill(vi);
|
||||
cancel_delayed_work_sync(&vi->refill);
|
||||
for (i = 0; i < vi->max_queue_pairs; i++)
|
||||
__virtnet_rx_pause(vi, &vi->rq[i]);
|
||||
virtnet_rx_pause(vi, &vi->rq[i]);
|
||||
}
|
||||
|
||||
static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq)
|
||||
static void virtnet_rx_resume(struct virtnet_info *vi,
|
||||
struct receive_queue *rq,
|
||||
bool refill)
|
||||
{
|
||||
/*
|
||||
* Make sure refill_work does not run concurrently to
|
||||
* avoid napi_disable race which leads to deadlock.
|
||||
*/
|
||||
disable_delayed_refill(vi);
|
||||
cancel_delayed_work_sync(&vi->refill);
|
||||
__virtnet_rx_pause(vi, rq);
|
||||
}
|
||||
if (netif_running(vi->dev)) {
|
||||
/* Pre-fill rq agressively, to make sure we are ready to get
|
||||
* packets immediately.
|
||||
*/
|
||||
if (refill)
|
||||
try_fill_recv(vi, rq, GFP_KERNEL);
|
||||
|
||||
static void __virtnet_rx_resume(struct virtnet_info *vi,
|
||||
struct receive_queue *rq,
|
||||
bool refill)
|
||||
{
|
||||
bool running = netif_running(vi->dev);
|
||||
bool schedule_refill = false;
|
||||
|
||||
if (refill && !try_fill_recv(vi, rq, GFP_KERNEL))
|
||||
schedule_refill = true;
|
||||
if (running)
|
||||
virtnet_napi_enable(rq);
|
||||
|
||||
if (schedule_refill)
|
||||
schedule_delayed_work(&vi->refill, 0);
|
||||
}
|
||||
}
|
||||
|
||||
static void virtnet_rx_resume_all(struct virtnet_info *vi)
|
||||
{
|
||||
int i;
|
||||
|
||||
enable_delayed_refill(vi);
|
||||
for (i = 0; i < vi->max_queue_pairs; i++) {
|
||||
if (i < vi->curr_queue_pairs)
|
||||
__virtnet_rx_resume(vi, &vi->rq[i], true);
|
||||
virtnet_rx_resume(vi, &vi->rq[i], true);
|
||||
else
|
||||
__virtnet_rx_resume(vi, &vi->rq[i], false);
|
||||
virtnet_rx_resume(vi, &vi->rq[i], false);
|
||||
}
|
||||
}
|
||||
|
||||
static void virtnet_rx_resume(struct virtnet_info *vi, struct receive_queue *rq)
|
||||
{
|
||||
enable_delayed_refill(vi);
|
||||
__virtnet_rx_resume(vi, rq, true);
|
||||
}
|
||||
|
||||
static int virtnet_rx_resize(struct virtnet_info *vi,
|
||||
struct receive_queue *rq, u32 ring_num)
|
||||
{
|
||||
@@ -3516,7 +3434,7 @@ static int virtnet_rx_resize(struct virtnet_info *vi,
|
||||
if (err)
|
||||
netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
|
||||
|
||||
virtnet_rx_resume(vi, rq);
|
||||
virtnet_rx_resume(vi, rq, true);
|
||||
return err;
|
||||
}
|
||||
|
||||
@@ -3829,11 +3747,12 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
|
||||
}
|
||||
succ:
|
||||
vi->curr_queue_pairs = queue_pairs;
|
||||
/* virtnet_open() will refill when device is going to up. */
|
||||
spin_lock_bh(&vi->refill_lock);
|
||||
if (dev->flags & IFF_UP && vi->refill_enabled)
|
||||
schedule_delayed_work(&vi->refill, 0);
|
||||
spin_unlock_bh(&vi->refill_lock);
|
||||
if (dev->flags & IFF_UP) {
|
||||
local_bh_disable();
|
||||
for (int i = 0; i < vi->curr_queue_pairs; ++i)
|
||||
virtqueue_napi_schedule(&vi->rq[i].napi, vi->rq[i].vq);
|
||||
local_bh_enable();
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -3843,10 +3762,6 @@ static int virtnet_close(struct net_device *dev)
|
||||
struct virtnet_info *vi = netdev_priv(dev);
|
||||
int i;
|
||||
|
||||
/* Make sure NAPI doesn't schedule refill work */
|
||||
disable_delayed_refill(vi);
|
||||
/* Make sure refill_work doesn't re-enable napi! */
|
||||
cancel_delayed_work_sync(&vi->refill);
|
||||
/* Prevent the config change callback from changing carrier
|
||||
* after close
|
||||
*/
|
||||
@@ -5802,7 +5717,6 @@ static int virtnet_restore_up(struct virtio_device *vdev)
|
||||
|
||||
virtio_device_ready(vdev);
|
||||
|
||||
enable_delayed_refill(vi);
|
||||
enable_rx_mode_work(vi);
|
||||
|
||||
if (netif_running(vi->dev)) {
|
||||
@@ -5892,7 +5806,7 @@ static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct receive_queu
|
||||
|
||||
rq->xsk_pool = pool;
|
||||
|
||||
virtnet_rx_resume(vi, rq);
|
||||
virtnet_rx_resume(vi, rq, true);
|
||||
|
||||
if (pool)
|
||||
return 0;
|
||||
@@ -6559,7 +6473,6 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
|
||||
if (!vi->rq)
|
||||
goto err_rq;
|
||||
|
||||
INIT_DELAYED_WORK(&vi->refill, refill_work);
|
||||
for (i = 0; i < vi->max_queue_pairs; i++) {
|
||||
vi->rq[i].pages = NULL;
|
||||
netif_napi_add_config(vi->dev, &vi->rq[i].napi, virtnet_poll,
|
||||
@@ -6901,7 +6814,6 @@ static int virtnet_probe(struct virtio_device *vdev)
|
||||
|
||||
INIT_WORK(&vi->config_work, virtnet_config_changed_work);
|
||||
INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work);
|
||||
spin_lock_init(&vi->refill_lock);
|
||||
|
||||
if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
|
||||
vi->mergeable_rx_bufs = true;
|
||||
@@ -7165,7 +7077,6 @@ free_failover:
|
||||
net_failover_destroy(vi->failover);
|
||||
free_vqs:
|
||||
virtio_reset_device(vdev);
|
||||
cancel_delayed_work_sync(&vi->refill);
|
||||
free_receive_page_frags(vi);
|
||||
virtnet_del_vqs(vi);
|
||||
free:
|
||||
|
||||
@@ -46,6 +46,12 @@
|
||||
#include <linux/list.h>
|
||||
#include <linux/netdevice.h>
|
||||
|
||||
/* exposed CAN device capabilities for network layer */
|
||||
#define CAN_CAP_CC BIT(0) /* CAN CC aka Classical CAN */
|
||||
#define CAN_CAP_FD BIT(1) /* CAN FD */
|
||||
#define CAN_CAP_XL BIT(2) /* CAN XL */
|
||||
#define CAN_CAP_RO BIT(3) /* read-only mode (LISTEN/RESTRICTED) */
|
||||
|
||||
#define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS)
|
||||
#define CAN_EFF_RCV_HASH_BITS 10
|
||||
#define CAN_EFF_RCV_ARRAY_SZ (1 << CAN_EFF_RCV_HASH_BITS)
|
||||
@@ -64,6 +70,7 @@ struct can_ml_priv {
|
||||
#ifdef CAN_J1939
|
||||
struct j1939_priv *j1939_priv;
|
||||
#endif
|
||||
u32 can_cap;
|
||||
};
|
||||
|
||||
static inline struct can_ml_priv *can_get_ml_priv(struct net_device *dev)
|
||||
@@ -77,4 +84,21 @@ static inline void can_set_ml_priv(struct net_device *dev,
|
||||
netdev_set_ml_priv(dev, ml_priv, ML_PRIV_CAN);
|
||||
}
|
||||
|
||||
static inline bool can_cap_enabled(struct net_device *dev, u32 cap)
|
||||
{
|
||||
struct can_ml_priv *can_ml = can_get_ml_priv(dev);
|
||||
|
||||
if (!can_ml)
|
||||
return false;
|
||||
|
||||
return (can_ml->can_cap & cap);
|
||||
}
|
||||
|
||||
static inline void can_set_cap(struct net_device *dev, u32 cap)
|
||||
{
|
||||
struct can_ml_priv *can_ml = can_get_ml_priv(dev);
|
||||
|
||||
can_ml->can_cap = cap;
|
||||
}
|
||||
|
||||
#endif /* CAN_ML_H */
|
||||
|
||||
@@ -111,18 +111,12 @@ struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
|
||||
void free_candev(struct net_device *dev);
|
||||
|
||||
/* a candev safe wrapper around netdev_priv */
|
||||
#if IS_ENABLED(CONFIG_CAN_NETLINK)
|
||||
struct can_priv *safe_candev_priv(struct net_device *dev);
|
||||
#else
|
||||
static inline struct can_priv *safe_candev_priv(struct net_device *dev)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
#endif
|
||||
|
||||
int open_candev(struct net_device *dev);
|
||||
void close_candev(struct net_device *dev);
|
||||
void can_set_default_mtu(struct net_device *dev);
|
||||
void can_set_cap_info(struct net_device *dev);
|
||||
int __must_check can_set_static_ctrlmode(struct net_device *dev,
|
||||
u32 static_mode);
|
||||
int can_hwtstamp_get(struct net_device *netdev,
|
||||
|
||||
@@ -52,8 +52,8 @@ static inline void airoha_ppe_put_dev(struct airoha_ppe_dev *dev)
|
||||
{
|
||||
}
|
||||
|
||||
static inline int airoha_ppe_setup_tc_block_cb(struct airoha_ppe_dev *dev,
|
||||
void *type_data)
|
||||
static inline int airoha_ppe_dev_setup_tc_block_cb(struct airoha_ppe_dev *dev,
|
||||
void *type_data)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
@@ -67,6 +67,7 @@
|
||||
FN(TC_EGRESS) \
|
||||
FN(SECURITY_HOOK) \
|
||||
FN(QDISC_DROP) \
|
||||
FN(QDISC_BURST_DROP) \
|
||||
FN(QDISC_OVERLIMIT) \
|
||||
FN(QDISC_CONGESTED) \
|
||||
FN(CAKE_FLOOD) \
|
||||
@@ -374,6 +375,11 @@ enum skb_drop_reason {
|
||||
* failed to enqueue to current qdisc)
|
||||
*/
|
||||
SKB_DROP_REASON_QDISC_DROP,
|
||||
/**
|
||||
* @SKB_DROP_REASON_QDISC_BURST_DROP: dropped when net.core.qdisc_max_burst
|
||||
* limit is hit.
|
||||
*/
|
||||
SKB_DROP_REASON_QDISC_BURST_DROP,
|
||||
/**
|
||||
* @SKB_DROP_REASON_QDISC_OVERLIMIT: dropped by qdisc when a qdisc
|
||||
* instance exceeds its total buffer size limit.
|
||||
|
||||
@@ -42,6 +42,7 @@ struct net_hotdata {
|
||||
int netdev_budget_usecs;
|
||||
int tstamp_prequeue;
|
||||
int max_backlog;
|
||||
int qdisc_max_burst;
|
||||
int dev_tx_weight;
|
||||
int dev_rx_weight;
|
||||
int sysctl_max_skb_frags;
|
||||
|
||||
@@ -19,6 +19,7 @@
|
||||
#include <net/rtnetlink.h>
|
||||
#include <net/lwtunnel.h>
|
||||
#include <net/dst_cache.h>
|
||||
#include <net/netdev_lock.h>
|
||||
|
||||
#if IS_ENABLED(CONFIG_IPV6)
|
||||
#include <net/ipv6.h>
|
||||
@@ -372,7 +373,17 @@ static inline void ip_tunnel_init_flow(struct flowi4 *fl4,
|
||||
fl4->flowi4_flags = flow_flags;
|
||||
}
|
||||
|
||||
int ip_tunnel_init(struct net_device *dev);
|
||||
int __ip_tunnel_init(struct net_device *dev);
|
||||
#define ip_tunnel_init(DEV) \
|
||||
({ \
|
||||
struct net_device *__dev = (DEV); \
|
||||
int __res = __ip_tunnel_init(__dev); \
|
||||
\
|
||||
if (!__res) \
|
||||
netdev_lockdep_set_classes(__dev);\
|
||||
__res; \
|
||||
})
|
||||
|
||||
void ip_tunnel_uninit(struct net_device *dev);
|
||||
void ip_tunnel_dellink(struct net_device *dev, struct list_head *head);
|
||||
struct net *ip_tunnel_get_link_net(const struct net_device *dev);
|
||||
|
||||
@@ -4420,6 +4420,7 @@ static int hci_le_set_event_mask_sync(struct hci_dev *hdev)
|
||||
if (bis_capable(hdev)) {
|
||||
events[1] |= 0x20; /* LE PA Report */
|
||||
events[1] |= 0x40; /* LE PA Sync Established */
|
||||
events[1] |= 0x80; /* LE PA Sync Lost */
|
||||
events[3] |= 0x04; /* LE Create BIG Complete */
|
||||
events[3] |= 0x08; /* LE Terminate BIG Complete */
|
||||
events[3] |= 0x10; /* LE BIG Sync Established */
|
||||
|
||||
@@ -70,7 +70,7 @@ static inline int has_expired(const struct net_bridge *br,
|
||||
{
|
||||
return !test_bit(BR_FDB_STATIC, &fdb->flags) &&
|
||||
!test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags) &&
|
||||
time_before_eq(fdb->updated + hold_time(br), jiffies);
|
||||
time_before_eq(READ_ONCE(fdb->updated) + hold_time(br), jiffies);
|
||||
}
|
||||
|
||||
static int fdb_to_nud(const struct net_bridge *br,
|
||||
@@ -126,9 +126,9 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
|
||||
if (nla_put_u32(skb, NDA_FLAGS_EXT, ext_flags))
|
||||
goto nla_put_failure;
|
||||
|
||||
ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
|
||||
ci.ndm_used = jiffies_to_clock_t(now - READ_ONCE(fdb->used));
|
||||
ci.ndm_confirmed = 0;
|
||||
ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated);
|
||||
ci.ndm_updated = jiffies_to_clock_t(now - READ_ONCE(fdb->updated));
|
||||
ci.ndm_refcnt = 0;
|
||||
if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
|
||||
goto nla_put_failure;
|
||||
@@ -551,7 +551,7 @@ void br_fdb_cleanup(struct work_struct *work)
|
||||
*/
|
||||
rcu_read_lock();
|
||||
hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
|
||||
unsigned long this_timer = f->updated + delay;
|
||||
unsigned long this_timer = READ_ONCE(f->updated) + delay;
|
||||
|
||||
if (test_bit(BR_FDB_STATIC, &f->flags) ||
|
||||
test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &f->flags)) {
|
||||
@@ -924,6 +924,7 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf,
|
||||
{
|
||||
struct net_bridge_fdb_entry *f;
|
||||
struct __fdb_entry *fe = buf;
|
||||
unsigned long delta;
|
||||
int num = 0;
|
||||
|
||||
memset(buf, 0, maxnum*sizeof(struct __fdb_entry));
|
||||
@@ -953,8 +954,11 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf,
|
||||
fe->port_hi = f->dst->port_no >> 8;
|
||||
|
||||
fe->is_local = test_bit(BR_FDB_LOCAL, &f->flags);
|
||||
if (!test_bit(BR_FDB_STATIC, &f->flags))
|
||||
fe->ageing_timer_value = jiffies_delta_to_clock_t(jiffies - f->updated);
|
||||
if (!test_bit(BR_FDB_STATIC, &f->flags)) {
|
||||
delta = jiffies - READ_ONCE(f->updated);
|
||||
fe->ageing_timer_value =
|
||||
jiffies_delta_to_clock_t(delta);
|
||||
}
|
||||
++fe;
|
||||
++num;
|
||||
}
|
||||
@@ -1002,8 +1006,8 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
|
||||
unsigned long now = jiffies;
|
||||
bool fdb_modified = false;
|
||||
|
||||
if (now != fdb->updated) {
|
||||
fdb->updated = now;
|
||||
if (now != READ_ONCE(fdb->updated)) {
|
||||
WRITE_ONCE(fdb->updated, now);
|
||||
fdb_modified = __fdb_mark_active(fdb);
|
||||
}
|
||||
|
||||
@@ -1242,10 +1246,10 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
|
||||
if (fdb_handle_notify(fdb, notify))
|
||||
modified = true;
|
||||
|
||||
fdb->used = jiffies;
|
||||
WRITE_ONCE(fdb->used, jiffies);
|
||||
if (modified) {
|
||||
if (refresh)
|
||||
fdb->updated = jiffies;
|
||||
WRITE_ONCE(fdb->updated, jiffies);
|
||||
fdb_notify(br, fdb, RTM_NEWNEIGH, true);
|
||||
}
|
||||
|
||||
@@ -1556,7 +1560,7 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
|
||||
goto err_unlock;
|
||||
}
|
||||
|
||||
fdb->updated = jiffies;
|
||||
WRITE_ONCE(fdb->updated, jiffies);
|
||||
|
||||
if (READ_ONCE(fdb->dst) != p) {
|
||||
WRITE_ONCE(fdb->dst, p);
|
||||
@@ -1565,7 +1569,7 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
|
||||
|
||||
if (test_and_set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) {
|
||||
/* Refresh entry */
|
||||
fdb->used = jiffies;
|
||||
WRITE_ONCE(fdb->used, jiffies);
|
||||
} else {
|
||||
modified = true;
|
||||
}
|
||||
|
||||
@@ -221,8 +221,8 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
|
||||
if (test_bit(BR_FDB_LOCAL, &dst->flags))
|
||||
return br_pass_frame_up(skb, false);
|
||||
|
||||
if (now != dst->used)
|
||||
dst->used = now;
|
||||
if (now != READ_ONCE(dst->used))
|
||||
WRITE_ONCE(dst->used, now);
|
||||
br_forward(dst->dst, skb, local_rcv, false);
|
||||
} else {
|
||||
if (!mcast_hit)
|
||||
|
||||
@@ -1695,8 +1695,16 @@ static int j1939_xtp_rx_rts_session_active(struct j1939_session *session,
|
||||
|
||||
j1939_session_timers_cancel(session);
|
||||
j1939_session_cancel(session, J1939_XTP_ABORT_BUSY);
|
||||
if (session->transmission)
|
||||
if (session->transmission) {
|
||||
j1939_session_deactivate_activate_next(session);
|
||||
} else if (session->state == J1939_SESSION_WAITING_ABORT) {
|
||||
/* Force deactivation for the receiver.
|
||||
* If we rely on the timer starting in j1939_session_cancel,
|
||||
* a second RTS call here will cancel that timer and fail
|
||||
* to restart it because the state is already WAITING_ABORT.
|
||||
*/
|
||||
j1939_session_deactivate_activate_next(session);
|
||||
}
|
||||
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
@@ -49,8 +49,8 @@
|
||||
#include <linux/if_arp.h>
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/can.h>
|
||||
#include <linux/can/can-ml.h>
|
||||
#include <linux/can/core.h>
|
||||
#include <linux/can/dev.h> /* for can_is_canxl_dev_mtu() */
|
||||
#include <linux/can/skb.h>
|
||||
#include <linux/can/raw.h>
|
||||
#include <net/sock.h>
|
||||
@@ -892,58 +892,21 @@ static void raw_put_canxl_vcid(struct raw_sock *ro, struct sk_buff *skb)
|
||||
}
|
||||
}
|
||||
|
||||
static inline bool raw_dev_cc_enabled(struct net_device *dev,
|
||||
struct can_priv *priv)
|
||||
{
|
||||
/* The CANXL-only mode disables error-signalling on the CAN bus
|
||||
* which is needed to send CAN CC/FD frames
|
||||
*/
|
||||
if (priv)
|
||||
return !can_dev_in_xl_only_mode(priv);
|
||||
|
||||
/* virtual CAN interfaces always support CAN CC */
|
||||
return true;
|
||||
}
|
||||
|
||||
static inline bool raw_dev_fd_enabled(struct net_device *dev,
|
||||
struct can_priv *priv)
|
||||
{
|
||||
/* check FD ctrlmode on real CAN interfaces */
|
||||
if (priv)
|
||||
return (priv->ctrlmode & CAN_CTRLMODE_FD);
|
||||
|
||||
/* check MTU for virtual CAN FD interfaces */
|
||||
return (READ_ONCE(dev->mtu) >= CANFD_MTU);
|
||||
}
|
||||
|
||||
static inline bool raw_dev_xl_enabled(struct net_device *dev,
|
||||
struct can_priv *priv)
|
||||
{
|
||||
/* check XL ctrlmode on real CAN interfaces */
|
||||
if (priv)
|
||||
return (priv->ctrlmode & CAN_CTRLMODE_XL);
|
||||
|
||||
/* check MTU for virtual CAN XL interfaces */
|
||||
return can_is_canxl_dev_mtu(READ_ONCE(dev->mtu));
|
||||
}
|
||||
|
||||
static unsigned int raw_check_txframe(struct raw_sock *ro, struct sk_buff *skb,
|
||||
struct net_device *dev)
|
||||
{
|
||||
struct can_priv *priv = safe_candev_priv(dev);
|
||||
|
||||
/* Classical CAN */
|
||||
if (can_is_can_skb(skb) && raw_dev_cc_enabled(dev, priv))
|
||||
if (can_is_can_skb(skb) && can_cap_enabled(dev, CAN_CAP_CC))
|
||||
return CAN_MTU;
|
||||
|
||||
/* CAN FD */
|
||||
if (ro->fd_frames && can_is_canfd_skb(skb) &&
|
||||
raw_dev_fd_enabled(dev, priv))
|
||||
can_cap_enabled(dev, CAN_CAP_FD))
|
||||
return CANFD_MTU;
|
||||
|
||||
/* CAN XL */
|
||||
if (ro->xl_frames && can_is_canxl_skb(skb) &&
|
||||
raw_dev_xl_enabled(dev, priv))
|
||||
can_cap_enabled(dev, CAN_CAP_XL))
|
||||
return CANXL_MTU;
|
||||
|
||||
return 0;
|
||||
@@ -982,6 +945,12 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
|
||||
if (!dev)
|
||||
return -ENXIO;
|
||||
|
||||
/* no sending on a CAN device in read-only mode */
|
||||
if (can_cap_enabled(dev, CAN_CAP_RO)) {
|
||||
err = -EACCES;
|
||||
goto put_dev;
|
||||
}
|
||||
|
||||
skb = sock_alloc_send_skb(sk, size + sizeof(struct can_skb_priv),
|
||||
msg->msg_flags & MSG_DONTWAIT, &err);
|
||||
if (!skb)
|
||||
|
||||
@@ -478,15 +478,21 @@ static const unsigned short netdev_lock_type[] = {
|
||||
ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
|
||||
ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
|
||||
ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
|
||||
ARPHRD_CAN, ARPHRD_MCTP,
|
||||
ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
|
||||
ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
|
||||
ARPHRD_RAWHDLC, ARPHRD_RAWIP,
|
||||
ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
|
||||
ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
|
||||
ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
|
||||
ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
|
||||
ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
|
||||
ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
|
||||
ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
|
||||
ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
|
||||
ARPHRD_IEEE80211_RADIOTAP,
|
||||
ARPHRD_IEEE802154, ARPHRD_IEEE802154_MONITOR,
|
||||
ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
|
||||
ARPHRD_CAIF, ARPHRD_IP6GRE, ARPHRD_NETLINK, ARPHRD_6LOWPAN,
|
||||
ARPHRD_VSOCKMON,
|
||||
ARPHRD_VOID, ARPHRD_NONE};
|
||||
|
||||
static const char *const netdev_lock_name[] = {
|
||||
"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
|
||||
@@ -495,15 +501,21 @@ static const char *const netdev_lock_name[] = {
|
||||
"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
|
||||
"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
|
||||
"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
|
||||
"_xmit_CAN", "_xmit_MCTP",
|
||||
"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
|
||||
"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
|
||||
"_xmit_RAWHDLC", "_xmit_RAWIP",
|
||||
"_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
|
||||
"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
|
||||
"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
|
||||
"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
|
||||
"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
|
||||
"_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
|
||||
"_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
|
||||
"_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
|
||||
"_xmit_IEEE80211_RADIOTAP",
|
||||
"_xmit_IEEE802154", "_xmit_IEEE802154_MONITOR",
|
||||
"_xmit_PHONET", "_xmit_PHONET_PIPE",
|
||||
"_xmit_CAIF", "_xmit_IP6GRE", "_xmit_NETLINK", "_xmit_6LOWPAN",
|
||||
"_xmit_VSOCKMON",
|
||||
"_xmit_VOID", "_xmit_NONE"};
|
||||
|
||||
static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
|
||||
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
|
||||
@@ -516,6 +528,7 @@ static inline unsigned short netdev_lock_pos(unsigned short dev_type)
|
||||
if (netdev_lock_type[i] == dev_type)
|
||||
return i;
|
||||
/* the last key is used by default */
|
||||
WARN_ONCE(1, "netdev_lock_pos() could not find dev_type=%u\n", dev_type);
|
||||
return ARRAY_SIZE(netdev_lock_type) - 1;
|
||||
}
|
||||
|
||||
@@ -4190,8 +4203,8 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
|
||||
do {
|
||||
if (first_n && !defer_count) {
|
||||
defer_count = atomic_long_inc_return(&q->defer_count);
|
||||
if (unlikely(defer_count > READ_ONCE(q->limit))) {
|
||||
kfree_skb_reason(skb, SKB_DROP_REASON_QDISC_DROP);
|
||||
if (unlikely(defer_count > READ_ONCE(net_hotdata.qdisc_max_burst))) {
|
||||
kfree_skb_reason(skb, SKB_DROP_REASON_QDISC_BURST_DROP);
|
||||
return NET_XMIT_DROP;
|
||||
}
|
||||
}
|
||||
@@ -4209,7 +4222,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
|
||||
ll_list = llist_del_all(&q->defer_list);
|
||||
/* There is a small race because we clear defer_count not atomically
|
||||
* with the prior llist_del_all(). This means defer_list could grow
|
||||
* over q->limit.
|
||||
* over qdisc_max_burst.
|
||||
*/
|
||||
atomic_long_set(&q->defer_count, 0);
|
||||
|
||||
|
||||
@@ -68,6 +68,7 @@ void dst_init(struct dst_entry *dst, struct dst_ops *ops,
|
||||
dst->lwtstate = NULL;
|
||||
rcuref_init(&dst->__rcuref, 1);
|
||||
INIT_LIST_HEAD(&dst->rt_uncached);
|
||||
dst->rt_uncached_list = NULL;
|
||||
dst->__use = 0;
|
||||
dst->lastuse = jiffies;
|
||||
dst->flags = flags;
|
||||
|
||||
@@ -17,6 +17,7 @@ struct net_hotdata net_hotdata __cacheline_aligned = {
|
||||
|
||||
.tstamp_prequeue = 1,
|
||||
.max_backlog = 1000,
|
||||
.qdisc_max_burst = 1000,
|
||||
.dev_tx_weight = 64,
|
||||
.dev_rx_weight = 64,
|
||||
.sysctl_max_skb_frags = MAX_SKB_FRAGS,
|
||||
|
||||
@@ -429,6 +429,13 @@ static struct ctl_table net_core_table[] = {
|
||||
.mode = 0644,
|
||||
.proc_handler = proc_dointvec
|
||||
},
|
||||
{
|
||||
.procname = "qdisc_max_burst",
|
||||
.data = &net_hotdata.qdisc_max_burst,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0644,
|
||||
.proc_handler = proc_dointvec
|
||||
},
|
||||
{
|
||||
.procname = "netdev_rss_key",
|
||||
.data = &netdev_rss_key,
|
||||
|
||||
@@ -122,8 +122,8 @@ static struct sk_buff *xfrm4_tunnel_gso_segment(struct xfrm_state *x,
|
||||
struct sk_buff *skb,
|
||||
netdev_features_t features)
|
||||
{
|
||||
const struct xfrm_mode *inner_mode = xfrm_ip2inner_mode(x,
|
||||
XFRM_MODE_SKB_CB(skb)->protocol);
|
||||
struct xfrm_offload *xo = xfrm_offload(skb);
|
||||
const struct xfrm_mode *inner_mode = xfrm_ip2inner_mode(x, xo->proto);
|
||||
__be16 type = inner_mode->family == AF_INET6 ? htons(ETH_P_IPV6)
|
||||
: htons(ETH_P_IP);
|
||||
|
||||
|
||||
@@ -891,10 +891,17 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
|
||||
const void *daddr, const void *saddr, unsigned int len)
|
||||
{
|
||||
struct ip_tunnel *t = netdev_priv(dev);
|
||||
struct iphdr *iph;
|
||||
struct gre_base_hdr *greh;
|
||||
struct iphdr *iph;
|
||||
int needed;
|
||||
|
||||
iph = skb_push(skb, t->hlen + sizeof(*iph));
|
||||
needed = t->hlen + sizeof(*iph);
|
||||
if (skb_headroom(skb) < needed &&
|
||||
pskb_expand_head(skb, HH_DATA_ALIGN(needed - skb_headroom(skb)),
|
||||
0, GFP_ATOMIC))
|
||||
return -needed;
|
||||
|
||||
iph = skb_push(skb, needed);
|
||||
greh = (struct gre_base_hdr *)(iph+1);
|
||||
greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
|
||||
greh->protocol = htons(type);
|
||||
|
||||
@@ -1281,7 +1281,7 @@ int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ip_tunnel_changelink);
|
||||
|
||||
int ip_tunnel_init(struct net_device *dev)
|
||||
int __ip_tunnel_init(struct net_device *dev)
|
||||
{
|
||||
struct ip_tunnel *tunnel = netdev_priv(dev);
|
||||
struct iphdr *iph = &tunnel->parms.iph;
|
||||
@@ -1308,10 +1308,9 @@ int ip_tunnel_init(struct net_device *dev)
|
||||
|
||||
if (tunnel->collect_md)
|
||||
netif_keep_dst(dev);
|
||||
netdev_lockdep_set_classes(dev);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ip_tunnel_init);
|
||||
EXPORT_SYMBOL_GPL(__ip_tunnel_init);
|
||||
|
||||
void ip_tunnel_uninit(struct net_device *dev)
|
||||
{
|
||||
|
||||
@@ -1537,9 +1537,9 @@ void rt_add_uncached_list(struct rtable *rt)
|
||||
|
||||
void rt_del_uncached_list(struct rtable *rt)
|
||||
{
|
||||
if (!list_empty(&rt->dst.rt_uncached)) {
|
||||
struct uncached_list *ul = rt->dst.rt_uncached_list;
|
||||
struct uncached_list *ul = rt->dst.rt_uncached_list;
|
||||
|
||||
if (ul) {
|
||||
spin_lock_bh(&ul->lock);
|
||||
list_del_init(&rt->dst.rt_uncached);
|
||||
spin_unlock_bh(&ul->lock);
|
||||
|
||||
@@ -3112,12 +3112,12 @@ static int inet6_addr_del(struct net *net, int ifindex, u32 ifa_flags,
|
||||
in6_ifa_hold(ifp);
|
||||
read_unlock_bh(&idev->lock);
|
||||
|
||||
ipv6_del_addr(ifp);
|
||||
|
||||
if (!(ifp->flags & IFA_F_TEMPORARY) &&
|
||||
(ifp->flags & IFA_F_MANAGETEMPADDR))
|
||||
delete_tempaddrs(idev, ifp);
|
||||
|
||||
ipv6_del_addr(ifp);
|
||||
|
||||
addrconf_verify_rtnl(net);
|
||||
if (ipv6_addr_is_multicast(pfx)) {
|
||||
ipv6_mc_config(net->ipv6.mc_autojoin_sk,
|
||||
|
||||
@@ -158,8 +158,8 @@ static struct sk_buff *xfrm6_tunnel_gso_segment(struct xfrm_state *x,
|
||||
struct sk_buff *skb,
|
||||
netdev_features_t features)
|
||||
{
|
||||
const struct xfrm_mode *inner_mode = xfrm_ip2inner_mode(x,
|
||||
XFRM_MODE_SKB_CB(skb)->protocol);
|
||||
struct xfrm_offload *xo = xfrm_offload(skb);
|
||||
const struct xfrm_mode *inner_mode = xfrm_ip2inner_mode(x, xo->proto);
|
||||
__be16 type = inner_mode->family == AF_INET ? htons(ETH_P_IP)
|
||||
: htons(ETH_P_IPV6);
|
||||
|
||||
|
||||
@@ -844,7 +844,7 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
|
||||
|
||||
skb_reset_network_header(skb);
|
||||
|
||||
if (!pskb_inet_may_pull(skb)) {
|
||||
if (skb_vlan_inet_prepare(skb, true)) {
|
||||
DEV_STATS_INC(tunnel->dev, rx_length_errors);
|
||||
DEV_STATS_INC(tunnel->dev, rx_errors);
|
||||
goto drop;
|
||||
|
||||
@@ -148,9 +148,9 @@ void rt6_uncached_list_add(struct rt6_info *rt)
|
||||
|
||||
void rt6_uncached_list_del(struct rt6_info *rt)
|
||||
{
|
||||
if (!list_empty(&rt->dst.rt_uncached)) {
|
||||
struct uncached_list *ul = rt->dst.rt_uncached_list;
|
||||
struct uncached_list *ul = rt->dst.rt_uncached_list;
|
||||
|
||||
if (ul) {
|
||||
spin_lock_bh(&ul->lock);
|
||||
list_del_init(&rt->dst.rt_uncached);
|
||||
spin_unlock_bh(&ul->lock);
|
||||
|
||||
@@ -529,8 +529,10 @@ set_change_agg:
|
||||
return 0;
|
||||
|
||||
destroy_class:
|
||||
qdisc_put(cl->qdisc);
|
||||
kfree(cl);
|
||||
if (!existing) {
|
||||
qdisc_put(cl->qdisc);
|
||||
kfree(cl);
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
|
||||
@@ -3151,6 +3151,7 @@ int __xfrm_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
|
||||
int err;
|
||||
|
||||
if (family == AF_INET &&
|
||||
(!x->dir || x->dir == XFRM_SA_DIR_OUT) &&
|
||||
READ_ONCE(xs_net(x)->ipv4.sysctl_ip_no_pmtu_disc))
|
||||
x->props.flags |= XFRM_STATE_NOPMTUDISC;
|
||||
|
||||
|
||||
@@ -166,13 +166,13 @@ class YnlDocGenerator:
|
||||
continue
|
||||
lines.append(self.fmt.rst_paragraph(self.fmt.bold(key), level + 1))
|
||||
if key in ['request', 'reply']:
|
||||
lines.append(self.parse_do_attributes(do_dict[key], level + 1) + "\n")
|
||||
lines.append(self.parse_op_attributes(do_dict[key], level + 1) + "\n")
|
||||
else:
|
||||
lines.append(self.fmt.headroom(level + 2) + do_dict[key] + "\n")
|
||||
|
||||
return "\n".join(lines)
|
||||
|
||||
def parse_do_attributes(self, attrs: Dict[str, Any], level: int = 0) -> str:
|
||||
def parse_op_attributes(self, attrs: Dict[str, Any], level: int = 0) -> str:
|
||||
"""Parse 'attributes' section"""
|
||||
if "attributes" not in attrs:
|
||||
return ""
|
||||
@@ -184,7 +184,7 @@ class YnlDocGenerator:
|
||||
|
||||
def parse_operations(self, operations: List[Dict[str, Any]], namespace: str) -> str:
|
||||
"""Parse operations block"""
|
||||
preprocessed = ["name", "doc", "title", "do", "dump", "flags"]
|
||||
preprocessed = ["name", "doc", "title", "do", "dump", "flags", "event"]
|
||||
linkable = ["fixed-header", "attribute-set"]
|
||||
lines = []
|
||||
|
||||
@@ -217,6 +217,9 @@ class YnlDocGenerator:
|
||||
if "dump" in operation:
|
||||
lines.append(self.fmt.rst_paragraph(":dump:", 0))
|
||||
lines.append(self.parse_do(operation["dump"], 0))
|
||||
if "event" in operation:
|
||||
lines.append(self.fmt.rst_paragraph(":event:", 0))
|
||||
lines.append(self.parse_op_attributes(operation["event"], 0))
|
||||
|
||||
# New line after fields
|
||||
lines.append("\n")
|
||||
|
||||
@@ -485,8 +485,8 @@ static void parse_rps_bitmap(const char *arg)
|
||||
|
||||
bitmap = strtoul(arg, NULL, 0);
|
||||
|
||||
if (bitmap & ~(RPS_MAX_CPUS - 1))
|
||||
error(1, 0, "rps bitmap 0x%lx out of bounds 0..%lu",
|
||||
if (bitmap & ~((1UL << RPS_MAX_CPUS) - 1))
|
||||
error(1, 0, "rps bitmap 0x%lx out of bounds, max cpu %lu",
|
||||
bitmap, RPS_MAX_CPUS - 1);
|
||||
|
||||
for (i = 0; i < RPS_MAX_CPUS; i++)
|
||||
|
||||
@@ -94,12 +94,14 @@ def _configure_rps(cfg, rps_cpus):
|
||||
mask = 0
|
||||
for cpu in rps_cpus:
|
||||
mask |= (1 << cpu)
|
||||
mask = hex(mask)[2:]
|
||||
|
||||
mask = hex(mask)
|
||||
|
||||
# Set RPS bitmap for all rx queues
|
||||
for rps_file in glob.glob(f"/sys/class/net/{cfg.ifname}/queues/rx-*/rps_cpus"):
|
||||
with open(rps_file, "w", encoding="utf-8") as fp:
|
||||
fp.write(mask)
|
||||
# sysfs expects hex without '0x' prefix, toeplitz.c needs the prefix
|
||||
fp.write(mask[2:])
|
||||
|
||||
return mask
|
||||
|
||||
|
||||
@@ -511,6 +511,18 @@ void run_tests(const struct test_case *test_cases,
|
||||
|
||||
printf("ok\n");
|
||||
}
|
||||
|
||||
printf("All tests have been executed. Waiting other peer...");
|
||||
fflush(stdout);
|
||||
|
||||
/*
|
||||
* Final full barrier, to ensure that all tests have been run and
|
||||
* that even the last one has been successful on both sides.
|
||||
*/
|
||||
control_writeln("COMPLETED");
|
||||
control_expectln("COMPLETED");
|
||||
|
||||
printf("ok\n");
|
||||
}
|
||||
|
||||
void list_tests(const struct test_case *test_cases)
|
||||
|
||||