mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-07 06:50:43 -04:00
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
1) Support busy polling generically, for all NAPI drivers. From Eric
Dumazet.
2) Add byte/packet counter support to nft_ct, from Florian Westphal.
3) Add RSS/XPS support to mvneta driver, from Gregory Clement.
4) Implement IPV6_HDRINCL socket option for raw sockets, from Hannes
Frederic Sowa.
5) Add support for T6 adapter to cxgb4 driver, from Hariprasad Shenai.
6) Add support for VLAN device bridging to mlxsw switch driver, from
Ido Schimmel.
7) Add driver for Netronome NFP4000/NFP6000, from Jakub Kicinski.
8) Provide hwmon interface to mlxsw switch driver, from Jiri Pirko.
9) Reorganize wireless drivers into per-vendor directories just like we
do for ethernet drivers. From Kalle Valo.
10) Provide a way for administrators to "destroy" connected sockets via the
SOCK_DESTROY socket netlink diag operation. From Lorenzo Colitti.
11) Add support to add/remove multicast routes via netlink, from Nikolay
Aleksandrov.
12) Make TCP keepalive settings per-namespace, from Nikolay Borisov.
13) Add forwarding and packet duplication facilities to nf_tables, from
Pablo Neira Ayuso.
14) Dead route support in MPLS, from Roopa Prabhu.
15) TSO support for thunderx chips, from Sunil Goutham.
16) Add driver for IBM's System i/p VNIC protocol, from Thomas Falcon.
17) Rationalize, consolidate, and more completely document the checksum
offloading facilities in the networking stack. From Tom Herbert.
18) Support aborting an ongoing scan in mac80211/cfg80211, from
Vidyullatha Kanchanapally.
19) Use per-bucket spinlock for bpf hash facility, from Tom Leiming.
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1375 commits)
net: bnxt: always return values from _bnxt_get_max_rings
net: bpf: reject invalid shifts
phonet: properly unshare skbs in phonet_rcv()
dwc_eth_qos: Fix dma address for multi-fragment skbs
phy: remove an unneeded condition
mdio: remove an unneed condition
mdio_bus: NULL dereference on allocation error
net: Fix typo in netdev_intersect_features
net: freescale: mac-fec: Fix build error from phy_device API change
net: freescale: ucc_geth: Fix build error from phy_device API change
bonding: Prevent IPv6 link local address on enslaved devices
IB/mlx5: Add flow steering support
net/mlx5_core: Export flow steering API
net/mlx5_core: Make ipv4/ipv6 location more clear
net/mlx5_core: Enable flow steering support for the IB driver
net/mlx5_core: Initialize namespaces only when supported by device
net/mlx5_core: Set priority attributes
net/mlx5_core: Connect flow tables
net/mlx5_core: Introduce modify flow table command
net/mlx5_core: Managing root flow table
...
This commit is contained in:
28
net/6lowpan/6lowpan_i.h
Normal file
28
net/6lowpan/6lowpan_i.h
Normal file
@@ -0,0 +1,28 @@
|
||||
#ifndef __6LOWPAN_I_H
|
||||
#define __6LOWPAN_I_H
|
||||
|
||||
#include <linux/netdevice.h>
|
||||
|
||||
#ifdef CONFIG_6LOWPAN_DEBUGFS
|
||||
int lowpan_dev_debugfs_init(struct net_device *dev);
|
||||
void lowpan_dev_debugfs_exit(struct net_device *dev);
|
||||
|
||||
int __init lowpan_debugfs_init(void);
|
||||
void lowpan_debugfs_exit(void);
|
||||
#else
|
||||
static inline int lowpan_dev_debugfs_init(struct net_device *dev)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void lowpan_dev_debugfs_exit(struct net_device *dev) { }
|
||||
|
||||
static inline int __init lowpan_debugfs_init(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void lowpan_debugfs_exit(void) { }
|
||||
#endif /* CONFIG_6LOWPAN_DEBUGFS */
|
||||
|
||||
#endif /* __6LOWPAN_I_H */
|
||||
@@ -5,12 +5,21 @@ menuconfig 6LOWPAN
|
||||
This enables IPv6 over Low power Wireless Personal Area Network -
|
||||
"6LoWPAN" which is supported by IEEE 802.15.4 or Bluetooth stacks.
|
||||
|
||||
config 6LOWPAN_DEBUGFS
|
||||
bool "6LoWPAN debugfs support"
|
||||
depends on 6LOWPAN
|
||||
depends on DEBUG_FS
|
||||
---help---
|
||||
This enables 6LoWPAN debugfs support. For example to manipulate
|
||||
IPHC context information at runtime.
|
||||
|
||||
menuconfig 6LOWPAN_NHC
|
||||
tristate "Next Header Compression Support"
|
||||
tristate "Next Header and Generic Header Compression Support"
|
||||
depends on 6LOWPAN
|
||||
default y
|
||||
---help---
|
||||
Support for next header compression.
|
||||
Support for next header and generic header compression defined in
|
||||
RFC6282 and RFC7400.
|
||||
|
||||
if 6LOWPAN_NHC
|
||||
|
||||
@@ -58,4 +67,38 @@ config 6LOWPAN_NHC_UDP
|
||||
---help---
|
||||
6LoWPAN IPv6 UDP Header compression according to RFC6282.
|
||||
|
||||
config 6LOWPAN_GHC_EXT_HDR_HOP
|
||||
tristate "GHC Hop-by-Hop Options Header Support"
|
||||
---help---
|
||||
6LoWPAN IPv6 Hop-by-Hop option generic header compression according
|
||||
to RFC7400.
|
||||
|
||||
config 6LOWPAN_GHC_UDP
|
||||
tristate "GHC UDP Support"
|
||||
---help---
|
||||
6LoWPAN IPv6 UDP generic header compression according to RFC7400.
|
||||
|
||||
config 6LOWPAN_GHC_ICMPV6
|
||||
tristate "GHC ICMPv6 Support"
|
||||
---help---
|
||||
6LoWPAN IPv6 ICMPv6 generic header compression according to RFC7400.
|
||||
|
||||
config 6LOWPAN_GHC_EXT_HDR_DEST
|
||||
tristate "GHC Destination Options Header Support"
|
||||
---help---
|
||||
6LoWPAN IPv6 destination option generic header compression according
|
||||
to RFC7400.
|
||||
|
||||
config 6LOWPAN_GHC_EXT_HDR_FRAG
|
||||
tristate "GHC Fragmentation Options Header Support"
|
||||
---help---
|
||||
6LoWPAN IPv6 fragmentation option generic header compression
|
||||
according to RFC7400.
|
||||
|
||||
config 6LOWPAN_GHC_EXT_HDR_ROUTE
|
||||
tristate "GHC Routing Options Header Support"
|
||||
---help---
|
||||
6LoWPAN IPv6 routing option generic header compression according
|
||||
to RFC7400.
|
||||
|
||||
endif
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
obj-$(CONFIG_6LOWPAN) += 6lowpan.o
|
||||
|
||||
6lowpan-y := core.o iphc.o nhc.o
|
||||
6lowpan-$(CONFIG_6LOWPAN_DEBUGFS) += debugfs.o
|
||||
|
||||
#rfc6282 nhcs
|
||||
obj-$(CONFIG_6LOWPAN_NHC_DEST) += nhc_dest.o
|
||||
@@ -10,3 +11,11 @@ obj-$(CONFIG_6LOWPAN_NHC_IPV6) += nhc_ipv6.o
|
||||
obj-$(CONFIG_6LOWPAN_NHC_MOBILITY) += nhc_mobility.o
|
||||
obj-$(CONFIG_6LOWPAN_NHC_ROUTING) += nhc_routing.o
|
||||
obj-$(CONFIG_6LOWPAN_NHC_UDP) += nhc_udp.o
|
||||
|
||||
#rfc7400 ghcs
|
||||
obj-$(CONFIG_6LOWPAN_GHC_EXT_HDR_HOP) += nhc_ghc_ext_hop.o
|
||||
obj-$(CONFIG_6LOWPAN_GHC_UDP) += nhc_ghc_udp.o
|
||||
obj-$(CONFIG_6LOWPAN_GHC_ICMPV6) += nhc_ghc_icmpv6.o
|
||||
obj-$(CONFIG_6LOWPAN_GHC_EXT_HDR_DEST) += nhc_ghc_ext_dest.o
|
||||
obj-$(CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG) += nhc_ghc_ext_frag.o
|
||||
obj-$(CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE) += nhc_ghc_ext_route.o
|
||||
|
||||
@@ -15,19 +15,67 @@
|
||||
|
||||
#include <net/6lowpan.h>
|
||||
|
||||
void lowpan_netdev_setup(struct net_device *dev, enum lowpan_lltypes lltype)
|
||||
#include "6lowpan_i.h"
|
||||
|
||||
int lowpan_register_netdevice(struct net_device *dev,
|
||||
enum lowpan_lltypes lltype)
|
||||
{
|
||||
int ret;
|
||||
|
||||
dev->addr_len = EUI64_ADDR_LEN;
|
||||
dev->type = ARPHRD_6LOWPAN;
|
||||
dev->mtu = IPV6_MIN_MTU;
|
||||
dev->priv_flags |= IFF_NO_QUEUE;
|
||||
|
||||
lowpan_priv(dev)->lltype = lltype;
|
||||
|
||||
ret = register_netdevice(dev);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = lowpan_dev_debugfs_init(dev);
|
||||
if (ret < 0)
|
||||
unregister_netdevice(dev);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(lowpan_netdev_setup);
|
||||
EXPORT_SYMBOL(lowpan_register_netdevice);
|
||||
|
||||
int lowpan_register_netdev(struct net_device *dev,
|
||||
enum lowpan_lltypes lltype)
|
||||
{
|
||||
int ret;
|
||||
|
||||
rtnl_lock();
|
||||
ret = lowpan_register_netdevice(dev, lltype);
|
||||
rtnl_unlock();
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(lowpan_register_netdev);
|
||||
|
||||
void lowpan_unregister_netdevice(struct net_device *dev)
|
||||
{
|
||||
unregister_netdevice(dev);
|
||||
lowpan_dev_debugfs_exit(dev);
|
||||
}
|
||||
EXPORT_SYMBOL(lowpan_unregister_netdevice);
|
||||
|
||||
void lowpan_unregister_netdev(struct net_device *dev)
|
||||
{
|
||||
rtnl_lock();
|
||||
lowpan_unregister_netdevice(dev);
|
||||
rtnl_unlock();
|
||||
}
|
||||
EXPORT_SYMBOL(lowpan_unregister_netdev);
|
||||
|
||||
static int __init lowpan_module_init(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = lowpan_debugfs_init();
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
request_module_nowait("ipv6");
|
||||
|
||||
request_module_nowait("nhc_dest");
|
||||
@@ -40,6 +88,13 @@ static int __init lowpan_module_init(void)
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void __exit lowpan_module_exit(void)
|
||||
{
|
||||
lowpan_debugfs_exit();
|
||||
}
|
||||
|
||||
module_init(lowpan_module_init);
|
||||
module_exit(lowpan_module_exit);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
53
net/6lowpan/debugfs.c
Normal file
53
net/6lowpan/debugfs.c
Normal file
@@ -0,0 +1,53 @@
|
||||
/* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2
|
||||
* as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* Authors:
|
||||
* (C) 2015 Pengutronix, Alexander Aring <aar@pengutronix.de>
|
||||
* Copyright (c) 2015 Nordic Semiconductor. All Rights Reserved.
|
||||
*/
|
||||
|
||||
#include <net/6lowpan.h>
|
||||
|
||||
#include "6lowpan_i.h"
|
||||
|
||||
static struct dentry *lowpan_debugfs;
|
||||
|
||||
int lowpan_dev_debugfs_init(struct net_device *dev)
|
||||
{
|
||||
struct lowpan_priv *lpriv = lowpan_priv(dev);
|
||||
|
||||
/* creating the root */
|
||||
lpriv->iface_debugfs = debugfs_create_dir(dev->name, lowpan_debugfs);
|
||||
if (!lpriv->iface_debugfs)
|
||||
goto fail;
|
||||
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
void lowpan_dev_debugfs_exit(struct net_device *dev)
|
||||
{
|
||||
debugfs_remove_recursive(lowpan_priv(dev)->iface_debugfs);
|
||||
}
|
||||
|
||||
int __init lowpan_debugfs_init(void)
|
||||
{
|
||||
lowpan_debugfs = debugfs_create_dir("6lowpan", NULL);
|
||||
if (!lowpan_debugfs)
|
||||
return -EINVAL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void lowpan_debugfs_exit(void)
|
||||
{
|
||||
debugfs_remove_recursive(lowpan_debugfs);
|
||||
}
|
||||
27
net/6lowpan/nhc_ghc_ext_dest.c
Normal file
27
net/6lowpan/nhc_ghc_ext_dest.c
Normal file
@@ -0,0 +1,27 @@
|
||||
/*
|
||||
* 6LoWPAN Extension Header compression according to RFC7400
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include "nhc.h"
|
||||
|
||||
#define LOWPAN_GHC_EXT_DEST_IDLEN 1
|
||||
#define LOWPAN_GHC_EXT_DEST_ID_0 0xb6
|
||||
#define LOWPAN_GHC_EXT_DEST_MASK_0 0xfe
|
||||
|
||||
static void dest_ghid_setup(struct lowpan_nhc *nhc)
|
||||
{
|
||||
nhc->id[0] = LOWPAN_GHC_EXT_DEST_ID_0;
|
||||
nhc->idmask[0] = LOWPAN_GHC_EXT_DEST_MASK_0;
|
||||
}
|
||||
|
||||
LOWPAN_NHC(ghc_ext_dest, "RFC7400 Destination Extension Header", NEXTHDR_DEST,
|
||||
0, dest_ghid_setup, LOWPAN_GHC_EXT_DEST_IDLEN, NULL, NULL);
|
||||
|
||||
module_lowpan_nhc(ghc_ext_dest);
|
||||
MODULE_DESCRIPTION("6LoWPAN generic header destination extension compression");
|
||||
MODULE_LICENSE("GPL");
|
||||
28
net/6lowpan/nhc_ghc_ext_frag.c
Normal file
28
net/6lowpan/nhc_ghc_ext_frag.c
Normal file
@@ -0,0 +1,28 @@
|
||||
/*
|
||||
* 6LoWPAN Extension Header compression according to RFC7400
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include "nhc.h"
|
||||
|
||||
#define LOWPAN_GHC_EXT_FRAG_IDLEN 1
|
||||
#define LOWPAN_GHC_EXT_FRAG_ID_0 0xb4
|
||||
#define LOWPAN_GHC_EXT_FRAG_MASK_0 0xfe
|
||||
|
||||
static void frag_ghid_setup(struct lowpan_nhc *nhc)
|
||||
{
|
||||
nhc->id[0] = LOWPAN_GHC_EXT_FRAG_ID_0;
|
||||
nhc->idmask[0] = LOWPAN_GHC_EXT_FRAG_MASK_0;
|
||||
}
|
||||
|
||||
LOWPAN_NHC(ghc_ext_frag, "RFC7400 Fragmentation Extension Header",
|
||||
NEXTHDR_FRAGMENT, 0, frag_ghid_setup,
|
||||
LOWPAN_GHC_EXT_FRAG_IDLEN, NULL, NULL);
|
||||
|
||||
module_lowpan_nhc(ghc_ext_frag);
|
||||
MODULE_DESCRIPTION("6LoWPAN generic header fragmentation extension compression");
|
||||
MODULE_LICENSE("GPL");
|
||||
27
net/6lowpan/nhc_ghc_ext_hop.c
Normal file
27
net/6lowpan/nhc_ghc_ext_hop.c
Normal file
@@ -0,0 +1,27 @@
|
||||
/*
|
||||
* 6LoWPAN Extension Header compression according to RFC7400
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include "nhc.h"
|
||||
|
||||
#define LOWPAN_GHC_EXT_HOP_IDLEN 1
|
||||
#define LOWPAN_GHC_EXT_HOP_ID_0 0xb0
|
||||
#define LOWPAN_GHC_EXT_HOP_MASK_0 0xfe
|
||||
|
||||
static void hop_ghid_setup(struct lowpan_nhc *nhc)
|
||||
{
|
||||
nhc->id[0] = LOWPAN_GHC_EXT_HOP_ID_0;
|
||||
nhc->idmask[0] = LOWPAN_GHC_EXT_HOP_MASK_0;
|
||||
}
|
||||
|
||||
LOWPAN_NHC(ghc_ext_hop, "RFC7400 Hop-by-Hop Extension Header", NEXTHDR_HOP, 0,
|
||||
hop_ghid_setup, LOWPAN_GHC_EXT_HOP_IDLEN, NULL, NULL);
|
||||
|
||||
module_lowpan_nhc(ghc_ext_hop);
|
||||
MODULE_DESCRIPTION("6LoWPAN generic header hop-by-hop extension compression");
|
||||
MODULE_LICENSE("GPL");
|
||||
27
net/6lowpan/nhc_ghc_ext_route.c
Normal file
27
net/6lowpan/nhc_ghc_ext_route.c
Normal file
@@ -0,0 +1,27 @@
|
||||
/*
|
||||
* 6LoWPAN Extension Header compression according to RFC7400
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include "nhc.h"
|
||||
|
||||
#define LOWPAN_GHC_EXT_ROUTE_IDLEN 1
|
||||
#define LOWPAN_GHC_EXT_ROUTE_ID_0 0xb2
|
||||
#define LOWPAN_GHC_EXT_ROUTE_MASK_0 0xfe
|
||||
|
||||
static void route_ghid_setup(struct lowpan_nhc *nhc)
|
||||
{
|
||||
nhc->id[0] = LOWPAN_GHC_EXT_ROUTE_ID_0;
|
||||
nhc->idmask[0] = LOWPAN_GHC_EXT_ROUTE_MASK_0;
|
||||
}
|
||||
|
||||
LOWPAN_NHC(ghc_ext_route, "RFC7400 Routing Extension Header", NEXTHDR_ROUTING,
|
||||
0, route_ghid_setup, LOWPAN_GHC_EXT_ROUTE_IDLEN, NULL, NULL);
|
||||
|
||||
module_lowpan_nhc(ghc_ext_route);
|
||||
MODULE_DESCRIPTION("6LoWPAN generic header routing extension compression");
|
||||
MODULE_LICENSE("GPL");
|
||||
27
net/6lowpan/nhc_ghc_icmpv6.c
Normal file
27
net/6lowpan/nhc_ghc_icmpv6.c
Normal file
@@ -0,0 +1,27 @@
|
||||
/*
|
||||
* 6LoWPAN ICMPv6 compression according to RFC7400
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include "nhc.h"
|
||||
|
||||
#define LOWPAN_GHC_ICMPV6_IDLEN 1
|
||||
#define LOWPAN_GHC_ICMPV6_ID_0 0xdf
|
||||
#define LOWPAN_GHC_ICMPV6_MASK_0 0xff
|
||||
|
||||
static void icmpv6_ghid_setup(struct lowpan_nhc *nhc)
|
||||
{
|
||||
nhc->id[0] = LOWPAN_GHC_ICMPV6_ID_0;
|
||||
nhc->idmask[0] = LOWPAN_GHC_ICMPV6_MASK_0;
|
||||
}
|
||||
|
||||
LOWPAN_NHC(ghc_icmpv6, "RFC7400 ICMPv6", NEXTHDR_ICMP, 0,
|
||||
icmpv6_ghid_setup, LOWPAN_GHC_ICMPV6_IDLEN, NULL, NULL);
|
||||
|
||||
module_lowpan_nhc(ghc_icmpv6);
|
||||
MODULE_DESCRIPTION("6LoWPAN generic header ICMPv6 compression");
|
||||
MODULE_LICENSE("GPL");
|
||||
27
net/6lowpan/nhc_ghc_udp.c
Normal file
27
net/6lowpan/nhc_ghc_udp.c
Normal file
@@ -0,0 +1,27 @@
|
||||
/*
|
||||
* 6LoWPAN UDP compression according to RFC7400
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include "nhc.h"
|
||||
|
||||
#define LOWPAN_GHC_UDP_IDLEN 1
|
||||
#define LOWPAN_GHC_UDP_ID_0 0xd0
|
||||
#define LOWPAN_GHC_UDP_MASK_0 0xf8
|
||||
|
||||
static void udp_ghid_setup(struct lowpan_nhc *nhc)
|
||||
{
|
||||
nhc->id[0] = LOWPAN_GHC_UDP_ID_0;
|
||||
nhc->idmask[0] = LOWPAN_GHC_UDP_MASK_0;
|
||||
}
|
||||
|
||||
LOWPAN_NHC(ghc_udp, "RFC7400 UDP", NEXTHDR_UDP, 0,
|
||||
udp_ghid_setup, LOWPAN_GHC_UDP_IDLEN, NULL, NULL);
|
||||
|
||||
module_lowpan_nhc(ghc_udp);
|
||||
MODULE_DESCRIPTION("6LoWPAN generic header UDP compression");
|
||||
MODULE_LICENSE("GPL");
|
||||
@@ -30,6 +30,7 @@
|
||||
#include <linux/etherdevice.h>
|
||||
#include <linux/ethtool.h>
|
||||
#include <net/arp.h>
|
||||
#include <net/switchdev.h>
|
||||
|
||||
#include "vlan.h"
|
||||
#include "vlanproc.h"
|
||||
@@ -542,9 +543,9 @@ static int vlan_dev_init(struct net_device *dev)
|
||||
(1<<__LINK_STATE_DORMANT))) |
|
||||
(1<<__LINK_STATE_PRESENT);
|
||||
|
||||
dev->hw_features = NETIF_F_ALL_CSUM | NETIF_F_SG |
|
||||
dev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG |
|
||||
NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE |
|
||||
NETIF_F_HIGHDMA | NETIF_F_SCTP_CSUM |
|
||||
NETIF_F_HIGHDMA | NETIF_F_SCTP_CRC |
|
||||
NETIF_F_ALL_FCOE;
|
||||
|
||||
dev->features |= real_dev->vlan_features | NETIF_F_LLTX |
|
||||
@@ -774,6 +775,12 @@ static const struct net_device_ops vlan_netdev_ops = {
|
||||
.ndo_netpoll_cleanup = vlan_dev_netpoll_cleanup,
|
||||
#endif
|
||||
.ndo_fix_features = vlan_dev_fix_features,
|
||||
.ndo_fdb_add = switchdev_port_fdb_add,
|
||||
.ndo_fdb_del = switchdev_port_fdb_del,
|
||||
.ndo_fdb_dump = switchdev_port_fdb_dump,
|
||||
.ndo_bridge_setlink = switchdev_port_bridge_setlink,
|
||||
.ndo_bridge_getlink = switchdev_port_bridge_getlink,
|
||||
.ndo_bridge_dellink = switchdev_port_bridge_dellink,
|
||||
.ndo_get_lock_subclass = vlan_dev_get_lock_subclass,
|
||||
.ndo_get_iflink = vlan_dev_get_iflink,
|
||||
};
|
||||
|
||||
@@ -48,6 +48,9 @@ config COMPAT_NETLINK_MESSAGES
|
||||
config NET_INGRESS
|
||||
bool
|
||||
|
||||
config NET_EGRESS
|
||||
bool
|
||||
|
||||
menu "Networking options"
|
||||
|
||||
source "net/packet/Kconfig"
|
||||
@@ -250,9 +253,14 @@ config XPS
|
||||
depends on SMP
|
||||
default y
|
||||
|
||||
config SOCK_CGROUP_DATA
|
||||
bool
|
||||
default n
|
||||
|
||||
config CGROUP_NET_PRIO
|
||||
bool "Network priority cgroup"
|
||||
depends on CGROUPS
|
||||
select SOCK_CGROUP_DATA
|
||||
---help---
|
||||
Cgroup subsystem for use in assigning processes to network priorities on
|
||||
a per-interface basis.
|
||||
@@ -260,6 +268,7 @@ config CGROUP_NET_PRIO
|
||||
config CGROUP_NET_CLASSID
|
||||
bool "Network classid cgroup"
|
||||
depends on CGROUPS
|
||||
select SOCK_CGROUP_DATA
|
||||
---help---
|
||||
Cgroup subsystem for use as general purpose socket classid marker that is
|
||||
being used in cls_cgroup and for netfilter matching.
|
||||
|
||||
@@ -96,7 +96,7 @@ static void vcc_def_wakeup(struct sock *sk)
|
||||
|
||||
rcu_read_lock();
|
||||
wq = rcu_dereference(sk->sk_wq);
|
||||
if (wq_has_sleeper(wq))
|
||||
if (skwq_has_sleeper(wq))
|
||||
wake_up(&wq->wait);
|
||||
rcu_read_unlock();
|
||||
}
|
||||
@@ -117,7 +117,7 @@ static void vcc_write_space(struct sock *sk)
|
||||
|
||||
if (vcc_writable(sk)) {
|
||||
wq = rcu_dereference(sk->sk_wq);
|
||||
if (wq_has_sleeper(wq))
|
||||
if (skwq_has_sleeper(wq))
|
||||
wake_up_interruptible(&wq->wait);
|
||||
|
||||
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
|
||||
|
||||
@@ -21,11 +21,11 @@ struct mpoa_client {
|
||||
uint8_t our_ctrl_addr[ATM_ESA_LEN]; /* MPC's control ATM address */
|
||||
|
||||
rwlock_t ingress_lock;
|
||||
struct in_cache_ops *in_ops; /* ingress cache operations */
|
||||
const struct in_cache_ops *in_ops; /* ingress cache operations */
|
||||
in_cache_entry *in_cache; /* the ingress cache of this MPC */
|
||||
|
||||
rwlock_t egress_lock;
|
||||
struct eg_cache_ops *eg_ops; /* egress cache operations */
|
||||
const struct eg_cache_ops *eg_ops; /* egress cache operations */
|
||||
eg_cache_entry *eg_cache; /* the egress cache of this MPC */
|
||||
|
||||
uint8_t *mps_macs; /* array of MPS MAC addresses, >=1 */
|
||||
|
||||
@@ -534,7 +534,7 @@ static void eg_destroy_cache(struct mpoa_client *mpc)
|
||||
}
|
||||
|
||||
|
||||
static struct in_cache_ops ingress_ops = {
|
||||
static const struct in_cache_ops ingress_ops = {
|
||||
in_cache_add_entry, /* add_entry */
|
||||
in_cache_get, /* get */
|
||||
in_cache_get_with_mask, /* get_with_mask */
|
||||
@@ -548,7 +548,7 @@ static struct in_cache_ops ingress_ops = {
|
||||
in_destroy_cache /* destroy_cache */
|
||||
};
|
||||
|
||||
static struct eg_cache_ops egress_ops = {
|
||||
static const struct eg_cache_ops egress_ops = {
|
||||
eg_cache_add_entry, /* add_entry */
|
||||
eg_cache_get_by_cache_id, /* get_by_cache_id */
|
||||
eg_cache_get_by_tag, /* get_by_tag */
|
||||
|
||||
@@ -185,7 +185,8 @@ static int batadv_iv_ogm_orig_add_if(struct batadv_orig_node *orig_node,
|
||||
static int batadv_iv_ogm_orig_del_if(struct batadv_orig_node *orig_node,
|
||||
int max_if_num, int del_if_num)
|
||||
{
|
||||
int chunk_size, ret = -ENOMEM, if_offset;
|
||||
int ret = -ENOMEM;
|
||||
size_t chunk_size, if_offset;
|
||||
void *data_ptr = NULL;
|
||||
|
||||
spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
|
||||
@@ -203,8 +204,9 @@ static int batadv_iv_ogm_orig_del_if(struct batadv_orig_node *orig_node,
|
||||
memcpy(data_ptr, orig_node->bat_iv.bcast_own, del_if_num * chunk_size);
|
||||
|
||||
/* copy second part */
|
||||
if_offset = (del_if_num + 1) * chunk_size;
|
||||
memcpy((char *)data_ptr + del_if_num * chunk_size,
|
||||
orig_node->bat_iv.bcast_own + ((del_if_num + 1) * chunk_size),
|
||||
(uint8_t *)orig_node->bat_iv.bcast_own + if_offset,
|
||||
(max_if_num - del_if_num) * chunk_size);
|
||||
|
||||
free_bcast_own:
|
||||
@@ -361,7 +363,6 @@ batadv_iv_ogm_primary_iface_set(struct batadv_hard_iface *hard_iface)
|
||||
unsigned char *ogm_buff = hard_iface->bat_iv.ogm_buff;
|
||||
|
||||
batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
|
||||
batadv_ogm_packet->flags = BATADV_PRIMARIES_FIRST_HOP;
|
||||
batadv_ogm_packet->ttl = BATADV_TTL;
|
||||
}
|
||||
|
||||
@@ -842,8 +843,6 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
|
||||
"Forwarding packet: tq: %i, ttl: %i\n",
|
||||
batadv_ogm_packet->tq, batadv_ogm_packet->ttl);
|
||||
|
||||
/* switch of primaries first hop flag when forwarding */
|
||||
batadv_ogm_packet->flags &= ~BATADV_PRIMARIES_FIRST_HOP;
|
||||
if (is_single_hop_neigh)
|
||||
batadv_ogm_packet->flags |= BATADV_DIRECTLINK;
|
||||
else
|
||||
@@ -1379,6 +1378,7 @@ batadv_iv_ogm_process_per_outif(const struct sk_buff *skb, int ogm_offset,
|
||||
struct batadv_hard_iface *if_outgoing)
|
||||
{
|
||||
struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
|
||||
struct batadv_hardif_neigh_node *hardif_neigh = NULL;
|
||||
struct batadv_neigh_node *router = NULL;
|
||||
struct batadv_neigh_node *router_router = NULL;
|
||||
struct batadv_orig_node *orig_neigh_node;
|
||||
@@ -1423,6 +1423,13 @@ batadv_iv_ogm_process_per_outif(const struct sk_buff *skb, int ogm_offset,
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (is_single_hop_neigh) {
|
||||
hardif_neigh = batadv_hardif_neigh_get(if_incoming,
|
||||
ethhdr->h_source);
|
||||
if (hardif_neigh)
|
||||
hardif_neigh->last_seen = jiffies;
|
||||
}
|
||||
|
||||
router = batadv_orig_router_get(orig_node, if_outgoing);
|
||||
if (router) {
|
||||
router_router = batadv_orig_router_get(router->orig_node,
|
||||
@@ -1557,6 +1564,8 @@ batadv_iv_ogm_process_per_outif(const struct sk_buff *skb, int ogm_offset,
|
||||
batadv_neigh_node_free_ref(router_router);
|
||||
if (orig_neigh_router)
|
||||
batadv_neigh_node_free_ref(orig_neigh_router);
|
||||
if (hardif_neigh)
|
||||
batadv_hardif_neigh_free_ref(hardif_neigh);
|
||||
|
||||
kfree_skb(skb_priv);
|
||||
}
|
||||
@@ -1861,6 +1870,58 @@ static void batadv_iv_ogm_orig_print(struct batadv_priv *bat_priv,
|
||||
seq_puts(seq, "No batman nodes in range ...\n");
|
||||
}
|
||||
|
||||
/**
|
||||
* batadv_iv_hardif_neigh_print - print a single hop neighbour node
|
||||
* @seq: neighbour table seq_file struct
|
||||
* @hardif_neigh: hardif neighbour information
|
||||
*/
|
||||
static void
|
||||
batadv_iv_hardif_neigh_print(struct seq_file *seq,
|
||||
struct batadv_hardif_neigh_node *hardif_neigh)
|
||||
{
|
||||
int last_secs, last_msecs;
|
||||
|
||||
last_secs = jiffies_to_msecs(jiffies - hardif_neigh->last_seen) / 1000;
|
||||
last_msecs = jiffies_to_msecs(jiffies - hardif_neigh->last_seen) % 1000;
|
||||
|
||||
seq_printf(seq, " %10s %pM %4i.%03is\n",
|
||||
hardif_neigh->if_incoming->net_dev->name,
|
||||
hardif_neigh->addr, last_secs, last_msecs);
|
||||
}
|
||||
|
||||
/**
|
||||
* batadv_iv_ogm_neigh_print - print the single hop neighbour list
|
||||
* @bat_priv: the bat priv with all the soft interface information
|
||||
* @seq: neighbour table seq_file struct
|
||||
*/
|
||||
static void batadv_iv_neigh_print(struct batadv_priv *bat_priv,
|
||||
struct seq_file *seq)
|
||||
{
|
||||
struct net_device *net_dev = (struct net_device *)seq->private;
|
||||
struct batadv_hardif_neigh_node *hardif_neigh;
|
||||
struct batadv_hard_iface *hard_iface;
|
||||
int batman_count = 0;
|
||||
|
||||
seq_printf(seq, " %10s %-13s %s\n",
|
||||
"IF", "Neighbor", "last-seen");
|
||||
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
|
||||
if (hard_iface->soft_iface != net_dev)
|
||||
continue;
|
||||
|
||||
hlist_for_each_entry_rcu(hardif_neigh,
|
||||
&hard_iface->neigh_list, list) {
|
||||
batadv_iv_hardif_neigh_print(seq, hardif_neigh);
|
||||
batman_count++;
|
||||
}
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
if (batman_count == 0)
|
||||
seq_puts(seq, "No batman nodes in range ...\n");
|
||||
}
|
||||
|
||||
/**
|
||||
* batadv_iv_ogm_neigh_cmp - compare the metrics of two neighbors
|
||||
* @neigh1: the first neighbor object of the comparison
|
||||
@@ -1902,8 +1963,8 @@ static int batadv_iv_ogm_neigh_cmp(struct batadv_neigh_node *neigh1,
|
||||
}
|
||||
|
||||
/**
|
||||
* batadv_iv_ogm_neigh_is_eob - check if neigh1 is equally good or better than
|
||||
* neigh2 from the metric prospective
|
||||
* batadv_iv_ogm_neigh_is_sob - check if neigh1 is similarly good or better
|
||||
* than neigh2 from the metric prospective
|
||||
* @neigh1: the first neighbor object of the comparison
|
||||
* @if_outgoing1: outgoing interface for the first neighbor
|
||||
* @neigh2: the second neighbor object of the comparison
|
||||
@@ -1913,7 +1974,7 @@ static int batadv_iv_ogm_neigh_cmp(struct batadv_neigh_node *neigh1,
|
||||
* the metric via neigh2, false otherwise.
|
||||
*/
|
||||
static bool
|
||||
batadv_iv_ogm_neigh_is_eob(struct batadv_neigh_node *neigh1,
|
||||
batadv_iv_ogm_neigh_is_sob(struct batadv_neigh_node *neigh1,
|
||||
struct batadv_hard_iface *if_outgoing1,
|
||||
struct batadv_neigh_node *neigh2,
|
||||
struct batadv_hard_iface *if_outgoing2)
|
||||
@@ -1953,7 +2014,8 @@ static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
|
||||
.bat_ogm_schedule = batadv_iv_ogm_schedule,
|
||||
.bat_ogm_emit = batadv_iv_ogm_emit,
|
||||
.bat_neigh_cmp = batadv_iv_ogm_neigh_cmp,
|
||||
.bat_neigh_is_equiv_or_better = batadv_iv_ogm_neigh_is_eob,
|
||||
.bat_neigh_is_similar_or_better = batadv_iv_ogm_neigh_is_sob,
|
||||
.bat_neigh_print = batadv_iv_neigh_print,
|
||||
.bat_orig_print = batadv_iv_ogm_orig_print,
|
||||
.bat_orig_free = batadv_iv_ogm_orig_free,
|
||||
.bat_orig_add_if = batadv_iv_ogm_orig_add_if,
|
||||
|
||||
@@ -260,7 +260,9 @@ batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
|
||||
}
|
||||
|
||||
/* all claims gone, initialize CRC */
|
||||
spin_lock_bh(&backbone_gw->crc_lock);
|
||||
backbone_gw->crc = BATADV_BLA_CRC_INIT;
|
||||
spin_unlock_bh(&backbone_gw->crc_lock);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -408,6 +410,7 @@ batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, u8 *orig,
|
||||
entry->lasttime = jiffies;
|
||||
entry->crc = BATADV_BLA_CRC_INIT;
|
||||
entry->bat_priv = bat_priv;
|
||||
spin_lock_init(&entry->crc_lock);
|
||||
atomic_set(&entry->request_sent, 0);
|
||||
atomic_set(&entry->wait_periods, 0);
|
||||
ether_addr_copy(entry->orig, orig);
|
||||
@@ -557,7 +560,9 @@ static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
|
||||
__be16 crc;
|
||||
|
||||
memcpy(mac, batadv_announce_mac, 4);
|
||||
spin_lock_bh(&backbone_gw->crc_lock);
|
||||
crc = htons(backbone_gw->crc);
|
||||
spin_unlock_bh(&backbone_gw->crc_lock);
|
||||
memcpy(&mac[4], &crc, 2);
|
||||
|
||||
batadv_bla_send_claim(bat_priv, mac, backbone_gw->vid,
|
||||
@@ -618,14 +623,18 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
|
||||
"bla_add_claim(): changing ownership for %pM, vid %d\n",
|
||||
mac, BATADV_PRINT_VID(vid));
|
||||
|
||||
spin_lock_bh(&claim->backbone_gw->crc_lock);
|
||||
claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
|
||||
spin_unlock_bh(&claim->backbone_gw->crc_lock);
|
||||
batadv_backbone_gw_free_ref(claim->backbone_gw);
|
||||
}
|
||||
/* set (new) backbone gw */
|
||||
atomic_inc(&backbone_gw->refcount);
|
||||
claim->backbone_gw = backbone_gw;
|
||||
|
||||
spin_lock_bh(&backbone_gw->crc_lock);
|
||||
backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
|
||||
spin_unlock_bh(&backbone_gw->crc_lock);
|
||||
backbone_gw->lasttime = jiffies;
|
||||
|
||||
claim_free_ref:
|
||||
@@ -653,7 +662,9 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
|
||||
batadv_choose_claim, claim);
|
||||
batadv_claim_free_ref(claim); /* reference from the hash is gone */
|
||||
|
||||
spin_lock_bh(&claim->backbone_gw->crc_lock);
|
||||
claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
|
||||
spin_unlock_bh(&claim->backbone_gw->crc_lock);
|
||||
|
||||
/* don't need the reference from hash_find() anymore */
|
||||
batadv_claim_free_ref(claim);
|
||||
@@ -664,7 +675,7 @@ static int batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr,
|
||||
u8 *backbone_addr, unsigned short vid)
|
||||
{
|
||||
struct batadv_bla_backbone_gw *backbone_gw;
|
||||
u16 crc;
|
||||
u16 backbone_crc, crc;
|
||||
|
||||
if (memcmp(an_addr, batadv_announce_mac, 4) != 0)
|
||||
return 0;
|
||||
@@ -683,12 +694,16 @@ static int batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr,
|
||||
"handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %#.4x\n",
|
||||
BATADV_PRINT_VID(vid), backbone_gw->orig, crc);
|
||||
|
||||
if (backbone_gw->crc != crc) {
|
||||
spin_lock_bh(&backbone_gw->crc_lock);
|
||||
backbone_crc = backbone_gw->crc;
|
||||
spin_unlock_bh(&backbone_gw->crc_lock);
|
||||
|
||||
if (backbone_crc != crc) {
|
||||
batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
|
||||
"handle_announce(): CRC FAILED for %pM/%d (my = %#.4x, sent = %#.4x)\n",
|
||||
backbone_gw->orig,
|
||||
BATADV_PRINT_VID(backbone_gw->vid),
|
||||
backbone_gw->crc, crc);
|
||||
backbone_crc, crc);
|
||||
|
||||
batadv_bla_send_request(backbone_gw);
|
||||
} else {
|
||||
@@ -1153,6 +1168,26 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* batadv_bla_status_update - purge bla interfaces if necessary
|
||||
* @net_dev: the soft interface net device
|
||||
*/
|
||||
void batadv_bla_status_update(struct net_device *net_dev)
|
||||
{
|
||||
struct batadv_priv *bat_priv = netdev_priv(net_dev);
|
||||
struct batadv_hard_iface *primary_if;
|
||||
|
||||
primary_if = batadv_primary_if_get_selected(bat_priv);
|
||||
if (!primary_if)
|
||||
return;
|
||||
|
||||
/* this function already purges everything when bla is disabled,
|
||||
* so just call that one.
|
||||
*/
|
||||
batadv_bla_update_orig_address(bat_priv, primary_if, primary_if);
|
||||
batadv_hardif_free_ref(primary_if);
|
||||
}
|
||||
|
||||
/* periodic work to do:
|
||||
* * purge structures when they are too old
|
||||
* * send announcements
|
||||
@@ -1647,6 +1682,7 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
|
||||
struct batadv_bla_claim *claim;
|
||||
struct batadv_hard_iface *primary_if;
|
||||
struct hlist_head *head;
|
||||
u16 backbone_crc;
|
||||
u32 i;
|
||||
bool is_own;
|
||||
u8 *primary_addr;
|
||||
@@ -1669,11 +1705,15 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
|
||||
hlist_for_each_entry_rcu(claim, head, hash_entry) {
|
||||
is_own = batadv_compare_eth(claim->backbone_gw->orig,
|
||||
primary_addr);
|
||||
|
||||
spin_lock_bh(&claim->backbone_gw->crc_lock);
|
||||
backbone_crc = claim->backbone_gw->crc;
|
||||
spin_unlock_bh(&claim->backbone_gw->crc_lock);
|
||||
seq_printf(seq, " * %pM on %5d by %pM [%c] (%#.4x)\n",
|
||||
claim->addr, BATADV_PRINT_VID(claim->vid),
|
||||
claim->backbone_gw->orig,
|
||||
(is_own ? 'x' : ' '),
|
||||
claim->backbone_gw->crc);
|
||||
backbone_crc);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
}
|
||||
@@ -1692,6 +1732,7 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
|
||||
struct batadv_hard_iface *primary_if;
|
||||
struct hlist_head *head;
|
||||
int secs, msecs;
|
||||
u16 backbone_crc;
|
||||
u32 i;
|
||||
bool is_own;
|
||||
u8 *primary_addr;
|
||||
@@ -1722,10 +1763,14 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
|
||||
if (is_own)
|
||||
continue;
|
||||
|
||||
spin_lock_bh(&backbone_gw->crc_lock);
|
||||
backbone_crc = backbone_gw->crc;
|
||||
spin_unlock_bh(&backbone_gw->crc_lock);
|
||||
|
||||
seq_printf(seq, " * %pM on %5d %4i.%03is (%#.4x)\n",
|
||||
backbone_gw->orig,
|
||||
BATADV_PRINT_VID(backbone_gw->vid), secs,
|
||||
msecs, backbone_gw->crc);
|
||||
msecs, backbone_crc);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
@@ -22,6 +22,7 @@
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
struct net_device;
|
||||
struct seq_file;
|
||||
struct sk_buff;
|
||||
|
||||
@@ -42,6 +43,7 @@ int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
|
||||
void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
|
||||
struct batadv_hard_iface *primary_if,
|
||||
struct batadv_hard_iface *oldif);
|
||||
void batadv_bla_status_update(struct net_device *net_dev);
|
||||
int batadv_bla_init(struct batadv_priv *bat_priv);
|
||||
void batadv_bla_free(struct batadv_priv *bat_priv);
|
||||
|
||||
|
||||
@@ -262,6 +262,13 @@ static int batadv_algorithms_open(struct inode *inode, struct file *file)
|
||||
return single_open(file, batadv_algo_seq_print_text, NULL);
|
||||
}
|
||||
|
||||
static int neighbors_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
struct net_device *net_dev = (struct net_device *)inode->i_private;
|
||||
|
||||
return single_open(file, batadv_hardif_neigh_seq_print_text, net_dev);
|
||||
}
|
||||
|
||||
static int batadv_originators_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
struct net_device *net_dev = (struct net_device *)inode->i_private;
|
||||
@@ -375,6 +382,7 @@ static struct batadv_debuginfo *batadv_general_debuginfos[] = {
|
||||
};
|
||||
|
||||
/* The following attributes are per soft interface */
|
||||
static BATADV_DEBUGINFO(neighbors, S_IRUGO, neighbors_open);
|
||||
static BATADV_DEBUGINFO(originators, S_IRUGO, batadv_originators_open);
|
||||
static BATADV_DEBUGINFO(gateways, S_IRUGO, batadv_gateways_open);
|
||||
static BATADV_DEBUGINFO(transtable_global, S_IRUGO,
|
||||
@@ -394,6 +402,7 @@ static BATADV_DEBUGINFO(nc_nodes, S_IRUGO, batadv_nc_nodes_open);
|
||||
#endif
|
||||
|
||||
static struct batadv_debuginfo *batadv_mesh_debuginfos[] = {
|
||||
&batadv_debuginfo_neighbors,
|
||||
&batadv_debuginfo_originators,
|
||||
&batadv_debuginfo_gateways,
|
||||
&batadv_debuginfo_transtable_global,
|
||||
|
||||
@@ -71,14 +71,14 @@ void batadv_frag_purge_orig(struct batadv_orig_node *orig_node,
|
||||
|
||||
for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
|
||||
chain = &orig_node->fragments[i];
|
||||
spin_lock_bh(&orig_node->fragments[i].lock);
|
||||
spin_lock_bh(&chain->lock);
|
||||
|
||||
if (!check_cb || check_cb(chain)) {
|
||||
batadv_frag_clear_chain(&orig_node->fragments[i].head);
|
||||
orig_node->fragments[i].size = 0;
|
||||
batadv_frag_clear_chain(&chain->head);
|
||||
chain->size = 0;
|
||||
}
|
||||
|
||||
spin_unlock_bh(&orig_node->fragments[i].lock);
|
||||
spin_unlock_bh(&chain->lock);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -31,27 +31,23 @@
|
||||
#include "packet.h"
|
||||
|
||||
/**
|
||||
* batadv_parse_gw_bandwidth - parse supplied string buffer to extract download
|
||||
* and upload bandwidth information
|
||||
* batadv_parse_throughput - parse supplied string buffer to extract throughput
|
||||
* information
|
||||
* @net_dev: the soft interface net device
|
||||
* @buff: string buffer to parse
|
||||
* @down: pointer holding the returned download bandwidth information
|
||||
* @up: pointer holding the returned upload bandwidth information
|
||||
* @description: text shown when throughput string cannot be parsed
|
||||
* @throughput: pointer holding the returned throughput information
|
||||
*
|
||||
* Returns false on parse error and true otherwise.
|
||||
*/
|
||||
static bool batadv_parse_gw_bandwidth(struct net_device *net_dev, char *buff,
|
||||
u32 *down, u32 *up)
|
||||
static bool batadv_parse_throughput(struct net_device *net_dev, char *buff,
|
||||
const char *description, u32 *throughput)
|
||||
{
|
||||
enum batadv_bandwidth_units bw_unit_type = BATADV_BW_UNIT_KBIT;
|
||||
char *slash_ptr, *tmp_ptr;
|
||||
u64 ldown, lup;
|
||||
u64 lthroughput;
|
||||
char *tmp_ptr;
|
||||
int ret;
|
||||
|
||||
slash_ptr = strchr(buff, '/');
|
||||
if (slash_ptr)
|
||||
*slash_ptr = 0;
|
||||
|
||||
if (strlen(buff) > 4) {
|
||||
tmp_ptr = buff + strlen(buff) - 4;
|
||||
|
||||
@@ -63,90 +59,75 @@ static bool batadv_parse_gw_bandwidth(struct net_device *net_dev, char *buff,
|
||||
*tmp_ptr = '\0';
|
||||
}
|
||||
|
||||
ret = kstrtou64(buff, 10, &ldown);
|
||||
ret = kstrtou64(buff, 10, <hroughput);
|
||||
if (ret) {
|
||||
batadv_err(net_dev,
|
||||
"Download speed of gateway mode invalid: %s\n",
|
||||
buff);
|
||||
"Invalid throughput speed for %s: %s\n",
|
||||
description, buff);
|
||||
return false;
|
||||
}
|
||||
|
||||
switch (bw_unit_type) {
|
||||
case BATADV_BW_UNIT_MBIT:
|
||||
/* prevent overflow */
|
||||
if (U64_MAX / 10 < ldown) {
|
||||
if (U64_MAX / 10 < lthroughput) {
|
||||
batadv_err(net_dev,
|
||||
"Download speed of gateway mode too large: %s\n",
|
||||
buff);
|
||||
"Throughput speed for %s too large: %s\n",
|
||||
description, buff);
|
||||
return false;
|
||||
}
|
||||
|
||||
ldown *= 10;
|
||||
lthroughput *= 10;
|
||||
break;
|
||||
case BATADV_BW_UNIT_KBIT:
|
||||
default:
|
||||
ldown = div_u64(ldown, 100);
|
||||
lthroughput = div_u64(lthroughput, 100);
|
||||
break;
|
||||
}
|
||||
|
||||
if (U32_MAX < ldown) {
|
||||
if (lthroughput > U32_MAX) {
|
||||
batadv_err(net_dev,
|
||||
"Download speed of gateway mode too large: %s\n",
|
||||
buff);
|
||||
"Throughput speed for %s too large: %s\n",
|
||||
description, buff);
|
||||
return false;
|
||||
}
|
||||
|
||||
*down = ldown;
|
||||
*throughput = lthroughput;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* batadv_parse_gw_bandwidth - parse supplied string buffer to extract download
|
||||
* and upload bandwidth information
|
||||
* @net_dev: the soft interface net device
|
||||
* @buff: string buffer to parse
|
||||
* @down: pointer holding the returned download bandwidth information
|
||||
* @up: pointer holding the returned upload bandwidth information
|
||||
*
|
||||
* Return: false on parse error and true otherwise.
|
||||
*/
|
||||
static bool batadv_parse_gw_bandwidth(struct net_device *net_dev, char *buff,
|
||||
u32 *down, u32 *up)
|
||||
{
|
||||
char *slash_ptr;
|
||||
bool ret;
|
||||
|
||||
slash_ptr = strchr(buff, '/');
|
||||
if (slash_ptr)
|
||||
*slash_ptr = 0;
|
||||
|
||||
ret = batadv_parse_throughput(net_dev, buff, "download gateway speed",
|
||||
down);
|
||||
if (!ret)
|
||||
return false;
|
||||
|
||||
/* we also got some upload info */
|
||||
if (slash_ptr) {
|
||||
bw_unit_type = BATADV_BW_UNIT_KBIT;
|
||||
|
||||
if (strlen(slash_ptr + 1) > 4) {
|
||||
tmp_ptr = slash_ptr + 1 - 4 + strlen(slash_ptr + 1);
|
||||
|
||||
if (strncasecmp(tmp_ptr, "mbit", 4) == 0)
|
||||
bw_unit_type = BATADV_BW_UNIT_MBIT;
|
||||
|
||||
if ((strncasecmp(tmp_ptr, "kbit", 4) == 0) ||
|
||||
(bw_unit_type == BATADV_BW_UNIT_MBIT))
|
||||
*tmp_ptr = '\0';
|
||||
}
|
||||
|
||||
ret = kstrtou64(slash_ptr + 1, 10, &lup);
|
||||
if (ret) {
|
||||
batadv_err(net_dev,
|
||||
"Upload speed of gateway mode invalid: %s\n",
|
||||
slash_ptr + 1);
|
||||
ret = batadv_parse_throughput(net_dev, slash_ptr + 1,
|
||||
"upload gateway speed", up);
|
||||
if (!ret)
|
||||
return false;
|
||||
}
|
||||
|
||||
switch (bw_unit_type) {
|
||||
case BATADV_BW_UNIT_MBIT:
|
||||
/* prevent overflow */
|
||||
if (U64_MAX / 10 < lup) {
|
||||
batadv_err(net_dev,
|
||||
"Upload speed of gateway mode too large: %s\n",
|
||||
slash_ptr + 1);
|
||||
return false;
|
||||
}
|
||||
|
||||
lup *= 10;
|
||||
break;
|
||||
case BATADV_BW_UNIT_KBIT:
|
||||
default:
|
||||
lup = div_u64(lup, 100);
|
||||
break;
|
||||
}
|
||||
|
||||
if (U32_MAX < lup) {
|
||||
batadv_err(net_dev,
|
||||
"Upload speed of gateway mode too large: %s\n",
|
||||
slash_ptr + 1);
|
||||
return false;
|
||||
}
|
||||
|
||||
*up = lup;
|
||||
}
|
||||
|
||||
return true;
|
||||
|
||||
@@ -32,6 +32,7 @@
|
||||
#include <linux/rculist.h>
|
||||
#include <linux/rtnetlink.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/workqueue.h>
|
||||
#include <net/net_namespace.h>
|
||||
|
||||
@@ -464,7 +465,8 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
|
||||
hard_iface->soft_iface = soft_iface;
|
||||
bat_priv = netdev_priv(hard_iface->soft_iface);
|
||||
|
||||
ret = netdev_master_upper_dev_link(hard_iface->net_dev, soft_iface);
|
||||
ret = netdev_master_upper_dev_link(hard_iface->net_dev,
|
||||
soft_iface, NULL, NULL);
|
||||
if (ret)
|
||||
goto err_dev;
|
||||
|
||||
@@ -638,9 +640,12 @@ batadv_hardif_add_interface(struct net_device *net_dev)
|
||||
goto free_sysfs;
|
||||
|
||||
INIT_LIST_HEAD(&hard_iface->list);
|
||||
INIT_HLIST_HEAD(&hard_iface->neigh_list);
|
||||
INIT_WORK(&hard_iface->cleanup_work,
|
||||
batadv_hardif_remove_interface_finish);
|
||||
|
||||
spin_lock_init(&hard_iface->neigh_list_lock);
|
||||
|
||||
hard_iface->num_bcasts = BATADV_NUM_BCASTS_DEFAULT;
|
||||
if (batadv_is_wifi_netdev(net_dev))
|
||||
hard_iface->num_bcasts = BATADV_NUM_BCASTS_WIRELESS;
|
||||
@@ -708,7 +713,8 @@ static int batadv_hard_if_event(struct notifier_block *this,
|
||||
}
|
||||
|
||||
hard_iface = batadv_hardif_get_by_netdev(net_dev);
|
||||
if (!hard_iface && event == NETDEV_REGISTER)
|
||||
if (!hard_iface && (event == NETDEV_REGISTER ||
|
||||
event == NETDEV_POST_TYPE_CHANGE))
|
||||
hard_iface = batadv_hardif_add_interface(net_dev);
|
||||
|
||||
if (!hard_iface)
|
||||
@@ -723,6 +729,7 @@ static int batadv_hard_if_event(struct notifier_block *this,
|
||||
batadv_hardif_deactivate_interface(hard_iface);
|
||||
break;
|
||||
case NETDEV_UNREGISTER:
|
||||
case NETDEV_PRE_TYPE_CHANGE:
|
||||
list_del_rcu(&hard_iface->list);
|
||||
|
||||
batadv_hardif_remove_interface(hard_iface);
|
||||
|
||||
@@ -552,7 +552,7 @@ int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
|
||||
!bat_algo_ops->bat_ogm_schedule ||
|
||||
!bat_algo_ops->bat_ogm_emit ||
|
||||
!bat_algo_ops->bat_neigh_cmp ||
|
||||
!bat_algo_ops->bat_neigh_is_equiv_or_better) {
|
||||
!bat_algo_ops->bat_neigh_is_similar_or_better) {
|
||||
pr_info("Routing algo '%s' does not implement required ops\n",
|
||||
bat_algo_ops->name);
|
||||
return -EINVAL;
|
||||
@@ -747,7 +747,7 @@ static u16 batadv_tvlv_container_list_size(struct batadv_priv *bat_priv)
|
||||
static void batadv_tvlv_container_remove(struct batadv_priv *bat_priv,
|
||||
struct batadv_tvlv_container *tvlv)
|
||||
{
|
||||
lockdep_assert_held(&bat_priv->tvlv.handler_list_lock);
|
||||
lockdep_assert_held(&bat_priv->tvlv.container_list_lock);
|
||||
|
||||
if (!tvlv)
|
||||
return;
|
||||
@@ -908,7 +908,7 @@ u16 batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
|
||||
* appropriate handlers
|
||||
* @bat_priv: the bat priv with all the soft interface information
|
||||
* @tvlv_handler: tvlv callback function handling the tvlv content
|
||||
* @ogm_source: flag indicating wether the tvlv is an ogm or a unicast packet
|
||||
* @ogm_source: flag indicating whether the tvlv is an ogm or a unicast packet
|
||||
* @orig_node: orig node emitting the ogm packet
|
||||
* @src: source mac address of the unicast packet
|
||||
* @dst: destination mac address of the unicast packet
|
||||
@@ -961,7 +961,7 @@ static int batadv_tvlv_call_handler(struct batadv_priv *bat_priv,
|
||||
* batadv_tvlv_containers_process - parse the given tvlv buffer to call the
|
||||
* appropriate handlers
|
||||
* @bat_priv: the bat priv with all the soft interface information
|
||||
* @ogm_source: flag indicating wether the tvlv is an ogm or a unicast packet
|
||||
* @ogm_source: flag indicating whether the tvlv is an ogm or a unicast packet
|
||||
* @orig_node: orig node emitting the ogm packet
|
||||
* @src: source mac address of the unicast packet
|
||||
* @dst: destination mac address of the unicast packet
|
||||
@@ -1143,15 +1143,14 @@ void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, u8 *src,
|
||||
struct batadv_unicast_tvlv_packet *unicast_tvlv_packet;
|
||||
struct batadv_tvlv_hdr *tvlv_hdr;
|
||||
struct batadv_orig_node *orig_node;
|
||||
struct sk_buff *skb = NULL;
|
||||
struct sk_buff *skb;
|
||||
unsigned char *tvlv_buff;
|
||||
unsigned int tvlv_len;
|
||||
ssize_t hdr_len = sizeof(*unicast_tvlv_packet);
|
||||
bool ret = false;
|
||||
|
||||
orig_node = batadv_orig_hash_find(bat_priv, dst);
|
||||
if (!orig_node)
|
||||
goto out;
|
||||
return;
|
||||
|
||||
tvlv_len = sizeof(*tvlv_hdr) + tvlv_value_len;
|
||||
|
||||
@@ -1180,14 +1179,10 @@ void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, u8 *src,
|
||||
tvlv_buff += sizeof(*tvlv_hdr);
|
||||
memcpy(tvlv_buff, tvlv_value, tvlv_value_len);
|
||||
|
||||
if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
|
||||
ret = true;
|
||||
|
||||
out:
|
||||
if (skb && !ret)
|
||||
if (batadv_send_skb_to_orig(skb, orig_node, NULL) == NET_XMIT_DROP)
|
||||
kfree_skb(skb);
|
||||
if (orig_node)
|
||||
batadv_orig_node_free_ref(orig_node);
|
||||
out:
|
||||
batadv_orig_node_free_ref(orig_node);
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -24,7 +24,7 @@
|
||||
#define BATADV_DRIVER_DEVICE "batman-adv"
|
||||
|
||||
#ifndef BATADV_SOURCE_VERSION
|
||||
#define BATADV_SOURCE_VERSION "2015.2"
|
||||
#define BATADV_SOURCE_VERSION "2016.0"
|
||||
#endif
|
||||
|
||||
/* B.A.T.M.A.N. parameters */
|
||||
@@ -109,7 +109,7 @@
|
||||
#define BATADV_MAX_AGGREGATION_MS 100
|
||||
|
||||
#define BATADV_BLA_PERIOD_LENGTH 10000 /* 10 seconds */
|
||||
#define BATADV_BLA_BACKBONE_TIMEOUT (BATADV_BLA_PERIOD_LENGTH * 3)
|
||||
#define BATADV_BLA_BACKBONE_TIMEOUT (BATADV_BLA_PERIOD_LENGTH * 6)
|
||||
#define BATADV_BLA_CLAIM_TIMEOUT (BATADV_BLA_PERIOD_LENGTH * 10)
|
||||
#define BATADV_BLA_WAIT_PERIODS 3
|
||||
|
||||
|
||||
@@ -244,9 +244,7 @@ static void batadv_nc_path_free_ref(struct batadv_nc_path *nc_path)
|
||||
*/
|
||||
static void batadv_nc_packet_free(struct batadv_nc_packet *nc_packet)
|
||||
{
|
||||
if (nc_packet->skb)
|
||||
kfree_skb(nc_packet->skb);
|
||||
|
||||
kfree_skb(nc_packet->skb);
|
||||
batadv_nc_path_free_ref(nc_packet->nc_path);
|
||||
kfree(nc_packet);
|
||||
}
|
||||
|
||||
@@ -201,6 +201,47 @@ void batadv_neigh_ifinfo_free_ref(struct batadv_neigh_ifinfo *neigh_ifinfo)
|
||||
call_rcu(&neigh_ifinfo->rcu, batadv_neigh_ifinfo_free_rcu);
|
||||
}
|
||||
|
||||
/**
|
||||
* batadv_hardif_neigh_free_rcu - free the hardif neigh_node
|
||||
* @rcu: rcu pointer of the neigh_node
|
||||
*/
|
||||
static void batadv_hardif_neigh_free_rcu(struct rcu_head *rcu)
|
||||
{
|
||||
struct batadv_hardif_neigh_node *hardif_neigh;
|
||||
|
||||
hardif_neigh = container_of(rcu, struct batadv_hardif_neigh_node, rcu);
|
||||
|
||||
spin_lock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
|
||||
hlist_del_init_rcu(&hardif_neigh->list);
|
||||
spin_unlock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
|
||||
|
||||
batadv_hardif_free_ref_now(hardif_neigh->if_incoming);
|
||||
kfree(hardif_neigh);
|
||||
}
|
||||
|
||||
/**
|
||||
* batadv_hardif_neigh_free_now - decrement the hardif neighbors refcounter
|
||||
* and possibly free it (without rcu callback)
|
||||
* @hardif_neigh: hardif neigh neighbor to free
|
||||
*/
|
||||
static void
|
||||
batadv_hardif_neigh_free_now(struct batadv_hardif_neigh_node *hardif_neigh)
|
||||
{
|
||||
if (atomic_dec_and_test(&hardif_neigh->refcount))
|
||||
batadv_hardif_neigh_free_rcu(&hardif_neigh->rcu);
|
||||
}
|
||||
|
||||
/**
|
||||
* batadv_hardif_neigh_free_ref - decrement the hardif neighbors refcounter
|
||||
* and possibly free it
|
||||
* @hardif_neigh: hardif neigh neighbor to free
|
||||
*/
|
||||
void batadv_hardif_neigh_free_ref(struct batadv_hardif_neigh_node *hardif_neigh)
|
||||
{
|
||||
if (atomic_dec_and_test(&hardif_neigh->refcount))
|
||||
call_rcu(&hardif_neigh->rcu, batadv_hardif_neigh_free_rcu);
|
||||
}
|
||||
|
||||
/**
|
||||
* batadv_neigh_node_free_rcu - free the neigh_node
|
||||
* @rcu: rcu pointer of the neigh_node
|
||||
@@ -209,6 +250,7 @@ static void batadv_neigh_node_free_rcu(struct rcu_head *rcu)
|
||||
{
|
||||
struct hlist_node *node_tmp;
|
||||
struct batadv_neigh_node *neigh_node;
|
||||
struct batadv_hardif_neigh_node *hardif_neigh;
|
||||
struct batadv_neigh_ifinfo *neigh_ifinfo;
|
||||
struct batadv_algo_ops *bao;
|
||||
|
||||
@@ -220,6 +262,14 @@ static void batadv_neigh_node_free_rcu(struct rcu_head *rcu)
|
||||
batadv_neigh_ifinfo_free_ref_now(neigh_ifinfo);
|
||||
}
|
||||
|
||||
hardif_neigh = batadv_hardif_neigh_get(neigh_node->if_incoming,
|
||||
neigh_node->addr);
|
||||
if (hardif_neigh) {
|
||||
/* batadv_hardif_neigh_get() increases refcount too */
|
||||
batadv_hardif_neigh_free_now(hardif_neigh);
|
||||
batadv_hardif_neigh_free_now(hardif_neigh);
|
||||
}
|
||||
|
||||
if (bao->bat_neigh_free)
|
||||
bao->bat_neigh_free(neigh_node);
|
||||
|
||||
@@ -478,6 +528,106 @@ batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
|
||||
return res;
|
||||
}
|
||||
|
||||
/**
|
||||
* batadv_hardif_neigh_create - create a hardif neighbour node
|
||||
* @hard_iface: the interface this neighbour is connected to
|
||||
* @neigh_addr: the interface address of the neighbour to retrieve
|
||||
*
|
||||
* Returns the hardif neighbour node if found or created or NULL otherwise.
|
||||
*/
|
||||
static struct batadv_hardif_neigh_node *
|
||||
batadv_hardif_neigh_create(struct batadv_hard_iface *hard_iface,
|
||||
const u8 *neigh_addr)
|
||||
{
|
||||
struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
|
||||
struct batadv_hardif_neigh_node *hardif_neigh = NULL;
|
||||
|
||||
spin_lock_bh(&hard_iface->neigh_list_lock);
|
||||
|
||||
/* check if neighbor hasn't been added in the meantime */
|
||||
hardif_neigh = batadv_hardif_neigh_get(hard_iface, neigh_addr);
|
||||
if (hardif_neigh)
|
||||
goto out;
|
||||
|
||||
if (!atomic_inc_not_zero(&hard_iface->refcount))
|
||||
goto out;
|
||||
|
||||
hardif_neigh = kzalloc(sizeof(*hardif_neigh), GFP_ATOMIC);
|
||||
if (!hardif_neigh) {
|
||||
batadv_hardif_free_ref(hard_iface);
|
||||
goto out;
|
||||
}
|
||||
|
||||
INIT_HLIST_NODE(&hardif_neigh->list);
|
||||
ether_addr_copy(hardif_neigh->addr, neigh_addr);
|
||||
hardif_neigh->if_incoming = hard_iface;
|
||||
hardif_neigh->last_seen = jiffies;
|
||||
|
||||
atomic_set(&hardif_neigh->refcount, 1);
|
||||
|
||||
if (bat_priv->bat_algo_ops->bat_hardif_neigh_init)
|
||||
bat_priv->bat_algo_ops->bat_hardif_neigh_init(hardif_neigh);
|
||||
|
||||
hlist_add_head(&hardif_neigh->list, &hard_iface->neigh_list);
|
||||
|
||||
out:
|
||||
spin_unlock_bh(&hard_iface->neigh_list_lock);
|
||||
return hardif_neigh;
|
||||
}
|
||||
|
||||
/**
|
||||
* batadv_hardif_neigh_get_or_create - retrieve or create a hardif neighbour
|
||||
* node
|
||||
* @hard_iface: the interface this neighbour is connected to
|
||||
* @neigh_addr: the interface address of the neighbour to retrieve
|
||||
*
|
||||
* Returns the hardif neighbour node if found or created or NULL otherwise.
|
||||
*/
|
||||
static struct batadv_hardif_neigh_node *
|
||||
batadv_hardif_neigh_get_or_create(struct batadv_hard_iface *hard_iface,
|
||||
const u8 *neigh_addr)
|
||||
{
|
||||
struct batadv_hardif_neigh_node *hardif_neigh = NULL;
|
||||
|
||||
/* first check without locking to avoid the overhead */
|
||||
hardif_neigh = batadv_hardif_neigh_get(hard_iface, neigh_addr);
|
||||
if (hardif_neigh)
|
||||
return hardif_neigh;
|
||||
|
||||
return batadv_hardif_neigh_create(hard_iface, neigh_addr);
|
||||
}
|
||||
|
||||
/**
|
||||
* batadv_hardif_neigh_get - retrieve a hardif neighbour from the list
|
||||
* @hard_iface: the interface where this neighbour is connected to
|
||||
* @neigh_addr: the address of the neighbour
|
||||
*
|
||||
* Looks for and possibly returns a neighbour belonging to this hard interface.
|
||||
* Returns NULL if the neighbour is not found.
|
||||
*/
|
||||
struct batadv_hardif_neigh_node *
|
||||
batadv_hardif_neigh_get(const struct batadv_hard_iface *hard_iface,
|
||||
const u8 *neigh_addr)
|
||||
{
|
||||
struct batadv_hardif_neigh_node *tmp_hardif_neigh, *hardif_neigh = NULL;
|
||||
|
||||
rcu_read_lock();
|
||||
hlist_for_each_entry_rcu(tmp_hardif_neigh,
|
||||
&hard_iface->neigh_list, list) {
|
||||
if (!batadv_compare_eth(tmp_hardif_neigh->addr, neigh_addr))
|
||||
continue;
|
||||
|
||||
if (!atomic_inc_not_zero(&tmp_hardif_neigh->refcount))
|
||||
continue;
|
||||
|
||||
hardif_neigh = tmp_hardif_neigh;
|
||||
break;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
return hardif_neigh;
|
||||
}
|
||||
|
||||
/**
|
||||
* batadv_neigh_node_new - create and init a new neigh_node object
|
||||
* @orig_node: originator object representing the neighbour
|
||||
@@ -493,11 +643,17 @@ batadv_neigh_node_new(struct batadv_orig_node *orig_node,
|
||||
const u8 *neigh_addr)
|
||||
{
|
||||
struct batadv_neigh_node *neigh_node;
|
||||
struct batadv_hardif_neigh_node *hardif_neigh = NULL;
|
||||
|
||||
neigh_node = batadv_neigh_node_get(orig_node, hard_iface, neigh_addr);
|
||||
if (neigh_node)
|
||||
goto out;
|
||||
|
||||
hardif_neigh = batadv_hardif_neigh_get_or_create(hard_iface,
|
||||
neigh_addr);
|
||||
if (!hardif_neigh)
|
||||
goto out;
|
||||
|
||||
neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
|
||||
if (!neigh_node)
|
||||
goto out;
|
||||
@@ -523,14 +679,53 @@ batadv_neigh_node_new(struct batadv_orig_node *orig_node,
|
||||
hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
|
||||
spin_unlock_bh(&orig_node->neigh_list_lock);
|
||||
|
||||
/* increment unique neighbor refcount */
|
||||
atomic_inc(&hardif_neigh->refcount);
|
||||
|
||||
batadv_dbg(BATADV_DBG_BATMAN, orig_node->bat_priv,
|
||||
"Creating new neighbor %pM for orig_node %pM on interface %s\n",
|
||||
neigh_addr, orig_node->orig, hard_iface->net_dev->name);
|
||||
|
||||
out:
|
||||
if (hardif_neigh)
|
||||
batadv_hardif_neigh_free_ref(hardif_neigh);
|
||||
return neigh_node;
|
||||
}
|
||||
|
||||
/**
|
||||
* batadv_hardif_neigh_seq_print_text - print the single hop neighbour list
|
||||
* @seq: neighbour table seq_file struct
|
||||
* @offset: not used
|
||||
*
|
||||
* Always returns 0.
|
||||
*/
|
||||
int batadv_hardif_neigh_seq_print_text(struct seq_file *seq, void *offset)
|
||||
{
|
||||
struct net_device *net_dev = (struct net_device *)seq->private;
|
||||
struct batadv_priv *bat_priv = netdev_priv(net_dev);
|
||||
struct batadv_hard_iface *primary_if;
|
||||
|
||||
primary_if = batadv_seq_print_text_primary_if_get(seq);
|
||||
if (!primary_if)
|
||||
return 0;
|
||||
|
||||
seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
|
||||
BATADV_SOURCE_VERSION, primary_if->net_dev->name,
|
||||
primary_if->net_dev->dev_addr, net_dev->name,
|
||||
bat_priv->bat_algo_ops->name);
|
||||
|
||||
batadv_hardif_free_ref(primary_if);
|
||||
|
||||
if (!bat_priv->bat_algo_ops->bat_neigh_print) {
|
||||
seq_puts(seq,
|
||||
"No printing function for this routing protocol\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
bat_priv->bat_algo_ops->bat_neigh_print(bat_priv, seq);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* batadv_orig_ifinfo_free_rcu - free the orig_ifinfo object
|
||||
* @rcu: rcu pointer of the orig_ifinfo object
|
||||
|
||||
@@ -41,6 +41,11 @@ void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node);
|
||||
void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node);
|
||||
struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
|
||||
const u8 *addr);
|
||||
struct batadv_hardif_neigh_node *
|
||||
batadv_hardif_neigh_get(const struct batadv_hard_iface *hard_iface,
|
||||
const u8 *neigh_addr);
|
||||
void
|
||||
batadv_hardif_neigh_free_ref(struct batadv_hardif_neigh_node *hardif_neigh);
|
||||
struct batadv_neigh_node *
|
||||
batadv_neigh_node_new(struct batadv_orig_node *orig_node,
|
||||
struct batadv_hard_iface *hard_iface,
|
||||
@@ -57,6 +62,8 @@ batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
|
||||
struct batadv_hard_iface *if_outgoing);
|
||||
void batadv_neigh_ifinfo_free_ref(struct batadv_neigh_ifinfo *neigh_ifinfo);
|
||||
|
||||
int batadv_hardif_neigh_seq_print_text(struct seq_file *seq, void *offset);
|
||||
|
||||
struct batadv_orig_ifinfo *
|
||||
batadv_orig_ifinfo_get(struct batadv_orig_node *orig_node,
|
||||
struct batadv_hard_iface *if_outgoing);
|
||||
|
||||
@@ -72,8 +72,7 @@ enum batadv_subtype {
|
||||
* enum batadv_iv_flags - flags used in B.A.T.M.A.N. IV OGM packets
|
||||
* @BATADV_NOT_BEST_NEXT_HOP: flag is set when ogm packet is forwarded and was
|
||||
* previously received from someone else than the best neighbor.
|
||||
* @BATADV_PRIMARIES_FIRST_HOP: flag is set when the primary interface address
|
||||
* is used, and the packet travels its first hop.
|
||||
* @BATADV_PRIMARIES_FIRST_HOP: flag unused.
|
||||
* @BATADV_DIRECTLINK: flag is for the first hop or if rebroadcasted from a
|
||||
* one hop neighbor on the interface where it was originally received.
|
||||
*/
|
||||
|
||||
@@ -497,9 +497,9 @@ batadv_find_router(struct batadv_priv *bat_priv,
|
||||
/* alternative candidate should be good enough to be
|
||||
* considered
|
||||
*/
|
||||
if (!bao->bat_neigh_is_equiv_or_better(cand_router,
|
||||
cand->if_outgoing,
|
||||
router, recv_if))
|
||||
if (!bao->bat_neigh_is_similar_or_better(cand_router,
|
||||
cand->if_outgoing,
|
||||
router, recv_if))
|
||||
goto next;
|
||||
|
||||
/* don't use the same router twice */
|
||||
|
||||
@@ -407,8 +407,7 @@ void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
|
||||
|
||||
static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
|
||||
{
|
||||
if (forw_packet->skb)
|
||||
kfree_skb(forw_packet->skb);
|
||||
kfree_skb(forw_packet->skb);
|
||||
if (forw_packet->if_incoming)
|
||||
batadv_hardif_free_ref(forw_packet->if_incoming);
|
||||
if (forw_packet->if_outgoing)
|
||||
|
||||
@@ -40,6 +40,7 @@
|
||||
#include "distributed-arp-table.h"
|
||||
#include "gateway_client.h"
|
||||
#include "gateway_common.h"
|
||||
#include "bridge_loop_avoidance.h"
|
||||
#include "hard-interface.h"
|
||||
#include "network-coding.h"
|
||||
#include "packet.h"
|
||||
@@ -241,10 +242,13 @@ ssize_t batadv_show_vlan_##_name(struct kobject *kobj, \
|
||||
|
||||
static int batadv_store_bool_attr(char *buff, size_t count,
|
||||
struct net_device *net_dev,
|
||||
const char *attr_name, atomic_t *attr)
|
||||
const char *attr_name, atomic_t *attr,
|
||||
bool *changed)
|
||||
{
|
||||
int enabled = -1;
|
||||
|
||||
*changed = false;
|
||||
|
||||
if (buff[count - 1] == '\n')
|
||||
buff[count - 1] = '\0';
|
||||
|
||||
@@ -271,6 +275,8 @@ static int batadv_store_bool_attr(char *buff, size_t count,
|
||||
atomic_read(attr) == 1 ? "enabled" : "disabled",
|
||||
enabled == 1 ? "enabled" : "disabled");
|
||||
|
||||
*changed = true;
|
||||
|
||||
atomic_set(attr, (unsigned int)enabled);
|
||||
return count;
|
||||
}
|
||||
@@ -281,11 +287,12 @@ __batadv_store_bool_attr(char *buff, size_t count,
|
||||
struct attribute *attr,
|
||||
atomic_t *attr_store, struct net_device *net_dev)
|
||||
{
|
||||
bool changed;
|
||||
int ret;
|
||||
|
||||
ret = batadv_store_bool_attr(buff, count, net_dev, attr->name,
|
||||
attr_store);
|
||||
if (post_func && ret)
|
||||
attr_store, &changed);
|
||||
if (post_func && changed)
|
||||
post_func(net_dev);
|
||||
|
||||
return ret;
|
||||
@@ -549,7 +556,8 @@ static ssize_t batadv_store_isolation_mark(struct kobject *kobj,
|
||||
BATADV_ATTR_SIF_BOOL(aggregated_ogms, S_IRUGO | S_IWUSR, NULL);
|
||||
BATADV_ATTR_SIF_BOOL(bonding, S_IRUGO | S_IWUSR, NULL);
|
||||
#ifdef CONFIG_BATMAN_ADV_BLA
|
||||
BATADV_ATTR_SIF_BOOL(bridge_loop_avoidance, S_IRUGO | S_IWUSR, NULL);
|
||||
BATADV_ATTR_SIF_BOOL(bridge_loop_avoidance, S_IRUGO | S_IWUSR,
|
||||
batadv_bla_status_update);
|
||||
#endif
|
||||
#ifdef CONFIG_BATMAN_ADV_DAT
|
||||
BATADV_ATTR_SIF_BOOL(distributed_arp_table, S_IRUGO | S_IWUSR,
|
||||
|
||||
@@ -1443,7 +1443,7 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
|
||||
* TT_CLIENT_WIFI, therefore they have to be copied in the
|
||||
* client entry
|
||||
*/
|
||||
tt_global_entry->common.flags |= flags;
|
||||
common->flags |= flags;
|
||||
|
||||
/* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only
|
||||
* one originator left in the list and we previously received a
|
||||
@@ -2419,8 +2419,8 @@ static bool batadv_tt_global_check_crc(struct batadv_orig_node *orig_node,
|
||||
{
|
||||
struct batadv_tvlv_tt_vlan_data *tt_vlan_tmp;
|
||||
struct batadv_orig_node_vlan *vlan;
|
||||
int i, orig_num_vlan;
|
||||
u32 crc;
|
||||
int i;
|
||||
|
||||
/* check if each received CRC matches the locally stored one */
|
||||
for (i = 0; i < num_vlan; i++) {
|
||||
@@ -2446,6 +2446,18 @@ static bool batadv_tt_global_check_crc(struct batadv_orig_node *orig_node,
|
||||
return false;
|
||||
}
|
||||
|
||||
/* check if any excess VLANs exist locally for the originator
|
||||
* which are not mentioned in the TVLV from the originator.
|
||||
*/
|
||||
rcu_read_lock();
|
||||
orig_num_vlan = 0;
|
||||
hlist_for_each_entry_rcu(vlan, &orig_node->vlan_list, list)
|
||||
orig_num_vlan++;
|
||||
rcu_read_unlock();
|
||||
|
||||
if (orig_num_vlan > num_vlan)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
@@ -3327,7 +3339,10 @@ bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, u8 *src, u8 *dst,
|
||||
bool ret = false;
|
||||
|
||||
vlan = batadv_softif_vlan_get(bat_priv, vid);
|
||||
if (!vlan || !atomic_read(&vlan->ap_isolation))
|
||||
if (!vlan)
|
||||
return false;
|
||||
|
||||
if (!atomic_read(&vlan->ap_isolation))
|
||||
goto out;
|
||||
|
||||
tt_local_entry = batadv_tt_local_hash_find(bat_priv, dst, vid);
|
||||
@@ -3344,8 +3359,7 @@ bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, u8 *src, u8 *dst,
|
||||
ret = true;
|
||||
|
||||
out:
|
||||
if (vlan)
|
||||
batadv_softif_vlan_free_ref(vlan);
|
||||
batadv_softif_vlan_free_ref(vlan);
|
||||
if (tt_global_entry)
|
||||
batadv_tt_global_entry_free_ref(tt_global_entry);
|
||||
if (tt_local_entry)
|
||||
|
||||
@@ -100,6 +100,8 @@ struct batadv_hard_iface_bat_iv {
|
||||
* @bat_iv: BATMAN IV specific per hard interface data
|
||||
* @cleanup_work: work queue callback item for hard interface deinit
|
||||
* @debug_dir: dentry for nc subdir in batman-adv directory in debugfs
|
||||
* @neigh_list: list of unique single hop neighbors via this interface
|
||||
* @neigh_list_lock: lock protecting neigh_list
|
||||
*/
|
||||
struct batadv_hard_iface {
|
||||
struct list_head list;
|
||||
@@ -115,6 +117,9 @@ struct batadv_hard_iface {
|
||||
struct batadv_hard_iface_bat_iv bat_iv;
|
||||
struct work_struct cleanup_work;
|
||||
struct dentry *debug_dir;
|
||||
struct hlist_head neigh_list;
|
||||
/* neigh_list_lock protects: neigh_list */
|
||||
spinlock_t neigh_list_lock;
|
||||
};
|
||||
|
||||
/**
|
||||
@@ -218,12 +223,12 @@ struct batadv_orig_bat_iv {
|
||||
* @orig: originator ethernet address
|
||||
* @ifinfo_list: list for routers per outgoing interface
|
||||
* @last_bonding_candidate: pointer to last ifinfo of last used router
|
||||
* @batadv_dat_addr_t: address of the orig node in the distributed hash
|
||||
* @dat_addr: address of the orig node in the distributed hash
|
||||
* @last_seen: time when last packet from this node was received
|
||||
* @bcast_seqno_reset: time when the broadcast seqno window was reset
|
||||
* @mcast_handler_lock: synchronizes mcast-capability and -flag changes
|
||||
* @mcast_flags: multicast flags announced by the orig node
|
||||
* @mcast_want_all_unsnoop_node: a list node for the
|
||||
* @mcast_want_all_unsnoopables_node: a list node for the
|
||||
* mcast.want_all_unsnoopables list
|
||||
* @mcast_want_all_ipv4_node: a list node for the mcast.want_all_ipv4 list
|
||||
* @mcast_want_all_ipv6_node: a list node for the mcast.want_all_ipv6 list
|
||||
@@ -340,6 +345,23 @@ struct batadv_gw_node {
|
||||
struct rcu_head rcu;
|
||||
};
|
||||
|
||||
/**
|
||||
* batadv_hardif_neigh_node - unique neighbor per hard interface
|
||||
* @list: list node for batadv_hard_iface::neigh_list
|
||||
* @addr: the MAC address of the neighboring interface
|
||||
* @if_incoming: pointer to incoming hard interface
|
||||
* @refcount: number of contexts the object is used
|
||||
* @rcu: struct used for freeing in a RCU-safe manner
|
||||
*/
|
||||
struct batadv_hardif_neigh_node {
|
||||
struct hlist_node list;
|
||||
u8 addr[ETH_ALEN];
|
||||
struct batadv_hard_iface *if_incoming;
|
||||
unsigned long last_seen;
|
||||
atomic_t refcount;
|
||||
struct rcu_head rcu;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct batadv_neigh_node - structure for single hops neighbors
|
||||
* @list: list node for batadv_orig_node::neigh_list
|
||||
@@ -349,9 +371,8 @@ struct batadv_gw_node {
|
||||
* @ifinfo_lock: lock protecting private ifinfo members and list
|
||||
* @if_incoming: pointer to incoming hard interface
|
||||
* @last_seen: when last packet via this neighbor was received
|
||||
* @last_ttl: last received ttl from this neigh node
|
||||
* @refcount: number of contexts the object is used
|
||||
* @rcu: struct used for freeing in an RCU-safe manner
|
||||
* @bat_iv: B.A.T.M.A.N. IV private structure
|
||||
*/
|
||||
struct batadv_neigh_node {
|
||||
struct hlist_node list;
|
||||
@@ -401,13 +422,14 @@ struct batadv_neigh_ifinfo {
|
||||
struct rcu_head rcu;
|
||||
};
|
||||
|
||||
#ifdef CONFIG_BATMAN_ADV_BLA
|
||||
|
||||
/**
|
||||
* struct batadv_bcast_duplist_entry - structure for LAN broadcast suppression
|
||||
* @orig[ETH_ALEN]: mac address of orig node orginating the broadcast
|
||||
* @orig: mac address of orig node orginating the broadcast
|
||||
* @crc: crc32 checksum of broadcast payload
|
||||
* @entrytime: time when the broadcast packet was received
|
||||
*/
|
||||
#ifdef CONFIG_BATMAN_ADV_BLA
|
||||
struct batadv_bcast_duplist_entry {
|
||||
u8 orig[ETH_ALEN];
|
||||
__be32 crc;
|
||||
@@ -549,9 +571,11 @@ struct batadv_priv_tt {
|
||||
struct delayed_work work;
|
||||
};
|
||||
|
||||
#ifdef CONFIG_BATMAN_ADV_BLA
|
||||
|
||||
/**
|
||||
* struct batadv_priv_bla - per mesh interface bridge loope avoidance data
|
||||
* @num_requests; number of bla requests in flight
|
||||
* @num_requests: number of bla requests in flight
|
||||
* @claim_hash: hash table containing mesh nodes this host has claimed
|
||||
* @backbone_hash: hash table containing all detected backbone gateways
|
||||
* @bcast_duplist: recently received broadcast packets array (for broadcast
|
||||
@@ -561,7 +585,6 @@ struct batadv_priv_tt {
|
||||
* @claim_dest: local claim data (e.g. claim group)
|
||||
* @work: work queue callback item for cleanups & bla announcements
|
||||
*/
|
||||
#ifdef CONFIG_BATMAN_ADV_BLA
|
||||
struct batadv_priv_bla {
|
||||
atomic_t num_requests;
|
||||
struct batadv_hashtable *claim_hash;
|
||||
@@ -575,6 +598,8 @@ struct batadv_priv_bla {
|
||||
};
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_BATMAN_ADV_DEBUG
|
||||
|
||||
/**
|
||||
* struct batadv_priv_debug_log - debug logging data
|
||||
* @log_buff: buffer holding the logs (ring bufer)
|
||||
@@ -583,7 +608,6 @@ struct batadv_priv_bla {
|
||||
* @lock: lock protecting log_buff, log_start & log_end
|
||||
* @queue_wait: log reader's wait queue
|
||||
*/
|
||||
#ifdef CONFIG_BATMAN_ADV_DEBUG
|
||||
struct batadv_priv_debug_log {
|
||||
char log_buff[BATADV_LOG_BUF_LEN];
|
||||
unsigned long log_start;
|
||||
@@ -625,13 +649,14 @@ struct batadv_priv_tvlv {
|
||||
spinlock_t handler_list_lock; /* protects handler_list */
|
||||
};
|
||||
|
||||
#ifdef CONFIG_BATMAN_ADV_DAT
|
||||
|
||||
/**
|
||||
* struct batadv_priv_dat - per mesh interface DAT private data
|
||||
* @addr: node DAT address
|
||||
* @hash: hashtable representing the local ARP cache
|
||||
* @work: work queue callback item for cache purging
|
||||
*/
|
||||
#ifdef CONFIG_BATMAN_ADV_DAT
|
||||
struct batadv_priv_dat {
|
||||
batadv_dat_addr_t addr;
|
||||
struct batadv_hashtable *hash;
|
||||
@@ -773,7 +798,7 @@ struct batadv_softif_vlan {
|
||||
* @dat: distributed arp table data
|
||||
* @mcast: multicast data
|
||||
* @network_coding: bool indicating whether network coding is enabled
|
||||
* @batadv_priv_nc: network coding data
|
||||
* @nc: network coding data
|
||||
*/
|
||||
struct batadv_priv {
|
||||
atomic_t mesh_state;
|
||||
@@ -871,6 +896,8 @@ struct batadv_socket_packet {
|
||||
u8 icmp_packet[BATADV_ICMP_MAX_PACKET_SIZE];
|
||||
};
|
||||
|
||||
#ifdef CONFIG_BATMAN_ADV_BLA
|
||||
|
||||
/**
|
||||
* struct batadv_bla_backbone_gw - batman-adv gateway bridged into the LAN
|
||||
* @orig: originator address of backbone node (mac address of primary iface)
|
||||
@@ -884,10 +911,10 @@ struct batadv_socket_packet {
|
||||
* backbone gateway - no bcast traffic is formwared until the situation was
|
||||
* resolved
|
||||
* @crc: crc16 checksum over all claims
|
||||
* @crc_lock: lock protecting crc
|
||||
* @refcount: number of contexts the object is used
|
||||
* @rcu: struct used for freeing in an RCU-safe manner
|
||||
*/
|
||||
#ifdef CONFIG_BATMAN_ADV_BLA
|
||||
struct batadv_bla_backbone_gw {
|
||||
u8 orig[ETH_ALEN];
|
||||
unsigned short vid;
|
||||
@@ -897,6 +924,7 @@ struct batadv_bla_backbone_gw {
|
||||
atomic_t wait_periods;
|
||||
atomic_t request_sent;
|
||||
u16 crc;
|
||||
spinlock_t crc_lock; /* protects crc */
|
||||
atomic_t refcount;
|
||||
struct rcu_head rcu;
|
||||
};
|
||||
@@ -905,7 +933,7 @@ struct batadv_bla_backbone_gw {
|
||||
* struct batadv_bla_claim - claimed non-mesh client structure
|
||||
* @addr: mac address of claimed non-mesh client
|
||||
* @vid: vlan id this client was detected on
|
||||
* @batadv_bla_backbone_gw: pointer to backbone gw claiming this client
|
||||
* @backbone_gw: pointer to backbone gw claiming this client
|
||||
* @lasttime: last time we heard of claim (locals only)
|
||||
* @hash_entry: hlist node for batadv_priv_bla::claim_hash
|
||||
* @refcount: number of contexts the object is used
|
||||
@@ -1131,11 +1159,13 @@ struct batadv_forw_packet {
|
||||
* @bat_primary_iface_set: called when primary interface is selected / changed
|
||||
* @bat_ogm_schedule: prepare a new outgoing OGM for the send queue
|
||||
* @bat_ogm_emit: send scheduled OGM
|
||||
* @bat_hardif_neigh_init: called on creation of single hop entry
|
||||
* @bat_neigh_cmp: compare the metrics of two neighbors for their respective
|
||||
* outgoing interfaces
|
||||
* @bat_neigh_is_equiv_or_better: check if neigh1 is equally good or better
|
||||
* than neigh2 for their respective outgoing interface from the metric
|
||||
* @bat_neigh_is_similar_or_better: check if neigh1 is equally similar or
|
||||
* better than neigh2 for their respective outgoing interface from the metric
|
||||
* prospective
|
||||
* @bat_neigh_print: print the single hop neighbor list (optional)
|
||||
* @bat_neigh_free: free the resources allocated by the routing algorithm for a
|
||||
* neigh_node object
|
||||
* @bat_orig_print: print the originator table (optional)
|
||||
@@ -1156,15 +1186,17 @@ struct batadv_algo_ops {
|
||||
void (*bat_ogm_schedule)(struct batadv_hard_iface *hard_iface);
|
||||
void (*bat_ogm_emit)(struct batadv_forw_packet *forw_packet);
|
||||
/* neigh_node handling API */
|
||||
void (*bat_hardif_neigh_init)(struct batadv_hardif_neigh_node *neigh);
|
||||
int (*bat_neigh_cmp)(struct batadv_neigh_node *neigh1,
|
||||
struct batadv_hard_iface *if_outgoing1,
|
||||
struct batadv_neigh_node *neigh2,
|
||||
struct batadv_hard_iface *if_outgoing2);
|
||||
bool (*bat_neigh_is_equiv_or_better)
|
||||
bool (*bat_neigh_is_similar_or_better)
|
||||
(struct batadv_neigh_node *neigh1,
|
||||
struct batadv_hard_iface *if_outgoing1,
|
||||
struct batadv_neigh_node *neigh2,
|
||||
struct batadv_hard_iface *if_outgoing2);
|
||||
void (*bat_neigh_print)(struct batadv_priv *priv, struct seq_file *seq);
|
||||
void (*bat_neigh_free)(struct batadv_neigh_node *neigh);
|
||||
/* orig_node handling API */
|
||||
void (*bat_orig_print)(struct batadv_priv *priv, struct seq_file *seq,
|
||||
@@ -1224,8 +1256,6 @@ struct batadv_dat_candidate {
|
||||
* struct batadv_tvlv_container - container for tvlv appended to OGMs
|
||||
* @list: hlist node for batadv_priv_tvlv::container_list
|
||||
* @tvlv_hdr: tvlv header information needed to construct the tvlv
|
||||
* @value_len: length of the buffer following this struct which contains
|
||||
* the actual tvlv payload
|
||||
* @refcount: number of contexts the object is used
|
||||
*/
|
||||
struct batadv_tvlv_container {
|
||||
|
||||
@@ -825,9 +825,7 @@ static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev)
|
||||
list_add_rcu(&(*dev)->list, &bt_6lowpan_devices);
|
||||
spin_unlock(&devices_lock);
|
||||
|
||||
lowpan_netdev_setup(netdev, LOWPAN_LLTYPE_BTLE);
|
||||
|
||||
err = register_netdev(netdev);
|
||||
err = lowpan_register_netdev(netdev, LOWPAN_LLTYPE_BTLE);
|
||||
if (err < 0) {
|
||||
BT_INFO("register_netdev failed %d", err);
|
||||
spin_lock(&devices_lock);
|
||||
@@ -890,7 +888,7 @@ static void delete_netdev(struct work_struct *work)
|
||||
struct lowpan_dev *entry = container_of(work, struct lowpan_dev,
|
||||
delete_netdev);
|
||||
|
||||
unregister_netdev(entry->netdev);
|
||||
lowpan_unregister_netdev(entry->netdev);
|
||||
|
||||
/* The entry pointer is deleted by the netdev destructor. */
|
||||
}
|
||||
@@ -1348,7 +1346,7 @@ static void disconnect_devices(void)
|
||||
ifdown(entry->netdev);
|
||||
BT_DBG("Unregistering netdev %s %p",
|
||||
entry->netdev->name, entry->netdev);
|
||||
unregister_netdev(entry->netdev);
|
||||
lowpan_unregister_netdev(entry->netdev);
|
||||
kfree(entry);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -33,8 +33,6 @@
|
||||
|
||||
#include "selftest.h"
|
||||
|
||||
#define VERSION "2.21"
|
||||
|
||||
/* Bluetooth sockets */
|
||||
#define BT_MAX_PROTO 8
|
||||
static const struct net_proto_family *bt_proto[BT_MAX_PROTO];
|
||||
@@ -176,20 +174,20 @@ EXPORT_SYMBOL(bt_accept_unlink);
|
||||
|
||||
struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
|
||||
{
|
||||
struct list_head *p, *n;
|
||||
struct bt_sock *s, *n;
|
||||
struct sock *sk;
|
||||
|
||||
BT_DBG("parent %p", parent);
|
||||
|
||||
list_for_each_safe(p, n, &bt_sk(parent)->accept_q) {
|
||||
sk = (struct sock *) list_entry(p, struct bt_sock, accept_q);
|
||||
list_for_each_entry_safe(s, n, &bt_sk(parent)->accept_q, accept_q) {
|
||||
sk = (struct sock *)s;
|
||||
|
||||
lock_sock(sk);
|
||||
|
||||
/* FIXME: Is this check still needed */
|
||||
if (sk->sk_state == BT_CLOSED) {
|
||||
release_sock(sk);
|
||||
bt_accept_unlink(sk);
|
||||
release_sock(sk);
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -390,11 +388,11 @@ EXPORT_SYMBOL(bt_sock_stream_recvmsg);
|
||||
|
||||
static inline unsigned int bt_accept_poll(struct sock *parent)
|
||||
{
|
||||
struct list_head *p, *n;
|
||||
struct bt_sock *s, *n;
|
||||
struct sock *sk;
|
||||
|
||||
list_for_each_safe(p, n, &bt_sk(parent)->accept_q) {
|
||||
sk = (struct sock *) list_entry(p, struct bt_sock, accept_q);
|
||||
list_for_each_entry_safe(s, n, &bt_sk(parent)->accept_q, accept_q) {
|
||||
sk = (struct sock *)s;
|
||||
if (sk->sk_state == BT_CONNECTED ||
|
||||
(test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags) &&
|
||||
sk->sk_state == BT_CONNECT2))
|
||||
@@ -671,7 +669,7 @@ static const struct file_operations bt_fops = {
|
||||
};
|
||||
|
||||
int bt_procfs_init(struct net *net, const char *name,
|
||||
struct bt_sock_list* sk_list,
|
||||
struct bt_sock_list *sk_list,
|
||||
int (* seq_show)(struct seq_file *, void *))
|
||||
{
|
||||
sk_list->custom_seq_show = seq_show;
|
||||
@@ -687,7 +685,7 @@ void bt_procfs_cleanup(struct net *net, const char *name)
|
||||
}
|
||||
#else
|
||||
int bt_procfs_init(struct net *net, const char *name,
|
||||
struct bt_sock_list* sk_list,
|
||||
struct bt_sock_list *sk_list,
|
||||
int (* seq_show)(struct seq_file *, void *))
|
||||
{
|
||||
return 0;
|
||||
@@ -715,7 +713,7 @@ static int __init bt_init(void)
|
||||
|
||||
sock_skb_cb_check_size(sizeof(struct bt_skb_cb));
|
||||
|
||||
BT_INFO("Core ver %s", VERSION);
|
||||
BT_INFO("Core ver %s", BT_SUBSYS_VERSION);
|
||||
|
||||
err = bt_selftest();
|
||||
if (err < 0)
|
||||
@@ -789,7 +787,7 @@ subsys_initcall(bt_init);
|
||||
module_exit(bt_exit);
|
||||
|
||||
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
|
||||
MODULE_DESCRIPTION("Bluetooth Core ver " VERSION);
|
||||
MODULE_VERSION(VERSION);
|
||||
MODULE_DESCRIPTION("Bluetooth Core ver " BT_SUBSYS_VERSION);
|
||||
MODULE_VERSION(BT_SUBSYS_VERSION);
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_ALIAS_NETPROTO(PF_BLUETOOTH);
|
||||
|
||||
@@ -608,8 +608,11 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
|
||||
s->msg.msg_flags = MSG_NOSIGNAL;
|
||||
|
||||
#ifdef CONFIG_BT_BNEP_MC_FILTER
|
||||
/* Set default mc filter */
|
||||
set_bit(bnep_mc_hash(dev->broadcast), (ulong *) &s->mc_filter);
|
||||
/* Set default mc filter to not filter out any mc addresses
|
||||
* as defined in the BNEP specification (revision 0.95a)
|
||||
* http://grouper.ieee.org/groups/802/15/Bluetooth/BNEP.pdf
|
||||
*/
|
||||
s->mc_filter = ~0LL;
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_BT_BNEP_PROTO_FILTER
|
||||
|
||||
@@ -100,10 +100,8 @@ static void cmtp_application_del(struct cmtp_session *session, struct cmtp_appli
|
||||
static struct cmtp_application *cmtp_application_get(struct cmtp_session *session, int pattern, __u16 value)
|
||||
{
|
||||
struct cmtp_application *app;
|
||||
struct list_head *p;
|
||||
|
||||
list_for_each(p, &session->applications) {
|
||||
app = list_entry(p, struct cmtp_application, list);
|
||||
list_for_each_entry(app, &session->applications, list) {
|
||||
switch (pattern) {
|
||||
case CMTP_MSGNUM:
|
||||
if (app->msgnum == value)
|
||||
@@ -511,14 +509,12 @@ static int cmtp_proc_show(struct seq_file *m, void *v)
|
||||
struct capi_ctr *ctrl = m->private;
|
||||
struct cmtp_session *session = ctrl->driverdata;
|
||||
struct cmtp_application *app;
|
||||
struct list_head *p;
|
||||
|
||||
seq_printf(m, "%s\n\n", cmtp_procinfo(ctrl));
|
||||
seq_printf(m, "addr %s\n", session->name);
|
||||
seq_printf(m, "ctrl %d\n", session->num);
|
||||
|
||||
list_for_each(p, &session->applications) {
|
||||
app = list_entry(p, struct cmtp_application, list);
|
||||
list_for_each_entry(app, &session->applications, list) {
|
||||
seq_printf(m, "appl %d -> %d\n", app->appl, app->mapping);
|
||||
}
|
||||
|
||||
|
||||
@@ -178,8 +178,7 @@ static inline int cmtp_recv_frame(struct cmtp_session *session, struct sk_buff *
|
||||
cmtp_add_msgpart(session, id, skb->data + hdrlen, len);
|
||||
break;
|
||||
default:
|
||||
if (session->reassembly[id] != NULL)
|
||||
kfree_skb(session->reassembly[id]);
|
||||
kfree_skb(session->reassembly[id]);
|
||||
session->reassembly[id] = NULL;
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -178,6 +178,10 @@ static void hci_connect_le_scan_remove(struct hci_conn *conn)
|
||||
hci_dev_hold(conn->hdev);
|
||||
hci_conn_get(conn);
|
||||
|
||||
/* Even though we hold a reference to the hdev, many other
|
||||
* things might get cleaned up meanwhile, including the hdev's
|
||||
* own workqueue, so we can't use that for scheduling.
|
||||
*/
|
||||
schedule_work(&conn->le_scan_cleanup);
|
||||
}
|
||||
|
||||
@@ -664,8 +668,16 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status)
|
||||
|
||||
conn->state = BT_CLOSED;
|
||||
|
||||
mgmt_connect_failed(hdev, &conn->dst, conn->type, conn->dst_type,
|
||||
status);
|
||||
/* If the status indicates successful cancellation of
|
||||
* the attempt (i.e. Unkown Connection Id) there's no point of
|
||||
* notifying failure since we'll go back to keep trying to
|
||||
* connect. The only exception is explicit connect requests
|
||||
* where a timeout + cancel does indicate an actual failure.
|
||||
*/
|
||||
if (status != HCI_ERROR_UNKNOWN_CONN_ID ||
|
||||
(params && params->explicit_connect))
|
||||
mgmt_connect_failed(hdev, &conn->dst, conn->type,
|
||||
conn->dst_type, status);
|
||||
|
||||
hci_connect_cfm(conn, status);
|
||||
|
||||
@@ -679,7 +691,7 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status)
|
||||
/* Re-enable advertising in case this was a failed connection
|
||||
* attempt as a peripheral.
|
||||
*/
|
||||
mgmt_reenable_advertising(hdev);
|
||||
hci_req_reenable_advertising(hdev);
|
||||
}
|
||||
|
||||
static void create_le_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
|
||||
@@ -722,8 +734,12 @@ static void hci_req_add_le_create_conn(struct hci_request *req,
|
||||
if (hci_update_random_address(req, false, &own_addr_type))
|
||||
return;
|
||||
|
||||
/* Set window to be the same value as the interval to enable
|
||||
* continuous scanning.
|
||||
*/
|
||||
cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
|
||||
cp.scan_window = cpu_to_le16(hdev->le_scan_window);
|
||||
cp.scan_window = cp.scan_interval;
|
||||
|
||||
bacpy(&cp.peer_addr, &conn->dst);
|
||||
cp.peer_addr_type = conn->dst_type;
|
||||
cp.own_address_type = own_addr_type;
|
||||
@@ -781,7 +797,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
|
||||
u8 role)
|
||||
{
|
||||
struct hci_conn_params *params;
|
||||
struct hci_conn *conn, *conn_unfinished;
|
||||
struct hci_conn *conn;
|
||||
struct smp_irk *irk;
|
||||
struct hci_request req;
|
||||
int err;
|
||||
@@ -794,35 +810,22 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
|
||||
return ERR_PTR(-EOPNOTSUPP);
|
||||
}
|
||||
|
||||
/* Some devices send ATT messages as soon as the physical link is
|
||||
* established. To be able to handle these ATT messages, the user-
|
||||
* space first establishes the connection and then starts the pairing
|
||||
* process.
|
||||
*
|
||||
* So if a hci_conn object already exists for the following connection
|
||||
* attempt, we simply update pending_sec_level and auth_type fields
|
||||
* and return the object found.
|
||||
*/
|
||||
conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
|
||||
conn_unfinished = NULL;
|
||||
if (conn) {
|
||||
if (conn->state == BT_CONNECT &&
|
||||
test_bit(HCI_CONN_SCANNING, &conn->flags)) {
|
||||
BT_DBG("will continue unfinished conn %pMR", dst);
|
||||
conn_unfinished = conn;
|
||||
} else {
|
||||
if (conn->pending_sec_level < sec_level)
|
||||
conn->pending_sec_level = sec_level;
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
|
||||
/* Since the controller supports only one LE connection attempt at a
|
||||
* time, we return -EBUSY if there is any connection attempt running.
|
||||
*/
|
||||
if (hci_lookup_le_connect(hdev))
|
||||
return ERR_PTR(-EBUSY);
|
||||
|
||||
/* If there's already a connection object but it's not in
|
||||
* scanning state it means it must already be established, in
|
||||
* which case we can't do anything else except report a failure
|
||||
* to connect.
|
||||
*/
|
||||
conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
|
||||
if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags)) {
|
||||
return ERR_PTR(-EBUSY);
|
||||
}
|
||||
|
||||
/* When given an identity address with existing identity
|
||||
* resolving key, the connection needs to be established
|
||||
* to a resolvable random address.
|
||||
@@ -838,23 +841,20 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
|
||||
dst_type = ADDR_LE_DEV_RANDOM;
|
||||
}
|
||||
|
||||
if (conn_unfinished) {
|
||||
conn = conn_unfinished;
|
||||
if (conn) {
|
||||
bacpy(&conn->dst, dst);
|
||||
} else {
|
||||
conn = hci_conn_add(hdev, LE_LINK, dst, role);
|
||||
if (!conn)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
hci_conn_hold(conn);
|
||||
conn->pending_sec_level = sec_level;
|
||||
}
|
||||
|
||||
if (!conn)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
conn->dst_type = dst_type;
|
||||
conn->sec_level = BT_SECURITY_LOW;
|
||||
conn->conn_timeout = conn_timeout;
|
||||
|
||||
if (!conn_unfinished)
|
||||
conn->pending_sec_level = sec_level;
|
||||
|
||||
hci_req_init(&req, hdev);
|
||||
|
||||
/* Disable advertising if we're active. For master role
|
||||
@@ -918,37 +918,9 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
done:
|
||||
/* If this is continuation of connect started by hci_connect_le_scan,
|
||||
* it already called hci_conn_hold and calling it again would mess the
|
||||
* counter.
|
||||
*/
|
||||
if (!conn_unfinished)
|
||||
hci_conn_hold(conn);
|
||||
|
||||
return conn;
|
||||
}
|
||||
|
||||
static void hci_connect_le_scan_complete(struct hci_dev *hdev, u8 status,
|
||||
u16 opcode)
|
||||
{
|
||||
struct hci_conn *conn;
|
||||
|
||||
if (!status)
|
||||
return;
|
||||
|
||||
BT_ERR("Failed to add device to auto conn whitelist: status 0x%2.2x",
|
||||
status);
|
||||
|
||||
hci_dev_lock(hdev);
|
||||
|
||||
conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
|
||||
if (conn)
|
||||
hci_le_conn_failed(conn, status);
|
||||
|
||||
hci_dev_unlock(hdev);
|
||||
}
|
||||
|
||||
static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
|
||||
{
|
||||
struct hci_conn *conn;
|
||||
@@ -964,10 +936,9 @@ static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
|
||||
}
|
||||
|
||||
/* This function requires the caller holds hdev->lock */
|
||||
static int hci_explicit_conn_params_set(struct hci_request *req,
|
||||
static int hci_explicit_conn_params_set(struct hci_dev *hdev,
|
||||
bdaddr_t *addr, u8 addr_type)
|
||||
{
|
||||
struct hci_dev *hdev = req->hdev;
|
||||
struct hci_conn_params *params;
|
||||
|
||||
if (is_connected(hdev, addr, addr_type))
|
||||
@@ -995,7 +966,6 @@ static int hci_explicit_conn_params_set(struct hci_request *req,
|
||||
}
|
||||
|
||||
params->explicit_connect = true;
|
||||
__hci_update_background_scan(req);
|
||||
|
||||
BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
|
||||
params->auto_connect);
|
||||
@@ -1006,11 +976,9 @@ static int hci_explicit_conn_params_set(struct hci_request *req,
|
||||
/* This function requires the caller holds hdev->lock */
|
||||
struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
|
||||
u8 dst_type, u8 sec_level,
|
||||
u16 conn_timeout, u8 role)
|
||||
u16 conn_timeout)
|
||||
{
|
||||
struct hci_conn *conn;
|
||||
struct hci_request req;
|
||||
int err;
|
||||
|
||||
/* Let's make sure that le is enabled.*/
|
||||
if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
|
||||
@@ -1038,29 +1006,22 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
|
||||
|
||||
BT_DBG("requesting refresh of dst_addr");
|
||||
|
||||
conn = hci_conn_add(hdev, LE_LINK, dst, role);
|
||||
conn = hci_conn_add(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
|
||||
if (!conn)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
hci_req_init(&req, hdev);
|
||||
|
||||
if (hci_explicit_conn_params_set(&req, dst, dst_type) < 0)
|
||||
if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0)
|
||||
return ERR_PTR(-EBUSY);
|
||||
|
||||
conn->state = BT_CONNECT;
|
||||
set_bit(HCI_CONN_SCANNING, &conn->flags);
|
||||
|
||||
err = hci_req_run(&req, hci_connect_le_scan_complete);
|
||||
if (err && err != -ENODATA) {
|
||||
hci_conn_del(conn);
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
conn->dst_type = dst_type;
|
||||
conn->sec_level = BT_SECURITY_LOW;
|
||||
conn->pending_sec_level = sec_level;
|
||||
conn->conn_timeout = conn_timeout;
|
||||
|
||||
hci_update_background_scan(hdev);
|
||||
|
||||
done:
|
||||
hci_conn_hold(conn);
|
||||
return conn;
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1183,7 +1183,7 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
|
||||
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
|
||||
else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
|
||||
hdev->discovery.state == DISCOVERY_FINDING)
|
||||
mgmt_reenable_advertising(hdev);
|
||||
hci_req_reenable_advertising(hdev);
|
||||
|
||||
break;
|
||||
|
||||
@@ -2176,7 +2176,7 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
|
||||
hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
|
||||
sizeof(cp), &cp);
|
||||
|
||||
hci_update_page_scan(hdev);
|
||||
hci_req_update_scan(hdev);
|
||||
}
|
||||
|
||||
/* Set packet type for incoming connection */
|
||||
@@ -2362,7 +2362,7 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
|
||||
if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
|
||||
hci_remove_link_key(hdev, &conn->dst);
|
||||
|
||||
hci_update_page_scan(hdev);
|
||||
hci_req_update_scan(hdev);
|
||||
}
|
||||
|
||||
params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
|
||||
@@ -2401,7 +2401,7 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
|
||||
* is timed out due to Directed Advertising."
|
||||
*/
|
||||
if (type == LE_LINK)
|
||||
mgmt_reenable_advertising(hdev);
|
||||
hci_req_reenable_advertising(hdev);
|
||||
|
||||
unlock:
|
||||
hci_dev_unlock(hdev);
|
||||
@@ -3833,9 +3833,9 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
|
||||
data.ssp_mode = 0x01;
|
||||
|
||||
if (hci_dev_test_flag(hdev, HCI_MGMT))
|
||||
name_known = eir_has_data_type(info->data,
|
||||
sizeof(info->data),
|
||||
EIR_NAME_COMPLETE);
|
||||
name_known = eir_get_data(info->data,
|
||||
sizeof(info->data),
|
||||
EIR_NAME_COMPLETE, NULL);
|
||||
else
|
||||
name_known = true;
|
||||
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -20,6 +20,9 @@
|
||||
SOFTWARE IS DISCLAIMED.
|
||||
*/
|
||||
|
||||
#define hci_req_sync_lock(hdev) mutex_lock(&hdev->req_lock)
|
||||
#define hci_req_sync_unlock(hdev) mutex_unlock(&hdev->req_lock)
|
||||
|
||||
struct hci_request {
|
||||
struct hci_dev *hdev;
|
||||
struct sk_buff_head cmd_q;
|
||||
@@ -41,21 +44,61 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
|
||||
hci_req_complete_t *req_complete,
|
||||
hci_req_complete_skb_t *req_complete_skb);
|
||||
|
||||
int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
|
||||
unsigned long opt),
|
||||
unsigned long opt, u32 timeout, u8 *hci_status);
|
||||
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
|
||||
unsigned long opt),
|
||||
unsigned long opt, u32 timeout, u8 *hci_status);
|
||||
void hci_req_sync_cancel(struct hci_dev *hdev, int err);
|
||||
|
||||
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
|
||||
const void *param);
|
||||
|
||||
int __hci_req_hci_power_on(struct hci_dev *hdev);
|
||||
|
||||
void __hci_req_write_fast_connectable(struct hci_request *req, bool enable);
|
||||
void __hci_req_update_name(struct hci_request *req);
|
||||
void __hci_req_update_eir(struct hci_request *req);
|
||||
|
||||
void hci_req_add_le_scan_disable(struct hci_request *req);
|
||||
void hci_req_add_le_passive_scan(struct hci_request *req);
|
||||
|
||||
void hci_update_page_scan(struct hci_dev *hdev);
|
||||
void __hci_update_page_scan(struct hci_request *req);
|
||||
void hci_req_reenable_advertising(struct hci_dev *hdev);
|
||||
void __hci_req_enable_advertising(struct hci_request *req);
|
||||
void __hci_req_disable_advertising(struct hci_request *req);
|
||||
void __hci_req_update_adv_data(struct hci_request *req, u8 instance);
|
||||
int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance);
|
||||
void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance);
|
||||
|
||||
int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
|
||||
bool force);
|
||||
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct hci_request *req,
|
||||
u8 instance, bool force);
|
||||
|
||||
void __hci_req_update_class(struct hci_request *req);
|
||||
|
||||
/* Returns true if HCI commands were queued */
|
||||
bool hci_req_stop_discovery(struct hci_request *req);
|
||||
|
||||
static inline void hci_req_update_scan(struct hci_dev *hdev)
|
||||
{
|
||||
queue_work(hdev->req_workqueue, &hdev->scan_update);
|
||||
}
|
||||
|
||||
void __hci_req_update_scan(struct hci_request *req);
|
||||
|
||||
int hci_update_random_address(struct hci_request *req, bool require_privacy,
|
||||
u8 *own_addr_type);
|
||||
|
||||
void hci_update_background_scan(struct hci_dev *hdev);
|
||||
void __hci_update_background_scan(struct hci_request *req);
|
||||
|
||||
int hci_abort_conn(struct hci_conn *conn, u8 reason);
|
||||
void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
|
||||
u8 reason);
|
||||
|
||||
static inline void hci_update_background_scan(struct hci_dev *hdev)
|
||||
{
|
||||
queue_work(hdev->req_workqueue, &hdev->bg_scan_update);
|
||||
}
|
||||
|
||||
void hci_request_setup(struct hci_dev *hdev);
|
||||
void hci_request_cancel_all(struct hci_dev *hdev);
|
||||
|
||||
@@ -25,6 +25,7 @@
|
||||
/* Bluetooth HCI sockets. */
|
||||
|
||||
#include <linux/export.h>
|
||||
#include <linux/utsname.h>
|
||||
#include <asm/unaligned.h>
|
||||
|
||||
#include <net/bluetooth/bluetooth.h>
|
||||
@@ -120,13 +121,13 @@ static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
|
||||
/* Apply filter */
|
||||
flt = &hci_pi(sk)->filter;
|
||||
|
||||
flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
|
||||
flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;
|
||||
|
||||
if (!test_bit(flt_type, &flt->type_mask))
|
||||
return true;
|
||||
|
||||
/* Extra filter for event packets only */
|
||||
if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
|
||||
if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
|
||||
return false;
|
||||
|
||||
flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
|
||||
@@ -170,19 +171,19 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
|
||||
continue;
|
||||
|
||||
if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
|
||||
if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
|
||||
bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
|
||||
bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
|
||||
bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
|
||||
if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
|
||||
hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
|
||||
hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
|
||||
hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
|
||||
continue;
|
||||
if (is_filtered_packet(sk, skb))
|
||||
continue;
|
||||
} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
|
||||
if (!bt_cb(skb)->incoming)
|
||||
continue;
|
||||
if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
|
||||
bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
|
||||
bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
|
||||
if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
|
||||
hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
|
||||
hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
|
||||
continue;
|
||||
} else {
|
||||
/* Don't send frame to other channel types */
|
||||
@@ -196,7 +197,7 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
|
||||
continue;
|
||||
|
||||
/* Put type byte before the data */
|
||||
memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
|
||||
memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
|
||||
}
|
||||
|
||||
nskb = skb_clone(skb_copy, GFP_ATOMIC);
|
||||
@@ -262,7 +263,7 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
|
||||
|
||||
BT_DBG("hdev %p len %d", hdev, skb->len);
|
||||
|
||||
switch (bt_cb(skb)->pkt_type) {
|
||||
switch (hci_skb_pkt_type(skb)) {
|
||||
case HCI_COMMAND_PKT:
|
||||
opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
|
||||
break;
|
||||
@@ -294,7 +295,7 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
|
||||
return;
|
||||
|
||||
/* Put header before the data */
|
||||
hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
|
||||
hdr = (void *)skb_push(skb_copy, HCI_MON_HDR_SIZE);
|
||||
hdr->opcode = opcode;
|
||||
hdr->index = cpu_to_le16(hdev->id);
|
||||
hdr->len = cpu_to_le16(skb->len);
|
||||
@@ -375,7 +376,7 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
|
||||
|
||||
__net_timestamp(skb);
|
||||
|
||||
hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
|
||||
hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
|
||||
hdr->opcode = opcode;
|
||||
hdr->index = cpu_to_le16(hdev->id);
|
||||
hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
|
||||
@@ -383,6 +384,38 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
|
||||
return skb;
|
||||
}
|
||||
|
||||
static void __printf(2, 3)
|
||||
send_monitor_note(struct sock *sk, const char *fmt, ...)
|
||||
{
|
||||
size_t len;
|
||||
struct hci_mon_hdr *hdr;
|
||||
struct sk_buff *skb;
|
||||
va_list args;
|
||||
|
||||
va_start(args, fmt);
|
||||
len = vsnprintf(NULL, 0, fmt, args);
|
||||
va_end(args);
|
||||
|
||||
skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
|
||||
if (!skb)
|
||||
return;
|
||||
|
||||
va_start(args, fmt);
|
||||
vsprintf(skb_put(skb, len), fmt, args);
|
||||
*skb_put(skb, 1) = 0;
|
||||
va_end(args);
|
||||
|
||||
__net_timestamp(skb);
|
||||
|
||||
hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
|
||||
hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
|
||||
hdr->index = cpu_to_le16(HCI_DEV_NONE);
|
||||
hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
|
||||
|
||||
if (sock_queue_rcv_skb(sk, skb))
|
||||
kfree_skb(skb);
|
||||
}
|
||||
|
||||
static void send_monitor_replay(struct sock *sk)
|
||||
{
|
||||
struct hci_dev *hdev;
|
||||
@@ -436,18 +469,18 @@ static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
|
||||
if (!skb)
|
||||
return;
|
||||
|
||||
hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
|
||||
hdr = (void *)skb_put(skb, HCI_EVENT_HDR_SIZE);
|
||||
hdr->evt = HCI_EV_STACK_INTERNAL;
|
||||
hdr->plen = sizeof(*ev) + dlen;
|
||||
|
||||
ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
|
||||
ev = (void *)skb_put(skb, sizeof(*ev) + dlen);
|
||||
ev->type = type;
|
||||
memcpy(ev->data, data, dlen);
|
||||
|
||||
bt_cb(skb)->incoming = 1;
|
||||
__net_timestamp(skb);
|
||||
|
||||
bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
|
||||
hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
|
||||
hci_send_to_sock(hdev, skb);
|
||||
kfree_skb(skb);
|
||||
}
|
||||
@@ -653,20 +686,20 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
case HCIGETCONNINFO:
|
||||
return hci_get_conn_info(hdev, (void __user *) arg);
|
||||
return hci_get_conn_info(hdev, (void __user *)arg);
|
||||
|
||||
case HCIGETAUTHINFO:
|
||||
return hci_get_auth_info(hdev, (void __user *) arg);
|
||||
return hci_get_auth_info(hdev, (void __user *)arg);
|
||||
|
||||
case HCIBLOCKADDR:
|
||||
if (!capable(CAP_NET_ADMIN))
|
||||
return -EPERM;
|
||||
return hci_sock_blacklist_add(hdev, (void __user *) arg);
|
||||
return hci_sock_blacklist_add(hdev, (void __user *)arg);
|
||||
|
||||
case HCIUNBLOCKADDR:
|
||||
if (!capable(CAP_NET_ADMIN))
|
||||
return -EPERM;
|
||||
return hci_sock_blacklist_del(hdev, (void __user *) arg);
|
||||
return hci_sock_blacklist_del(hdev, (void __user *)arg);
|
||||
}
|
||||
|
||||
return -ENOIOCTLCMD;
|
||||
@@ -675,7 +708,7 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
|
||||
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
|
||||
unsigned long arg)
|
||||
{
|
||||
void __user *argp = (void __user *) arg;
|
||||
void __user *argp = (void __user *)arg;
|
||||
struct sock *sk = sock->sk;
|
||||
int err;
|
||||
|
||||
@@ -872,11 +905,28 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
|
||||
*/
|
||||
hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
|
||||
|
||||
send_monitor_note(sk, "Linux version %s (%s)",
|
||||
init_utsname()->release,
|
||||
init_utsname()->machine);
|
||||
send_monitor_note(sk, "Bluetooth subsystem version %s",
|
||||
BT_SUBSYS_VERSION);
|
||||
send_monitor_replay(sk);
|
||||
|
||||
atomic_inc(&monitor_promisc);
|
||||
break;
|
||||
|
||||
case HCI_CHANNEL_LOGGING:
|
||||
if (haddr.hci_dev != HCI_DEV_NONE) {
|
||||
err = -EINVAL;
|
||||
goto done;
|
||||
}
|
||||
|
||||
if (!capable(CAP_NET_ADMIN)) {
|
||||
err = -EPERM;
|
||||
goto done;
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
if (!hci_mgmt_chan_find(haddr.hci_channel)) {
|
||||
err = -EINVAL;
|
||||
@@ -926,7 +976,7 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
|
||||
static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
|
||||
int *addr_len, int peer)
|
||||
{
|
||||
struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
|
||||
struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
|
||||
struct sock *sk = sock->sk;
|
||||
struct hci_dev *hdev;
|
||||
int err = 0;
|
||||
@@ -991,8 +1041,8 @@ static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
|
||||
}
|
||||
}
|
||||
|
||||
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
|
||||
int flags)
|
||||
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
|
||||
size_t len, int flags)
|
||||
{
|
||||
int noblock = flags & MSG_DONTWAIT;
|
||||
struct sock *sk = sock->sk;
|
||||
@@ -1004,6 +1054,9 @@ static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
|
||||
if (flags & MSG_OOB)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (sk->sk_state == BT_CLOSED)
|
||||
return 0;
|
||||
|
||||
@@ -1150,6 +1203,90 @@ static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
|
||||
return err;
|
||||
}
|
||||
|
||||
static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
|
||||
{
|
||||
struct hci_mon_hdr *hdr;
|
||||
struct sk_buff *skb;
|
||||
struct hci_dev *hdev;
|
||||
u16 index;
|
||||
int err;
|
||||
|
||||
/* The logging frame consists at minimum of the standard header,
|
||||
* the priority byte, the ident length byte and at least one string
|
||||
* terminator NUL byte. Anything shorter are invalid packets.
|
||||
*/
|
||||
if (len < sizeof(*hdr) + 3)
|
||||
return -EINVAL;
|
||||
|
||||
skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
|
||||
if (!skb)
|
||||
return err;
|
||||
|
||||
if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
|
||||
err = -EFAULT;
|
||||
goto drop;
|
||||
}
|
||||
|
||||
hdr = (void *)skb->data;
|
||||
|
||||
if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
|
||||
err = -EINVAL;
|
||||
goto drop;
|
||||
}
|
||||
|
||||
if (__le16_to_cpu(hdr->opcode) == 0x0000) {
|
||||
__u8 priority = skb->data[sizeof(*hdr)];
|
||||
__u8 ident_len = skb->data[sizeof(*hdr) + 1];
|
||||
|
||||
/* Only the priorities 0-7 are valid and with that any other
|
||||
* value results in an invalid packet.
|
||||
*
|
||||
* The priority byte is followed by an ident length byte and
|
||||
* the NUL terminated ident string. Check that the ident
|
||||
* length is not overflowing the packet and also that the
|
||||
* ident string itself is NUL terminated. In case the ident
|
||||
* length is zero, the length value actually doubles as NUL
|
||||
* terminator identifier.
|
||||
*
|
||||
* The message follows the ident string (if present) and
|
||||
* must be NUL terminated. Otherwise it is not a valid packet.
|
||||
*/
|
||||
if (priority > 7 || skb->data[len - 1] != 0x00 ||
|
||||
ident_len > len - sizeof(*hdr) - 3 ||
|
||||
skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
|
||||
err = -EINVAL;
|
||||
goto drop;
|
||||
}
|
||||
} else {
|
||||
err = -EINVAL;
|
||||
goto drop;
|
||||
}
|
||||
|
||||
index = __le16_to_cpu(hdr->index);
|
||||
|
||||
if (index != MGMT_INDEX_NONE) {
|
||||
hdev = hci_dev_get(index);
|
||||
if (!hdev) {
|
||||
err = -ENODEV;
|
||||
goto drop;
|
||||
}
|
||||
} else {
|
||||
hdev = NULL;
|
||||
}
|
||||
|
||||
hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);
|
||||
|
||||
hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
|
||||
err = len;
|
||||
|
||||
if (hdev)
|
||||
hci_dev_put(hdev);
|
||||
|
||||
drop:
|
||||
kfree_skb(skb);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
|
||||
size_t len)
|
||||
{
|
||||
@@ -1179,6 +1316,9 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
|
||||
case HCI_CHANNEL_MONITOR:
|
||||
err = -EOPNOTSUPP;
|
||||
goto done;
|
||||
case HCI_CHANNEL_LOGGING:
|
||||
err = hci_logging_frame(sk, msg, len);
|
||||
goto done;
|
||||
default:
|
||||
mutex_lock(&mgmt_chan_list_lock);
|
||||
chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
|
||||
@@ -1211,7 +1351,7 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
|
||||
goto drop;
|
||||
}
|
||||
|
||||
bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
|
||||
hci_skb_pkt_type(skb) = skb->data[0];
|
||||
skb_pull(skb, 1);
|
||||
|
||||
if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
|
||||
@@ -1220,16 +1360,16 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
|
||||
*
|
||||
* However check that the packet type is valid.
|
||||
*/
|
||||
if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
|
||||
bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
|
||||
bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
|
||||
if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
|
||||
hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
|
||||
hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
|
||||
err = -EINVAL;
|
||||
goto drop;
|
||||
}
|
||||
|
||||
skb_queue_tail(&hdev->raw_q, skb);
|
||||
queue_work(hdev->workqueue, &hdev->tx_work);
|
||||
} else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
|
||||
} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
|
||||
u16 opcode = get_unaligned_le16(skb->data);
|
||||
u16 ogf = hci_opcode_ogf(opcode);
|
||||
u16 ocf = hci_opcode_ocf(opcode);
|
||||
@@ -1242,6 +1382,11 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
|
||||
goto drop;
|
||||
}
|
||||
|
||||
/* Since the opcode has already been extracted here, store
|
||||
* a copy of the value for later use by the drivers.
|
||||
*/
|
||||
hci_skb_opcode(skb) = opcode;
|
||||
|
||||
if (ogf == 0x3f) {
|
||||
skb_queue_tail(&hdev->raw_q, skb);
|
||||
queue_work(hdev->workqueue, &hdev->tx_work);
|
||||
@@ -1249,7 +1394,7 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
|
||||
/* Stand-alone HCI commands must be flagged as
|
||||
* single-command requests.
|
||||
*/
|
||||
bt_cb(skb)->hci.req_start = true;
|
||||
bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
|
||||
|
||||
skb_queue_tail(&hdev->cmd_q, skb);
|
||||
queue_work(hdev->workqueue, &hdev->cmd_work);
|
||||
@@ -1260,8 +1405,8 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
|
||||
goto drop;
|
||||
}
|
||||
|
||||
if (bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
|
||||
bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
|
||||
if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
|
||||
hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
|
||||
err = -EINVAL;
|
||||
goto drop;
|
||||
}
|
||||
|
||||
@@ -6538,8 +6538,6 @@ static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
|
||||
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
int err = 0;
|
||||
|
||||
BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
|
||||
chan->rx_state);
|
||||
|
||||
@@ -6570,7 +6568,7 @@ static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
|
||||
chan->last_acked_seq = control->txseq;
|
||||
chan->expected_tx_seq = __next_seq(chan, control->txseq);
|
||||
|
||||
return err;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
|
||||
@@ -7113,8 +7111,6 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
|
||||
chan->dcid = cid;
|
||||
|
||||
if (bdaddr_type_is_le(dst_type)) {
|
||||
u8 role;
|
||||
|
||||
/* Convert from L2CAP channel address type to HCI address type
|
||||
*/
|
||||
if (dst_type == BDADDR_LE_PUBLIC)
|
||||
@@ -7123,14 +7119,15 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
|
||||
dst_type = ADDR_LE_DEV_RANDOM;
|
||||
|
||||
if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
|
||||
role = HCI_ROLE_SLAVE;
|
||||
hcon = hci_connect_le(hdev, dst, dst_type,
|
||||
chan->sec_level,
|
||||
HCI_LE_CONN_TIMEOUT,
|
||||
HCI_ROLE_SLAVE);
|
||||
else
|
||||
role = HCI_ROLE_MASTER;
|
||||
hcon = hci_connect_le_scan(hdev, dst, dst_type,
|
||||
chan->sec_level,
|
||||
HCI_LE_CONN_TIMEOUT);
|
||||
|
||||
hcon = hci_connect_le_scan(hdev, dst, dst_type,
|
||||
chan->sec_level,
|
||||
HCI_LE_CONN_TIMEOUT,
|
||||
role);
|
||||
} else {
|
||||
u8 auth_type = l2cap_get_auth_type(chan);
|
||||
hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
|
||||
|
||||
1917
net/bluetooth/mgmt.c
1917
net/bluetooth/mgmt.c
File diff suppressed because it is too large
Load Diff
@@ -692,11 +692,9 @@ static struct rfcomm_session *rfcomm_session_del(struct rfcomm_session *s)
|
||||
|
||||
static struct rfcomm_session *rfcomm_session_get(bdaddr_t *src, bdaddr_t *dst)
|
||||
{
|
||||
struct rfcomm_session *s;
|
||||
struct list_head *p, *n;
|
||||
struct rfcomm_session *s, *n;
|
||||
struct l2cap_chan *chan;
|
||||
list_for_each_safe(p, n, &session_list) {
|
||||
s = list_entry(p, struct rfcomm_session, list);
|
||||
list_for_each_entry_safe(s, n, &session_list, list) {
|
||||
chan = l2cap_pi(s->sock->sk)->chan;
|
||||
|
||||
if ((!bacmp(src, BDADDR_ANY) || !bacmp(&chan->src, src)) &&
|
||||
@@ -709,16 +707,14 @@ static struct rfcomm_session *rfcomm_session_get(bdaddr_t *src, bdaddr_t *dst)
|
||||
static struct rfcomm_session *rfcomm_session_close(struct rfcomm_session *s,
|
||||
int err)
|
||||
{
|
||||
struct rfcomm_dlc *d;
|
||||
struct list_head *p, *n;
|
||||
struct rfcomm_dlc *d, *n;
|
||||
|
||||
s->state = BT_CLOSED;
|
||||
|
||||
BT_DBG("session %p state %ld err %d", s, s->state, err);
|
||||
|
||||
/* Close all dlcs */
|
||||
list_for_each_safe(p, n, &s->dlcs) {
|
||||
d = list_entry(p, struct rfcomm_dlc, list);
|
||||
list_for_each_entry_safe(d, n, &s->dlcs, list) {
|
||||
d->state = BT_CLOSED;
|
||||
__rfcomm_dlc_close(d, err);
|
||||
}
|
||||
@@ -1771,13 +1767,11 @@ static struct rfcomm_session *rfcomm_recv_frame(struct rfcomm_session *s,
|
||||
|
||||
static void rfcomm_process_connect(struct rfcomm_session *s)
|
||||
{
|
||||
struct rfcomm_dlc *d;
|
||||
struct list_head *p, *n;
|
||||
struct rfcomm_dlc *d, *n;
|
||||
|
||||
BT_DBG("session %p state %ld", s, s->state);
|
||||
|
||||
list_for_each_safe(p, n, &s->dlcs) {
|
||||
d = list_entry(p, struct rfcomm_dlc, list);
|
||||
list_for_each_entry_safe(d, n, &s->dlcs, list) {
|
||||
if (d->state == BT_CONFIG) {
|
||||
d->mtu = s->mtu;
|
||||
if (rfcomm_check_security(d)) {
|
||||
@@ -1843,14 +1837,11 @@ static int rfcomm_process_tx(struct rfcomm_dlc *d)
|
||||
|
||||
static void rfcomm_process_dlcs(struct rfcomm_session *s)
|
||||
{
|
||||
struct rfcomm_dlc *d;
|
||||
struct list_head *p, *n;
|
||||
struct rfcomm_dlc *d, *n;
|
||||
|
||||
BT_DBG("session %p state %ld", s, s->state);
|
||||
|
||||
list_for_each_safe(p, n, &s->dlcs) {
|
||||
d = list_entry(p, struct rfcomm_dlc, list);
|
||||
|
||||
list_for_each_entry_safe(d, n, &s->dlcs, list) {
|
||||
if (test_bit(RFCOMM_TIMED_OUT, &d->flags)) {
|
||||
__rfcomm_dlc_close(d, ETIMEDOUT);
|
||||
continue;
|
||||
@@ -1985,14 +1976,11 @@ static struct rfcomm_session *rfcomm_check_connection(struct rfcomm_session *s)
|
||||
|
||||
static void rfcomm_process_sessions(void)
|
||||
{
|
||||
struct list_head *p, *n;
|
||||
struct rfcomm_session *s, *n;
|
||||
|
||||
rfcomm_lock();
|
||||
|
||||
list_for_each_safe(p, n, &session_list) {
|
||||
struct rfcomm_session *s;
|
||||
s = list_entry(p, struct rfcomm_session, list);
|
||||
|
||||
list_for_each_entry_safe(s, n, &session_list, list) {
|
||||
if (test_and_clear_bit(RFCOMM_TIMED_OUT, &s->flags)) {
|
||||
s->state = BT_DISCONN;
|
||||
rfcomm_send_disc(s, 0);
|
||||
@@ -2075,15 +2063,12 @@ static int rfcomm_add_listener(bdaddr_t *ba)
|
||||
|
||||
static void rfcomm_kill_listener(void)
|
||||
{
|
||||
struct rfcomm_session *s;
|
||||
struct list_head *p, *n;
|
||||
struct rfcomm_session *s, *n;
|
||||
|
||||
BT_DBG("");
|
||||
|
||||
list_for_each_safe(p, n, &session_list) {
|
||||
s = list_entry(p, struct rfcomm_session, list);
|
||||
list_for_each_entry_safe(s, n, &session_list, list)
|
||||
rfcomm_session_del(s);
|
||||
}
|
||||
}
|
||||
|
||||
static int rfcomm_run(void *unused)
|
||||
@@ -2113,8 +2098,7 @@ static int rfcomm_run(void *unused)
|
||||
static void rfcomm_security_cfm(struct hci_conn *conn, u8 status, u8 encrypt)
|
||||
{
|
||||
struct rfcomm_session *s;
|
||||
struct rfcomm_dlc *d;
|
||||
struct list_head *p, *n;
|
||||
struct rfcomm_dlc *d, *n;
|
||||
|
||||
BT_DBG("conn %p status 0x%02x encrypt 0x%02x", conn, status, encrypt);
|
||||
|
||||
@@ -2122,9 +2106,7 @@ static void rfcomm_security_cfm(struct hci_conn *conn, u8 status, u8 encrypt)
|
||||
if (!s)
|
||||
return;
|
||||
|
||||
list_for_each_safe(p, n, &s->dlcs) {
|
||||
d = list_entry(p, struct rfcomm_dlc, list);
|
||||
|
||||
list_for_each_entry_safe(d, n, &s->dlcs, list) {
|
||||
if (test_and_clear_bit(RFCOMM_SEC_PENDING, &d->flags)) {
|
||||
rfcomm_dlc_clear_timer(d);
|
||||
if (status || encrypt == 0x00) {
|
||||
|
||||
@@ -135,6 +135,7 @@ static void fdb_del_external_learn(struct net_bridge_fdb_entry *f)
|
||||
{
|
||||
struct switchdev_obj_port_fdb fdb = {
|
||||
.obj = {
|
||||
.orig_dev = f->dst->dev,
|
||||
.id = SWITCHDEV_OBJ_ID_PORT_FDB,
|
||||
.flags = SWITCHDEV_F_DEFER,
|
||||
},
|
||||
|
||||
@@ -493,7 +493,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
|
||||
|
||||
dev->priv_flags |= IFF_BRIDGE_PORT;
|
||||
|
||||
err = netdev_master_upper_dev_link(dev, br->dev);
|
||||
err = netdev_master_upper_dev_link(dev, br->dev, NULL, NULL);
|
||||
if (err)
|
||||
goto err5;
|
||||
|
||||
@@ -511,8 +511,11 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
|
||||
if (br_fdb_insert(br, p, dev->dev_addr, 0))
|
||||
netdev_err(dev, "failed insert local address bridge forwarding table\n");
|
||||
|
||||
if (nbp_vlan_init(p))
|
||||
err = nbp_vlan_init(p);
|
||||
if (err) {
|
||||
netdev_err(dev, "failed to initialize vlan filtering on this port\n");
|
||||
goto err6;
|
||||
}
|
||||
|
||||
spin_lock_bh(&br->lock);
|
||||
changed_addr = br_stp_recalculate_bridge_id(br);
|
||||
@@ -533,6 +536,12 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
|
||||
|
||||
return 0;
|
||||
|
||||
err6:
|
||||
list_del_rcu(&p->list);
|
||||
br_fdb_delete_by_port(br, p, 0, 1);
|
||||
nbp_update_port_count(br);
|
||||
netdev_upper_dev_unlink(dev, br->dev);
|
||||
|
||||
err5:
|
||||
dev->priv_flags &= ~IFF_BRIDGE_PORT;
|
||||
netdev_rx_handler_unregister(dev);
|
||||
|
||||
@@ -7,6 +7,7 @@
|
||||
#include <linux/if_ether.h>
|
||||
#include <net/ip.h>
|
||||
#include <net/netlink.h>
|
||||
#include <net/switchdev.h>
|
||||
#if IS_ENABLED(CONFIG_IPV6)
|
||||
#include <net/ipv6.h>
|
||||
#include <net/addrconf.h>
|
||||
@@ -210,10 +211,32 @@ static inline size_t rtnl_mdb_nlmsg_size(void)
|
||||
static void __br_mdb_notify(struct net_device *dev, struct br_mdb_entry *entry,
|
||||
int type)
|
||||
{
|
||||
struct switchdev_obj_port_mdb mdb = {
|
||||
.obj = {
|
||||
.id = SWITCHDEV_OBJ_ID_PORT_MDB,
|
||||
.flags = SWITCHDEV_F_DEFER,
|
||||
},
|
||||
.vid = entry->vid,
|
||||
};
|
||||
struct net_device *port_dev;
|
||||
struct net *net = dev_net(dev);
|
||||
struct sk_buff *skb;
|
||||
int err = -ENOBUFS;
|
||||
|
||||
port_dev = __dev_get_by_index(net, entry->ifindex);
|
||||
if (entry->addr.proto == htons(ETH_P_IP))
|
||||
ip_eth_mc_map(entry->addr.u.ip4, mdb.addr);
|
||||
#if IS_ENABLED(CONFIG_IPV6)
|
||||
else
|
||||
ipv6_eth_mc_map(&entry->addr.u.ip6, mdb.addr);
|
||||
#endif
|
||||
|
||||
mdb.obj.orig_dev = port_dev;
|
||||
if (port_dev && type == RTM_NEWMDB)
|
||||
switchdev_port_obj_add(port_dev, &mdb.obj);
|
||||
else if (port_dev && type == RTM_DELMDB)
|
||||
switchdev_port_obj_del(port_dev, &mdb.obj);
|
||||
|
||||
skb = nlmsg_new(rtnl_mdb_nlmsg_size(), GFP_ATOMIC);
|
||||
if (!skb)
|
||||
goto errout;
|
||||
|
||||
@@ -40,6 +40,7 @@ void br_log_state(const struct net_bridge_port *p)
|
||||
void br_set_state(struct net_bridge_port *p, unsigned int state)
|
||||
{
|
||||
struct switchdev_attr attr = {
|
||||
.orig_dev = p->dev,
|
||||
.id = SWITCHDEV_ATTR_ID_PORT_STP_STATE,
|
||||
.flags = SWITCHDEV_F_DEFER,
|
||||
.u.stp_state = state,
|
||||
@@ -570,6 +571,7 @@ int br_set_max_age(struct net_bridge *br, unsigned long val)
|
||||
int br_set_ageing_time(struct net_bridge *br, u32 ageing_time)
|
||||
{
|
||||
struct switchdev_attr attr = {
|
||||
.orig_dev = br->dev,
|
||||
.id = SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME,
|
||||
.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
|
||||
.u.ageing_time = ageing_time,
|
||||
|
||||
@@ -37,6 +37,7 @@ static inline port_id br_make_port_id(__u8 priority, __u16 port_no)
|
||||
void br_init_port(struct net_bridge_port *p)
|
||||
{
|
||||
struct switchdev_attr attr = {
|
||||
.orig_dev = p->dev,
|
||||
.id = SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME,
|
||||
.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP | SWITCHDEV_F_DEFER,
|
||||
.u.ageing_time = jiffies_to_clock_t(p->br->ageing_time),
|
||||
|
||||
@@ -22,7 +22,6 @@
|
||||
|
||||
#include "br_private.h"
|
||||
|
||||
#define to_dev(obj) container_of(obj, struct device, kobj)
|
||||
#define to_bridge(cd) ((struct net_bridge *)netdev_priv(to_net_dev(cd)))
|
||||
|
||||
/*
|
||||
@@ -814,7 +813,7 @@ static ssize_t brforward_read(struct file *filp, struct kobject *kobj,
|
||||
struct bin_attribute *bin_attr,
|
||||
char *buf, loff_t off, size_t count)
|
||||
{
|
||||
struct device *dev = to_dev(kobj);
|
||||
struct device *dev = kobj_to_dev(kobj);
|
||||
struct net_bridge *br = to_bridge(dev);
|
||||
int n;
|
||||
|
||||
|
||||
@@ -73,6 +73,7 @@ static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
|
||||
u16 vid, u16 flags)
|
||||
{
|
||||
struct switchdev_obj_port_vlan v = {
|
||||
.obj.orig_dev = dev,
|
||||
.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
|
||||
.flags = flags,
|
||||
.vid_begin = vid,
|
||||
@@ -120,6 +121,7 @@ static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
|
||||
u16 vid)
|
||||
{
|
||||
struct switchdev_obj_port_vlan v = {
|
||||
.obj.orig_dev = dev,
|
||||
.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
|
||||
.vid_begin = vid,
|
||||
.vid_end = vid,
|
||||
@@ -624,9 +626,21 @@ void br_recalculate_fwd_mask(struct net_bridge *br)
|
||||
|
||||
int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
|
||||
{
|
||||
struct switchdev_attr attr = {
|
||||
.orig_dev = br->dev,
|
||||
.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
|
||||
.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
|
||||
.u.vlan_filtering = val,
|
||||
};
|
||||
int err;
|
||||
|
||||
if (br->vlan_enabled == val)
|
||||
return 0;
|
||||
|
||||
err = switchdev_port_attr_set(br->dev, &attr);
|
||||
if (err && err != -EOPNOTSUPP)
|
||||
return err;
|
||||
|
||||
br->vlan_enabled = val;
|
||||
br_manage_promisc(br);
|
||||
recalculate_group_addr(br);
|
||||
@@ -637,13 +651,15 @@ int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
|
||||
|
||||
int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
|
||||
{
|
||||
int err;
|
||||
|
||||
if (!rtnl_trylock())
|
||||
return restart_syscall();
|
||||
|
||||
__br_vlan_filter_toggle(br, val);
|
||||
err = __br_vlan_filter_toggle(br, val);
|
||||
rtnl_unlock();
|
||||
|
||||
return 0;
|
||||
return err;
|
||||
}
|
||||
|
||||
int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
|
||||
@@ -891,6 +907,12 @@ int br_vlan_init(struct net_bridge *br)
|
||||
|
||||
int nbp_vlan_init(struct net_bridge_port *p)
|
||||
{
|
||||
struct switchdev_attr attr = {
|
||||
.orig_dev = p->br->dev,
|
||||
.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
|
||||
.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
|
||||
.u.vlan_filtering = p->br->vlan_enabled,
|
||||
};
|
||||
struct net_bridge_vlan_group *vg;
|
||||
int ret = -ENOMEM;
|
||||
|
||||
@@ -898,6 +920,10 @@ int nbp_vlan_init(struct net_bridge_port *p)
|
||||
if (!vg)
|
||||
goto out;
|
||||
|
||||
ret = switchdev_port_attr_set(p->dev, &attr);
|
||||
if (ret && ret != -EOPNOTSUPP)
|
||||
goto err_vlan_enabled;
|
||||
|
||||
ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
|
||||
if (ret)
|
||||
goto err_rhtbl;
|
||||
@@ -917,6 +943,7 @@ int nbp_vlan_init(struct net_bridge_port *p)
|
||||
RCU_INIT_POINTER(p->vlgrp, NULL);
|
||||
synchronize_rcu();
|
||||
rhashtable_destroy(&vg->vlan_hash);
|
||||
err_vlan_enabled:
|
||||
err_rhtbl:
|
||||
kfree(vg);
|
||||
|
||||
|
||||
@@ -65,8 +65,8 @@ ebt_ip6_mt(const struct sk_buff *skb, struct xt_action_param *par)
|
||||
return false;
|
||||
if (FWINV(info->protocol != nexthdr, EBT_IP6_PROTO))
|
||||
return false;
|
||||
if (!(info->bitmask & ( EBT_IP6_DPORT |
|
||||
EBT_IP6_SPORT | EBT_IP6_ICMP6)))
|
||||
if (!(info->bitmask & (EBT_IP6_DPORT |
|
||||
EBT_IP6_SPORT | EBT_IP6_ICMP6)))
|
||||
return true;
|
||||
|
||||
/* min icmpv6 headersize is 4, so sizeof(_pkthdr) is ok. */
|
||||
|
||||
@@ -36,14 +36,12 @@ static int ebt_log_tg_check(const struct xt_tgchk_param *par)
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct tcpudphdr
|
||||
{
|
||||
struct tcpudphdr {
|
||||
__be16 src;
|
||||
__be16 dst;
|
||||
};
|
||||
|
||||
struct arppayload
|
||||
{
|
||||
struct arppayload {
|
||||
unsigned char mac_src[ETH_ALEN];
|
||||
unsigned char ip_src[4];
|
||||
unsigned char mac_dst[ETH_ALEN];
|
||||
@@ -152,7 +150,8 @@ ebt_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum,
|
||||
ntohs(ah->ar_op));
|
||||
|
||||
/* If it's for Ethernet and the lengths are OK,
|
||||
* then log the ARP payload */
|
||||
* then log the ARP payload
|
||||
*/
|
||||
if (ah->ar_hrd == htons(1) &&
|
||||
ah->ar_hln == ETH_ALEN &&
|
||||
ah->ar_pln == sizeof(__be32)) {
|
||||
|
||||
@@ -41,7 +41,7 @@ struct stp_config_pdu {
|
||||
#define NR32(p) ((p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3])
|
||||
|
||||
static bool ebt_filter_config(const struct ebt_stp_info *info,
|
||||
const struct stp_config_pdu *stpc)
|
||||
const struct stp_config_pdu *stpc)
|
||||
{
|
||||
const struct ebt_stp_config_info *c;
|
||||
uint16_t v16;
|
||||
|
||||
@@ -66,7 +66,8 @@ ebt_vlan_mt(const struct sk_buff *skb, struct xt_action_param *par)
|
||||
* - Canonical Format Indicator (CFI). The Canonical Format Indicator
|
||||
* (CFI) is a single bit flag value. Currently ignored.
|
||||
* - VLAN Identifier (VID). The VID is encoded as
|
||||
* an unsigned binary number. */
|
||||
* an unsigned binary number.
|
||||
*/
|
||||
id = TCI & VLAN_VID_MASK;
|
||||
prio = (TCI >> 13) & 0x7;
|
||||
|
||||
@@ -98,7 +99,8 @@ static int ebt_vlan_mt_check(const struct xt_mtchk_param *par)
|
||||
}
|
||||
|
||||
/* Check for bitmask range
|
||||
* True if even one bit is out of mask */
|
||||
* True if even one bit is out of mask
|
||||
*/
|
||||
if (info->bitmask & ~EBT_VLAN_MASK) {
|
||||
pr_debug("bitmask %2X is out of mask (%2X)\n",
|
||||
info->bitmask, EBT_VLAN_MASK);
|
||||
@@ -117,7 +119,8 @@ static int ebt_vlan_mt_check(const struct xt_mtchk_param *par)
|
||||
* 0 - The null VLAN ID.
|
||||
* 1 - The default Port VID (PVID)
|
||||
* 0x0FFF - Reserved for implementation use.
|
||||
* if_vlan.h: VLAN_N_VID 4096. */
|
||||
* if_vlan.h: VLAN_N_VID 4096.
|
||||
*/
|
||||
if (GET_BITMASK(EBT_VLAN_ID)) {
|
||||
if (!!info->id) { /* if id!=0 => check vid range */
|
||||
if (info->id > VLAN_N_VID) {
|
||||
@@ -128,7 +131,8 @@ static int ebt_vlan_mt_check(const struct xt_mtchk_param *par)
|
||||
/* Note: This is valid VLAN-tagged frame point.
|
||||
* Any value of user_priority are acceptable,
|
||||
* but should be ignored according to 802.1Q Std.
|
||||
* So we just drop the prio flag. */
|
||||
* So we just drop the prio flag.
|
||||
*/
|
||||
info->bitmask &= ~EBT_VLAN_PRIO;
|
||||
}
|
||||
/* Else, id=0 (null VLAN ID) => user_priority range (any?) */
|
||||
@@ -143,7 +147,8 @@ static int ebt_vlan_mt_check(const struct xt_mtchk_param *par)
|
||||
}
|
||||
/* Check for encapsulated proto range - it is possible to be
|
||||
* any value for u_short range.
|
||||
* if_ether.h: ETH_ZLEN 60 - Min. octets in frame sans FCS */
|
||||
* if_ether.h: ETH_ZLEN 60 - Min. octets in frame sans FCS
|
||||
*/
|
||||
if (GET_BITMASK(EBT_VLAN_ENCAP)) {
|
||||
if ((unsigned short) ntohs(info->encap) < ETH_ZLEN) {
|
||||
pr_debug("encap frame length %d is less than "
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
#include <linux/module.h>
|
||||
|
||||
#define FILTER_VALID_HOOKS ((1 << NF_BR_LOCAL_IN) | (1 << NF_BR_FORWARD) | \
|
||||
(1 << NF_BR_LOCAL_OUT))
|
||||
(1 << NF_BR_LOCAL_OUT))
|
||||
|
||||
static struct ebt_entries initial_chains[] = {
|
||||
{
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
#include <linux/module.h>
|
||||
|
||||
#define NAT_VALID_HOOKS ((1 << NF_BR_PRE_ROUTING) | (1 << NF_BR_LOCAL_OUT) | \
|
||||
(1 << NF_BR_POST_ROUTING))
|
||||
(1 << NF_BR_POST_ROUTING))
|
||||
|
||||
static struct ebt_entries initial_chains[] = {
|
||||
{
|
||||
|
||||
@@ -35,8 +35,7 @@
|
||||
"report to author: "format, ## args)
|
||||
/* #define BUGPRINT(format, args...) */
|
||||
|
||||
/*
|
||||
* Each cpu has its own set of counters, so there is no need for write_lock in
|
||||
/* Each cpu has its own set of counters, so there is no need for write_lock in
|
||||
* the softirq
|
||||
* For reading or updating the counters, the user context needs to
|
||||
* get a write_lock
|
||||
@@ -46,7 +45,7 @@
|
||||
#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
|
||||
#define COUNTER_OFFSET(n) (SMP_ALIGN(n * sizeof(struct ebt_counter)))
|
||||
#define COUNTER_BASE(c, n, cpu) ((struct ebt_counter *)(((char *)c) + \
|
||||
COUNTER_OFFSET(n) * cpu))
|
||||
COUNTER_OFFSET(n) * cpu))
|
||||
|
||||
|
||||
|
||||
@@ -126,7 +125,7 @@ ebt_dev_check(const char *entry, const struct net_device *device)
|
||||
/* process standard matches */
|
||||
static inline int
|
||||
ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
|
||||
const struct net_device *in, const struct net_device *out)
|
||||
const struct net_device *in, const struct net_device *out)
|
||||
{
|
||||
const struct ethhdr *h = eth_hdr(skb);
|
||||
const struct net_bridge_port *p;
|
||||
@@ -162,7 +161,7 @@ ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
|
||||
for (i = 0; i < 6; i++)
|
||||
verdict |= (h->h_source[i] ^ e->sourcemac[i]) &
|
||||
e->sourcemsk[i];
|
||||
if (FWINV2(verdict != 0, EBT_ISOURCE) )
|
||||
if (FWINV2(verdict != 0, EBT_ISOURCE))
|
||||
return 1;
|
||||
}
|
||||
if (e->bitmask & EBT_DESTMAC) {
|
||||
@@ -170,7 +169,7 @@ ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
|
||||
for (i = 0; i < 6; i++)
|
||||
verdict |= (h->h_dest[i] ^ e->destmac[i]) &
|
||||
e->destmsk[i];
|
||||
if (FWINV2(verdict != 0, EBT_IDEST) )
|
||||
if (FWINV2(verdict != 0, EBT_IDEST))
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
@@ -237,7 +236,8 @@ unsigned int ebt_do_table(struct sk_buff *skb,
|
||||
(*(counter_base + i)).bcnt += skb->len;
|
||||
|
||||
/* these should only watch: not modify, nor tell us
|
||||
what to do with the packet */
|
||||
* what to do with the packet
|
||||
*/
|
||||
EBT_WATCHER_ITERATE(point, ebt_do_watcher, skb, &acpar);
|
||||
|
||||
t = (struct ebt_entry_target *)
|
||||
@@ -323,7 +323,7 @@ unsigned int ebt_do_table(struct sk_buff *skb,
|
||||
/* If it succeeds, returns element and locks mutex */
|
||||
static inline void *
|
||||
find_inlist_lock_noload(struct list_head *head, const char *name, int *error,
|
||||
struct mutex *mutex)
|
||||
struct mutex *mutex)
|
||||
{
|
||||
struct {
|
||||
struct list_head list;
|
||||
@@ -342,7 +342,7 @@ find_inlist_lock_noload(struct list_head *head, const char *name, int *error,
|
||||
|
||||
static void *
|
||||
find_inlist_lock(struct list_head *head, const char *name, const char *prefix,
|
||||
int *error, struct mutex *mutex)
|
||||
int *error, struct mutex *mutex)
|
||||
{
|
||||
return try_then_request_module(
|
||||
find_inlist_lock_noload(head, name, error, mutex),
|
||||
@@ -451,7 +451,8 @@ static int ebt_verify_pointers(const struct ebt_replace *repl,
|
||||
if (i != NF_BR_NUMHOOKS || !(e->bitmask & EBT_ENTRY_OR_ENTRIES)) {
|
||||
if (e->bitmask != 0) {
|
||||
/* we make userspace set this right,
|
||||
so there is no misunderstanding */
|
||||
* so there is no misunderstanding
|
||||
*/
|
||||
BUGPRINT("EBT_ENTRY_OR_ENTRIES shouldn't be set "
|
||||
"in distinguisher\n");
|
||||
return -EINVAL;
|
||||
@@ -487,15 +488,14 @@ static int ebt_verify_pointers(const struct ebt_replace *repl,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* this one is very careful, as it is the first function
|
||||
/* this one is very careful, as it is the first function
|
||||
* to parse the userspace data
|
||||
*/
|
||||
static inline int
|
||||
ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
|
||||
const struct ebt_table_info *newinfo,
|
||||
unsigned int *n, unsigned int *cnt,
|
||||
unsigned int *totalcnt, unsigned int *udc_cnt)
|
||||
const struct ebt_table_info *newinfo,
|
||||
unsigned int *n, unsigned int *cnt,
|
||||
unsigned int *totalcnt, unsigned int *udc_cnt)
|
||||
{
|
||||
int i;
|
||||
|
||||
@@ -504,10 +504,12 @@ ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
|
||||
break;
|
||||
}
|
||||
/* beginning of a new chain
|
||||
if i == NF_BR_NUMHOOKS it must be a user defined chain */
|
||||
* if i == NF_BR_NUMHOOKS it must be a user defined chain
|
||||
*/
|
||||
if (i != NF_BR_NUMHOOKS || !e->bitmask) {
|
||||
/* this checks if the previous chain has as many entries
|
||||
as it said it has */
|
||||
* as it said it has
|
||||
*/
|
||||
if (*n != *cnt) {
|
||||
BUGPRINT("nentries does not equal the nr of entries "
|
||||
"in the chain\n");
|
||||
@@ -549,20 +551,18 @@ ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct ebt_cl_stack
|
||||
{
|
||||
struct ebt_cl_stack {
|
||||
struct ebt_chainstack cs;
|
||||
int from;
|
||||
unsigned int hookmask;
|
||||
};
|
||||
|
||||
/*
|
||||
* we need these positions to check that the jumps to a different part of the
|
||||
/* We need these positions to check that the jumps to a different part of the
|
||||
* entries is a jump to the beginning of a new chain.
|
||||
*/
|
||||
static inline int
|
||||
ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo,
|
||||
unsigned int *n, struct ebt_cl_stack *udc)
|
||||
unsigned int *n, struct ebt_cl_stack *udc)
|
||||
{
|
||||
int i;
|
||||
|
||||
@@ -649,9 +649,9 @@ ebt_cleanup_entry(struct ebt_entry *e, struct net *net, unsigned int *cnt)
|
||||
|
||||
static inline int
|
||||
ebt_check_entry(struct ebt_entry *e, struct net *net,
|
||||
const struct ebt_table_info *newinfo,
|
||||
const char *name, unsigned int *cnt,
|
||||
struct ebt_cl_stack *cl_s, unsigned int udc_cnt)
|
||||
const struct ebt_table_info *newinfo,
|
||||
const char *name, unsigned int *cnt,
|
||||
struct ebt_cl_stack *cl_s, unsigned int udc_cnt)
|
||||
{
|
||||
struct ebt_entry_target *t;
|
||||
struct xt_target *target;
|
||||
@@ -673,7 +673,7 @@ ebt_check_entry(struct ebt_entry *e, struct net *net,
|
||||
BUGPRINT("Unknown flag for inv bitmask\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
if ( (e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3) ) {
|
||||
if ((e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3)) {
|
||||
BUGPRINT("NOPROTO & 802_3 not allowed\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
@@ -687,7 +687,8 @@ ebt_check_entry(struct ebt_entry *e, struct net *net,
|
||||
break;
|
||||
}
|
||||
/* (1 << NF_BR_NUMHOOKS) tells the check functions the rule is on
|
||||
a base chain */
|
||||
* a base chain
|
||||
*/
|
||||
if (i < NF_BR_NUMHOOKS)
|
||||
hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
|
||||
else {
|
||||
@@ -758,13 +759,12 @@ ebt_check_entry(struct ebt_entry *e, struct net *net,
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* checks for loops and sets the hook mask for udc
|
||||
/* checks for loops and sets the hook mask for udc
|
||||
* the hook mask for udc tells us from which base chains the udc can be
|
||||
* accessed. This mask is a parameter to the check() functions of the extensions
|
||||
*/
|
||||
static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack *cl_s,
|
||||
unsigned int udc_cnt, unsigned int hooknr, char *base)
|
||||
unsigned int udc_cnt, unsigned int hooknr, char *base)
|
||||
{
|
||||
int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict;
|
||||
const struct ebt_entry *e = (struct ebt_entry *)chain->data;
|
||||
@@ -853,7 +853,8 @@ static int translate_table(struct net *net, const char *name,
|
||||
return -EINVAL;
|
||||
}
|
||||
/* make sure chains are ordered after each other in same order
|
||||
as their corresponding hooks */
|
||||
* as their corresponding hooks
|
||||
*/
|
||||
for (j = i + 1; j < NF_BR_NUMHOOKS; j++) {
|
||||
if (!newinfo->hook_entry[j])
|
||||
continue;
|
||||
@@ -868,7 +869,8 @@ static int translate_table(struct net *net, const char *name,
|
||||
i = 0; /* holds the expected nr. of entries for the chain */
|
||||
j = 0; /* holds the up to now counted entries for the chain */
|
||||
k = 0; /* holds the total nr. of entries, should equal
|
||||
newinfo->nentries afterwards */
|
||||
* newinfo->nentries afterwards
|
||||
*/
|
||||
udc_cnt = 0; /* will hold the nr. of user defined chains (udc) */
|
||||
ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
|
||||
ebt_check_entry_size_and_hooks, newinfo,
|
||||
@@ -888,10 +890,12 @@ static int translate_table(struct net *net, const char *name,
|
||||
}
|
||||
|
||||
/* get the location of the udc, put them in an array
|
||||
while we're at it, allocate the chainstack */
|
||||
* while we're at it, allocate the chainstack
|
||||
*/
|
||||
if (udc_cnt) {
|
||||
/* this will get free'd in do_replace()/ebt_register_table()
|
||||
if an error occurs */
|
||||
* if an error occurs
|
||||
*/
|
||||
newinfo->chainstack =
|
||||
vmalloc(nr_cpu_ids * sizeof(*(newinfo->chainstack)));
|
||||
if (!newinfo->chainstack)
|
||||
@@ -932,14 +936,15 @@ static int translate_table(struct net *net, const char *name,
|
||||
}
|
||||
|
||||
/* we now know the following (along with E=mc²):
|
||||
- the nr of entries in each chain is right
|
||||
- the size of the allocated space is right
|
||||
- all valid hooks have a corresponding chain
|
||||
- there are no loops
|
||||
- wrong data can still be on the level of a single entry
|
||||
- could be there are jumps to places that are not the
|
||||
beginning of a chain. This can only occur in chains that
|
||||
are not accessible from any base chains, so we don't care. */
|
||||
* - the nr of entries in each chain is right
|
||||
* - the size of the allocated space is right
|
||||
* - all valid hooks have a corresponding chain
|
||||
* - there are no loops
|
||||
* - wrong data can still be on the level of a single entry
|
||||
* - could be there are jumps to places that are not the
|
||||
* beginning of a chain. This can only occur in chains that
|
||||
* are not accessible from any base chains, so we don't care.
|
||||
*/
|
||||
|
||||
/* used to know what we need to clean up if something goes wrong */
|
||||
i = 0;
|
||||
@@ -955,7 +960,7 @@ static int translate_table(struct net *net, const char *name,
|
||||
|
||||
/* called under write_lock */
|
||||
static void get_counters(const struct ebt_counter *oldcounters,
|
||||
struct ebt_counter *counters, unsigned int nentries)
|
||||
struct ebt_counter *counters, unsigned int nentries)
|
||||
{
|
||||
int i, cpu;
|
||||
struct ebt_counter *counter_base;
|
||||
@@ -986,7 +991,8 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl,
|
||||
struct ebt_table *t;
|
||||
|
||||
/* the user wants counters back
|
||||
the check on the size is done later, when we have the lock */
|
||||
* the check on the size is done later, when we have the lock
|
||||
*/
|
||||
if (repl->num_counters) {
|
||||
unsigned long size = repl->num_counters * sizeof(*counterstmp);
|
||||
counterstmp = vmalloc(size);
|
||||
@@ -1038,9 +1044,10 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl,
|
||||
write_unlock_bh(&t->lock);
|
||||
mutex_unlock(&ebt_mutex);
|
||||
/* so, a user can change the chains while having messed up her counter
|
||||
allocation. Only reason why this is done is because this way the lock
|
||||
is held only once, while this doesn't bring the kernel into a
|
||||
dangerous state. */
|
||||
* allocation. Only reason why this is done is because this way the lock
|
||||
* is held only once, while this doesn't bring the kernel into a
|
||||
* dangerous state.
|
||||
*/
|
||||
if (repl->num_counters &&
|
||||
copy_to_user(repl->counters, counterstmp,
|
||||
repl->num_counters * sizeof(struct ebt_counter))) {
|
||||
@@ -1342,13 +1349,14 @@ static int update_counters(struct net *net, const void __user *user,
|
||||
}
|
||||
|
||||
static inline int ebt_make_matchname(const struct ebt_entry_match *m,
|
||||
const char *base, char __user *ubase)
|
||||
const char *base, char __user *ubase)
|
||||
{
|
||||
char __user *hlp = ubase + ((char *)m - base);
|
||||
char name[EBT_FUNCTION_MAXNAMELEN] = {};
|
||||
|
||||
/* ebtables expects 32 bytes long names but xt_match names are 29 bytes
|
||||
long. Copy 29 bytes and fill remaining bytes with zeroes. */
|
||||
* long. Copy 29 bytes and fill remaining bytes with zeroes.
|
||||
*/
|
||||
strlcpy(name, m->u.match->name, sizeof(name));
|
||||
if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
|
||||
return -EFAULT;
|
||||
@@ -1356,19 +1364,19 @@ static inline int ebt_make_matchname(const struct ebt_entry_match *m,
|
||||
}
|
||||
|
||||
static inline int ebt_make_watchername(const struct ebt_entry_watcher *w,
|
||||
const char *base, char __user *ubase)
|
||||
const char *base, char __user *ubase)
|
||||
{
|
||||
char __user *hlp = ubase + ((char *)w - base);
|
||||
char name[EBT_FUNCTION_MAXNAMELEN] = {};
|
||||
|
||||
strlcpy(name, w->u.watcher->name, sizeof(name));
|
||||
if (copy_to_user(hlp , name, EBT_FUNCTION_MAXNAMELEN))
|
||||
if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int
|
||||
ebt_make_names(struct ebt_entry *e, const char *base, char __user *ubase)
|
||||
static inline int ebt_make_names(struct ebt_entry *e, const char *base,
|
||||
char __user *ubase)
|
||||
{
|
||||
int ret;
|
||||
char __user *hlp;
|
||||
@@ -1394,9 +1402,9 @@ ebt_make_names(struct ebt_entry *e, const char *base, char __user *ubase)
|
||||
}
|
||||
|
||||
static int copy_counters_to_user(struct ebt_table *t,
|
||||
const struct ebt_counter *oldcounters,
|
||||
void __user *user, unsigned int num_counters,
|
||||
unsigned int nentries)
|
||||
const struct ebt_counter *oldcounters,
|
||||
void __user *user, unsigned int num_counters,
|
||||
unsigned int nentries)
|
||||
{
|
||||
struct ebt_counter *counterstmp;
|
||||
int ret = 0;
|
||||
@@ -1427,7 +1435,7 @@ static int copy_counters_to_user(struct ebt_table *t,
|
||||
|
||||
/* called with ebt_mutex locked */
|
||||
static int copy_everything_to_user(struct ebt_table *t, void __user *user,
|
||||
const int *len, int cmd)
|
||||
const int *len, int cmd)
|
||||
{
|
||||
struct ebt_replace tmp;
|
||||
const struct ebt_counter *oldcounters;
|
||||
@@ -1595,8 +1603,7 @@ static int ebt_compat_entry_padsize(void)
|
||||
static int ebt_compat_match_offset(const struct xt_match *match,
|
||||
unsigned int userlen)
|
||||
{
|
||||
/*
|
||||
* ebt_among needs special handling. The kernel .matchsize is
|
||||
/* ebt_among needs special handling. The kernel .matchsize is
|
||||
* set to -1 at registration time; at runtime an EBT_ALIGN()ed
|
||||
* value is expected.
|
||||
* Example: userspace sends 4500, ebt_among.c wants 4504.
|
||||
@@ -1966,8 +1973,7 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
|
||||
return off + match_size;
|
||||
}
|
||||
|
||||
/*
|
||||
* return size of all matches, watchers or target, including necessary
|
||||
/* return size of all matches, watchers or target, including necessary
|
||||
* alignment and padding.
|
||||
*/
|
||||
static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
|
||||
@@ -2070,8 +2076,7 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
buf_start = (char *) entry;
|
||||
/*
|
||||
* 0: matches offset, always follows ebt_entry.
|
||||
/* 0: matches offset, always follows ebt_entry.
|
||||
* 1: watchers offset, from ebt_entry structure
|
||||
* 2: target offset, from ebt_entry structure
|
||||
* 3: next ebt_entry offset, from ebt_entry structure
|
||||
@@ -2115,8 +2120,7 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* repl->entries_size is the size of the ebt_entry blob in userspace.
|
||||
/* repl->entries_size is the size of the ebt_entry blob in userspace.
|
||||
* It might need more memory when copied to a 64 bit kernel in case
|
||||
* userspace is 32-bit. So, first task: find out how much memory is needed.
|
||||
*
|
||||
@@ -2305,7 +2309,7 @@ static int compat_do_ebt_set_ctl(struct sock *sk,
|
||||
break;
|
||||
default:
|
||||
ret = -EINVAL;
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -2360,8 +2364,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
|
||||
break;
|
||||
case EBT_SO_GET_ENTRIES:
|
||||
case EBT_SO_GET_INIT_ENTRIES:
|
||||
/*
|
||||
* try real handler first in case of userland-side padding.
|
||||
/* try real handler first in case of userland-side padding.
|
||||
* in case we are dealing with an 'ordinary' 32 bit binary
|
||||
* without 64bit compatibility padding, this will fail right
|
||||
* after copy_from_user when the *len argument is validated.
|
||||
|
||||
@@ -141,7 +141,7 @@ static int nf_tables_bridge_init_net(struct net *net)
|
||||
|
||||
static void nf_tables_bridge_exit_net(struct net *net)
|
||||
{
|
||||
nft_unregister_afinfo(net->nft.bridge);
|
||||
nft_unregister_afinfo(net, net->nft.bridge);
|
||||
kfree(net->nft.bridge);
|
||||
}
|
||||
|
||||
|
||||
@@ -84,6 +84,7 @@ static const struct nft_expr_ops nft_meta_bridge_set_ops = {
|
||||
.size = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
|
||||
.eval = nft_meta_set_eval,
|
||||
.init = nft_meta_set_init,
|
||||
.destroy = nft_meta_set_destroy,
|
||||
.dump = nft_meta_set_dump,
|
||||
};
|
||||
|
||||
|
||||
@@ -9,7 +9,7 @@ obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
|
||||
|
||||
obj-y += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \
|
||||
neighbour.o rtnetlink.o utils.o link_watch.o filter.o \
|
||||
sock_diag.o dev_ioctl.o tso.o
|
||||
sock_diag.o dev_ioctl.o tso.o sock_reuseport.o
|
||||
|
||||
obj-$(CONFIG_XFRM) += flow.o
|
||||
obj-y += net-sysfs.o
|
||||
|
||||
@@ -83,8 +83,8 @@ static int receiver_wake_function(wait_queue_t *wait, unsigned int mode, int syn
|
||||
/*
|
||||
* Wait for the last received packet to be different from skb
|
||||
*/
|
||||
static int wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
|
||||
const struct sk_buff *skb)
|
||||
int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
|
||||
const struct sk_buff *skb)
|
||||
{
|
||||
int error;
|
||||
DEFINE_WAIT_FUNC(wait, receiver_wake_function);
|
||||
@@ -130,6 +130,7 @@ static int wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
|
||||
error = 1;
|
||||
goto out;
|
||||
}
|
||||
EXPORT_SYMBOL(__skb_wait_for_more_packets);
|
||||
|
||||
static struct sk_buff *skb_set_peeked(struct sk_buff *skb)
|
||||
{
|
||||
@@ -161,13 +162,15 @@ static struct sk_buff *skb_set_peeked(struct sk_buff *skb)
|
||||
}
|
||||
|
||||
/**
|
||||
* __skb_recv_datagram - Receive a datagram skbuff
|
||||
* __skb_try_recv_datagram - Receive a datagram skbuff
|
||||
* @sk: socket
|
||||
* @flags: MSG_ flags
|
||||
* @peeked: returns non-zero if this packet has been seen before
|
||||
* @off: an offset in bytes to peek skb from. Returns an offset
|
||||
* within an skb where data actually starts
|
||||
* @err: error code returned
|
||||
* @last: set to last peeked message to inform the wait function
|
||||
* what to look for when peeking
|
||||
*
|
||||
* Get a datagram skbuff, understands the peeking, nonblocking wakeups
|
||||
* and possible races. This replaces identical code in packet, raw and
|
||||
@@ -175,9 +178,11 @@ static struct sk_buff *skb_set_peeked(struct sk_buff *skb)
|
||||
* the long standing peek and read race for datagram sockets. If you
|
||||
* alter this routine remember it must be re-entrant.
|
||||
*
|
||||
* This function will lock the socket if a skb is returned, so the caller
|
||||
* needs to unlock the socket in that case (usually by calling
|
||||
* skb_free_datagram)
|
||||
* This function will lock the socket if a skb is returned, so
|
||||
* the caller needs to unlock the socket in that case (usually by
|
||||
* calling skb_free_datagram). Returns NULL with *err set to
|
||||
* -EAGAIN if no data was available or to some other value if an
|
||||
* error was detected.
|
||||
*
|
||||
* * It does not lock socket since today. This function is
|
||||
* * free of race conditions. This measure should/can improve
|
||||
@@ -191,13 +196,13 @@ static struct sk_buff *skb_set_peeked(struct sk_buff *skb)
|
||||
* quite explicitly by POSIX 1003.1g, don't change them without having
|
||||
* the standard around please.
|
||||
*/
|
||||
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
|
||||
int *peeked, int *off, int *err)
|
||||
struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned int flags,
|
||||
int *peeked, int *off, int *err,
|
||||
struct sk_buff **last)
|
||||
{
|
||||
struct sk_buff_head *queue = &sk->sk_receive_queue;
|
||||
struct sk_buff *skb, *last;
|
||||
struct sk_buff *skb;
|
||||
unsigned long cpu_flags;
|
||||
long timeo;
|
||||
/*
|
||||
* Caller is allowed not to check sk->sk_err before skb_recv_datagram()
|
||||
*/
|
||||
@@ -206,8 +211,6 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
|
||||
if (error)
|
||||
goto no_packet;
|
||||
|
||||
timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
|
||||
|
||||
do {
|
||||
/* Again only user level code calls this function, so nothing
|
||||
* interrupt level will suddenly eat the receive_queue.
|
||||
@@ -217,10 +220,10 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
|
||||
*/
|
||||
int _off = *off;
|
||||
|
||||
last = (struct sk_buff *)queue;
|
||||
*last = (struct sk_buff *)queue;
|
||||
spin_lock_irqsave(&queue->lock, cpu_flags);
|
||||
skb_queue_walk(queue, skb) {
|
||||
last = skb;
|
||||
*last = skb;
|
||||
*peeked = skb->peeked;
|
||||
if (flags & MSG_PEEK) {
|
||||
if (_off >= skb->len && (skb->len || _off ||
|
||||
@@ -231,8 +234,11 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
|
||||
|
||||
skb = skb_set_peeked(skb);
|
||||
error = PTR_ERR(skb);
|
||||
if (IS_ERR(skb))
|
||||
goto unlock_err;
|
||||
if (IS_ERR(skb)) {
|
||||
spin_unlock_irqrestore(&queue->lock,
|
||||
cpu_flags);
|
||||
goto no_packet;
|
||||
}
|
||||
|
||||
atomic_inc(&skb->users);
|
||||
} else
|
||||
@@ -242,27 +248,40 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
|
||||
*off = _off;
|
||||
return skb;
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&queue->lock, cpu_flags);
|
||||
} while (sk_can_busy_loop(sk) &&
|
||||
sk_busy_loop(sk, flags & MSG_DONTWAIT));
|
||||
|
||||
if (sk_can_busy_loop(sk) &&
|
||||
sk_busy_loop(sk, flags & MSG_DONTWAIT))
|
||||
continue;
|
||||
error = -EAGAIN;
|
||||
|
||||
/* User doesn't want to wait */
|
||||
error = -EAGAIN;
|
||||
if (!timeo)
|
||||
goto no_packet;
|
||||
|
||||
} while (!wait_for_more_packets(sk, err, &timeo, last));
|
||||
|
||||
return NULL;
|
||||
|
||||
unlock_err:
|
||||
spin_unlock_irqrestore(&queue->lock, cpu_flags);
|
||||
no_packet:
|
||||
*err = error;
|
||||
return NULL;
|
||||
}
|
||||
EXPORT_SYMBOL(__skb_try_recv_datagram);
|
||||
|
||||
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
|
||||
int *peeked, int *off, int *err)
|
||||
{
|
||||
struct sk_buff *skb, *last;
|
||||
long timeo;
|
||||
|
||||
timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
|
||||
|
||||
do {
|
||||
skb = __skb_try_recv_datagram(sk, flags, peeked, off, err,
|
||||
&last);
|
||||
if (skb)
|
||||
return skb;
|
||||
|
||||
if (*err != -EAGAIN)
|
||||
break;
|
||||
} while (timeo &&
|
||||
!__skb_wait_for_more_packets(sk, err, &timeo, last));
|
||||
|
||||
return NULL;
|
||||
}
|
||||
EXPORT_SYMBOL(__skb_recv_datagram);
|
||||
|
||||
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags,
|
||||
|
||||
414
net/core/dev.c
414
net/core/dev.c
@@ -96,6 +96,7 @@
|
||||
#include <linux/skbuff.h>
|
||||
#include <net/net_namespace.h>
|
||||
#include <net/sock.h>
|
||||
#include <net/busy_poll.h>
|
||||
#include <linux/rtnetlink.h>
|
||||
#include <linux/stat.h>
|
||||
#include <net/dst.h>
|
||||
@@ -137,6 +138,7 @@
|
||||
#include <linux/errqueue.h>
|
||||
#include <linux/hrtimer.h>
|
||||
#include <linux/netfilter_ingress.h>
|
||||
#include <linux/sctp.h>
|
||||
|
||||
#include "net-sysfs.h"
|
||||
|
||||
@@ -182,8 +184,8 @@ EXPORT_SYMBOL(dev_base_lock);
|
||||
/* protects napi_hash addition/deletion and napi_gen_id */
|
||||
static DEFINE_SPINLOCK(napi_hash_lock);
|
||||
|
||||
static unsigned int napi_gen_id;
|
||||
static DEFINE_HASHTABLE(napi_hash, 8);
|
||||
static unsigned int napi_gen_id = NR_CPUS;
|
||||
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);
|
||||
|
||||
static seqcount_t devnet_rename_seq;
|
||||
|
||||
@@ -1674,6 +1676,22 @@ void net_dec_ingress_queue(void)
|
||||
EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_NET_EGRESS
|
||||
static struct static_key egress_needed __read_mostly;
|
||||
|
||||
void net_inc_egress_queue(void)
|
||||
{
|
||||
static_key_slow_inc(&egress_needed);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(net_inc_egress_queue);
|
||||
|
||||
void net_dec_egress_queue(void)
|
||||
{
|
||||
static_key_slow_dec(&egress_needed);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(net_dec_egress_queue);
|
||||
#endif
|
||||
|
||||
static struct static_key netstamp_needed __read_mostly;
|
||||
#ifdef HAVE_JUMP_LABEL
|
||||
/* We are not allowed to call static_key_slow_dec() from irq context
|
||||
@@ -2470,6 +2488,141 @@ int skb_checksum_help(struct sk_buff *skb)
|
||||
}
|
||||
EXPORT_SYMBOL(skb_checksum_help);
|
||||
|
||||
/* skb_csum_offload_check - Driver helper function to determine if a device
|
||||
* with limited checksum offload capabilities is able to offload the checksum
|
||||
* for a given packet.
|
||||
*
|
||||
* Arguments:
|
||||
* skb - sk_buff for the packet in question
|
||||
* spec - contains the description of what device can offload
|
||||
* csum_encapped - returns true if the checksum being offloaded is
|
||||
* encpasulated. That is it is checksum for the transport header
|
||||
* in the inner headers.
|
||||
* checksum_help - when set indicates that helper function should
|
||||
* call skb_checksum_help if offload checks fail
|
||||
*
|
||||
* Returns:
|
||||
* true: Packet has passed the checksum checks and should be offloadable to
|
||||
* the device (a driver may still need to check for additional
|
||||
* restrictions of its device)
|
||||
* false: Checksum is not offloadable. If checksum_help was set then
|
||||
* skb_checksum_help was called to resolve checksum for non-GSO
|
||||
* packets and when IP protocol is not SCTP
|
||||
*/
|
||||
bool __skb_csum_offload_chk(struct sk_buff *skb,
|
||||
const struct skb_csum_offl_spec *spec,
|
||||
bool *csum_encapped,
|
||||
bool csum_help)
|
||||
{
|
||||
struct iphdr *iph;
|
||||
struct ipv6hdr *ipv6;
|
||||
void *nhdr;
|
||||
int protocol;
|
||||
u8 ip_proto;
|
||||
|
||||
if (skb->protocol == htons(ETH_P_8021Q) ||
|
||||
skb->protocol == htons(ETH_P_8021AD)) {
|
||||
if (!spec->vlan_okay)
|
||||
goto need_help;
|
||||
}
|
||||
|
||||
/* We check whether the checksum refers to a transport layer checksum in
|
||||
* the outermost header or an encapsulated transport layer checksum that
|
||||
* corresponds to the inner headers of the skb. If the checksum is for
|
||||
* something else in the packet we need help.
|
||||
*/
|
||||
if (skb_checksum_start_offset(skb) == skb_transport_offset(skb)) {
|
||||
/* Non-encapsulated checksum */
|
||||
protocol = eproto_to_ipproto(vlan_get_protocol(skb));
|
||||
nhdr = skb_network_header(skb);
|
||||
*csum_encapped = false;
|
||||
if (spec->no_not_encapped)
|
||||
goto need_help;
|
||||
} else if (skb->encapsulation && spec->encap_okay &&
|
||||
skb_checksum_start_offset(skb) ==
|
||||
skb_inner_transport_offset(skb)) {
|
||||
/* Encapsulated checksum */
|
||||
*csum_encapped = true;
|
||||
switch (skb->inner_protocol_type) {
|
||||
case ENCAP_TYPE_ETHER:
|
||||
protocol = eproto_to_ipproto(skb->inner_protocol);
|
||||
break;
|
||||
case ENCAP_TYPE_IPPROTO:
|
||||
protocol = skb->inner_protocol;
|
||||
break;
|
||||
}
|
||||
nhdr = skb_inner_network_header(skb);
|
||||
} else {
|
||||
goto need_help;
|
||||
}
|
||||
|
||||
switch (protocol) {
|
||||
case IPPROTO_IP:
|
||||
if (!spec->ipv4_okay)
|
||||
goto need_help;
|
||||
iph = nhdr;
|
||||
ip_proto = iph->protocol;
|
||||
if (iph->ihl != 5 && !spec->ip_options_okay)
|
||||
goto need_help;
|
||||
break;
|
||||
case IPPROTO_IPV6:
|
||||
if (!spec->ipv6_okay)
|
||||
goto need_help;
|
||||
if (spec->no_encapped_ipv6 && *csum_encapped)
|
||||
goto need_help;
|
||||
ipv6 = nhdr;
|
||||
nhdr += sizeof(*ipv6);
|
||||
ip_proto = ipv6->nexthdr;
|
||||
break;
|
||||
default:
|
||||
goto need_help;
|
||||
}
|
||||
|
||||
ip_proto_again:
|
||||
switch (ip_proto) {
|
||||
case IPPROTO_TCP:
|
||||
if (!spec->tcp_okay ||
|
||||
skb->csum_offset != offsetof(struct tcphdr, check))
|
||||
goto need_help;
|
||||
break;
|
||||
case IPPROTO_UDP:
|
||||
if (!spec->udp_okay ||
|
||||
skb->csum_offset != offsetof(struct udphdr, check))
|
||||
goto need_help;
|
||||
break;
|
||||
case IPPROTO_SCTP:
|
||||
if (!spec->sctp_okay ||
|
||||
skb->csum_offset != offsetof(struct sctphdr, checksum))
|
||||
goto cant_help;
|
||||
break;
|
||||
case NEXTHDR_HOP:
|
||||
case NEXTHDR_ROUTING:
|
||||
case NEXTHDR_DEST: {
|
||||
u8 *opthdr = nhdr;
|
||||
|
||||
if (protocol != IPPROTO_IPV6 || !spec->ext_hdrs_okay)
|
||||
goto need_help;
|
||||
|
||||
ip_proto = opthdr[0];
|
||||
nhdr += (opthdr[1] + 1) << 3;
|
||||
|
||||
goto ip_proto_again;
|
||||
}
|
||||
default:
|
||||
goto need_help;
|
||||
}
|
||||
|
||||
/* Passed the tests for offloading checksum */
|
||||
return true;
|
||||
|
||||
need_help:
|
||||
if (csum_help && !skb_shinfo(skb)->gso_size)
|
||||
skb_checksum_help(skb);
|
||||
cant_help:
|
||||
return false;
|
||||
}
|
||||
EXPORT_SYMBOL(__skb_csum_offload_chk);
|
||||
|
||||
__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
|
||||
{
|
||||
__be16 type = skb->protocol;
|
||||
@@ -2644,7 +2797,7 @@ static netdev_features_t harmonize_features(struct sk_buff *skb,
|
||||
|
||||
if (skb->ip_summed != CHECKSUM_NONE &&
|
||||
!can_checksum_protocol(features, type)) {
|
||||
features &= ~NETIF_F_ALL_CSUM;
|
||||
features &= ~NETIF_F_CSUM_MASK;
|
||||
} else if (illegal_highdma(skb->dev, skb)) {
|
||||
features &= ~NETIF_F_SG;
|
||||
}
|
||||
@@ -2791,7 +2944,7 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
|
||||
else
|
||||
skb_set_transport_header(skb,
|
||||
skb_checksum_start_offset(skb));
|
||||
if (!(features & NETIF_F_ALL_CSUM) &&
|
||||
if (!(features & NETIF_F_CSUM_MASK) &&
|
||||
skb_checksum_help(skb))
|
||||
goto out_kfree_skb;
|
||||
}
|
||||
@@ -2870,7 +3023,6 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
|
||||
bool contended;
|
||||
int rc;
|
||||
|
||||
qdisc_pkt_len_init(skb);
|
||||
qdisc_calculate_pkt_len(skb, q);
|
||||
/*
|
||||
* Heuristic to force contended enqueues to serialize on a
|
||||
@@ -2928,7 +3080,8 @@ static void skb_update_prio(struct sk_buff *skb)
|
||||
struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
|
||||
|
||||
if (!skb->priority && skb->sk && map) {
|
||||
unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
|
||||
unsigned int prioidx =
|
||||
sock_cgroup_prioidx(&skb->sk->sk_cgrp_data);
|
||||
|
||||
if (prioidx < map->priomap_len)
|
||||
skb->priority = map->priomap[prioidx];
|
||||
@@ -2962,6 +3115,49 @@ int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
|
||||
}
|
||||
EXPORT_SYMBOL(dev_loopback_xmit);
|
||||
|
||||
#ifdef CONFIG_NET_EGRESS
|
||||
static struct sk_buff *
|
||||
sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
|
||||
{
|
||||
struct tcf_proto *cl = rcu_dereference_bh(dev->egress_cl_list);
|
||||
struct tcf_result cl_res;
|
||||
|
||||
if (!cl)
|
||||
return skb;
|
||||
|
||||
/* skb->tc_verd and qdisc_skb_cb(skb)->pkt_len were already set
|
||||
* earlier by the caller.
|
||||
*/
|
||||
qdisc_bstats_cpu_update(cl->q, skb);
|
||||
|
||||
switch (tc_classify(skb, cl, &cl_res, false)) {
|
||||
case TC_ACT_OK:
|
||||
case TC_ACT_RECLASSIFY:
|
||||
skb->tc_index = TC_H_MIN(cl_res.classid);
|
||||
break;
|
||||
case TC_ACT_SHOT:
|
||||
qdisc_qstats_cpu_drop(cl->q);
|
||||
*ret = NET_XMIT_DROP;
|
||||
goto drop;
|
||||
case TC_ACT_STOLEN:
|
||||
case TC_ACT_QUEUED:
|
||||
*ret = NET_XMIT_SUCCESS;
|
||||
drop:
|
||||
kfree_skb(skb);
|
||||
return NULL;
|
||||
case TC_ACT_REDIRECT:
|
||||
/* No need to push/pop skb's mac_header here on egress! */
|
||||
skb_do_redirect(skb);
|
||||
*ret = NET_XMIT_SUCCESS;
|
||||
return NULL;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return skb;
|
||||
}
|
||||
#endif /* CONFIG_NET_EGRESS */
|
||||
|
||||
static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
|
||||
{
|
||||
#ifdef CONFIG_XPS
|
||||
@@ -3021,7 +3217,9 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
|
||||
int queue_index = 0;
|
||||
|
||||
#ifdef CONFIG_XPS
|
||||
if (skb->sender_cpu == 0)
|
||||
u32 sender_cpu = skb->sender_cpu - 1;
|
||||
|
||||
if (sender_cpu >= (u32)NR_CPUS)
|
||||
skb->sender_cpu = raw_smp_processor_id() + 1;
|
||||
#endif
|
||||
|
||||
@@ -3086,6 +3284,17 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
|
||||
|
||||
skb_update_prio(skb);
|
||||
|
||||
qdisc_pkt_len_init(skb);
|
||||
#ifdef CONFIG_NET_CLS_ACT
|
||||
skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
|
||||
# ifdef CONFIG_NET_EGRESS
|
||||
if (static_key_false(&egress_needed)) {
|
||||
skb = sch_handle_egress(skb, &rc, dev);
|
||||
if (!skb)
|
||||
goto out;
|
||||
}
|
||||
# endif
|
||||
#endif
|
||||
/* If device/qdisc don't need skb->dst, release it right now while
|
||||
* its hot in this cpu cache.
|
||||
*/
|
||||
@@ -3107,9 +3316,6 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
|
||||
txq = netdev_pick_tx(dev, skb, accel_priv);
|
||||
q = rcu_dereference_bh(txq->qdisc);
|
||||
|
||||
#ifdef CONFIG_NET_CLS_ACT
|
||||
skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
|
||||
#endif
|
||||
trace_net_dev_queue(skb);
|
||||
if (q->enqueue) {
|
||||
rc = __dev_xmit_skb(skb, q, dev, txq);
|
||||
@@ -3666,9 +3872,9 @@ int (*br_fdb_test_addr_hook)(struct net_device *dev,
|
||||
EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
|
||||
#endif
|
||||
|
||||
static inline struct sk_buff *handle_ing(struct sk_buff *skb,
|
||||
struct packet_type **pt_prev,
|
||||
int *ret, struct net_device *orig_dev)
|
||||
static inline struct sk_buff *
|
||||
sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
|
||||
struct net_device *orig_dev)
|
||||
{
|
||||
#ifdef CONFIG_NET_CLS_ACT
|
||||
struct tcf_proto *cl = rcu_dereference_bh(skb->dev->ingress_cl_list);
|
||||
@@ -3862,7 +4068,7 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
|
||||
skip_taps:
|
||||
#ifdef CONFIG_NET_INGRESS
|
||||
if (static_key_false(&ingress_needed)) {
|
||||
skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
|
||||
skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev);
|
||||
if (!skb)
|
||||
goto out;
|
||||
|
||||
@@ -4353,6 +4559,7 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
|
||||
|
||||
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
|
||||
{
|
||||
skb_mark_napi_id(skb, napi);
|
||||
trace_napi_gro_receive_entry(skb);
|
||||
|
||||
skb_gro_reset_offset(skb);
|
||||
@@ -4386,7 +4593,10 @@ struct sk_buff *napi_get_frags(struct napi_struct *napi)
|
||||
|
||||
if (!skb) {
|
||||
skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
|
||||
napi->skb = skb;
|
||||
if (skb) {
|
||||
napi->skb = skb;
|
||||
skb_mark_napi_id(skb, napi);
|
||||
}
|
||||
}
|
||||
return skb;
|
||||
}
|
||||
@@ -4661,7 +4871,7 @@ void napi_complete_done(struct napi_struct *n, int work_done)
|
||||
EXPORT_SYMBOL(napi_complete_done);
|
||||
|
||||
/* must be called under rcu_read_lock(), as we dont take a reference */
|
||||
struct napi_struct *napi_by_id(unsigned int napi_id)
|
||||
static struct napi_struct *napi_by_id(unsigned int napi_id)
|
||||
{
|
||||
unsigned int hash = napi_id % HASH_SIZE(napi_hash);
|
||||
struct napi_struct *napi;
|
||||
@@ -4672,43 +4882,101 @@ struct napi_struct *napi_by_id(unsigned int napi_id)
|
||||
|
||||
return NULL;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(napi_by_id);
|
||||
|
||||
#if defined(CONFIG_NET_RX_BUSY_POLL)
|
||||
#define BUSY_POLL_BUDGET 8
|
||||
bool sk_busy_loop(struct sock *sk, int nonblock)
|
||||
{
|
||||
unsigned long end_time = !nonblock ? sk_busy_loop_end_time(sk) : 0;
|
||||
int (*busy_poll)(struct napi_struct *dev);
|
||||
struct napi_struct *napi;
|
||||
int rc = false;
|
||||
|
||||
rcu_read_lock();
|
||||
|
||||
napi = napi_by_id(sk->sk_napi_id);
|
||||
if (!napi)
|
||||
goto out;
|
||||
|
||||
/* Note: ndo_busy_poll method is optional in linux-4.5 */
|
||||
busy_poll = napi->dev->netdev_ops->ndo_busy_poll;
|
||||
|
||||
do {
|
||||
rc = 0;
|
||||
local_bh_disable();
|
||||
if (busy_poll) {
|
||||
rc = busy_poll(napi);
|
||||
} else if (napi_schedule_prep(napi)) {
|
||||
void *have = netpoll_poll_lock(napi);
|
||||
|
||||
if (test_bit(NAPI_STATE_SCHED, &napi->state)) {
|
||||
rc = napi->poll(napi, BUSY_POLL_BUDGET);
|
||||
trace_napi_poll(napi);
|
||||
if (rc == BUSY_POLL_BUDGET) {
|
||||
napi_complete_done(napi, rc);
|
||||
napi_schedule(napi);
|
||||
}
|
||||
}
|
||||
netpoll_poll_unlock(have);
|
||||
}
|
||||
if (rc > 0)
|
||||
NET_ADD_STATS_BH(sock_net(sk),
|
||||
LINUX_MIB_BUSYPOLLRXPACKETS, rc);
|
||||
local_bh_enable();
|
||||
|
||||
if (rc == LL_FLUSH_FAILED)
|
||||
break; /* permanent failure */
|
||||
|
||||
cpu_relax();
|
||||
} while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
|
||||
!need_resched() && !busy_loop_timeout(end_time));
|
||||
|
||||
rc = !skb_queue_empty(&sk->sk_receive_queue);
|
||||
out:
|
||||
rcu_read_unlock();
|
||||
return rc;
|
||||
}
|
||||
EXPORT_SYMBOL(sk_busy_loop);
|
||||
|
||||
#endif /* CONFIG_NET_RX_BUSY_POLL */
|
||||
|
||||
void napi_hash_add(struct napi_struct *napi)
|
||||
{
|
||||
if (!test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) {
|
||||
if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) ||
|
||||
test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
|
||||
return;
|
||||
|
||||
spin_lock(&napi_hash_lock);
|
||||
spin_lock(&napi_hash_lock);
|
||||
|
||||
/* 0 is not a valid id, we also skip an id that is taken
|
||||
* we expect both events to be extremely rare
|
||||
*/
|
||||
napi->napi_id = 0;
|
||||
while (!napi->napi_id) {
|
||||
napi->napi_id = ++napi_gen_id;
|
||||
if (napi_by_id(napi->napi_id))
|
||||
napi->napi_id = 0;
|
||||
}
|
||||
/* 0..NR_CPUS+1 range is reserved for sender_cpu use */
|
||||
do {
|
||||
if (unlikely(++napi_gen_id < NR_CPUS + 1))
|
||||
napi_gen_id = NR_CPUS + 1;
|
||||
} while (napi_by_id(napi_gen_id));
|
||||
napi->napi_id = napi_gen_id;
|
||||
|
||||
hlist_add_head_rcu(&napi->napi_hash_node,
|
||||
&napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
|
||||
hlist_add_head_rcu(&napi->napi_hash_node,
|
||||
&napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
|
||||
|
||||
spin_unlock(&napi_hash_lock);
|
||||
}
|
||||
spin_unlock(&napi_hash_lock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(napi_hash_add);
|
||||
|
||||
/* Warning : caller is responsible to make sure rcu grace period
|
||||
* is respected before freeing memory containing @napi
|
||||
*/
|
||||
void napi_hash_del(struct napi_struct *napi)
|
||||
bool napi_hash_del(struct napi_struct *napi)
|
||||
{
|
||||
bool rcu_sync_needed = false;
|
||||
|
||||
spin_lock(&napi_hash_lock);
|
||||
|
||||
if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state))
|
||||
if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) {
|
||||
rcu_sync_needed = true;
|
||||
hlist_del_rcu(&napi->napi_hash_node);
|
||||
|
||||
}
|
||||
spin_unlock(&napi_hash_lock);
|
||||
return rcu_sync_needed;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(napi_hash_del);
|
||||
|
||||
@@ -4744,6 +5012,7 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
|
||||
napi->poll_owner = -1;
|
||||
#endif
|
||||
set_bit(NAPI_STATE_SCHED, &napi->state);
|
||||
napi_hash_add(napi);
|
||||
}
|
||||
EXPORT_SYMBOL(netif_napi_add);
|
||||
|
||||
@@ -4763,8 +5032,12 @@ void napi_disable(struct napi_struct *n)
|
||||
}
|
||||
EXPORT_SYMBOL(napi_disable);
|
||||
|
||||
/* Must be called in process context */
|
||||
void netif_napi_del(struct napi_struct *napi)
|
||||
{
|
||||
might_sleep();
|
||||
if (napi_hash_del(napi))
|
||||
synchronize_net();
|
||||
list_del_init(&napi->dev_list);
|
||||
napi_free_frags(napi);
|
||||
|
||||
@@ -5351,7 +5624,7 @@ static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
|
||||
|
||||
static int __netdev_upper_dev_link(struct net_device *dev,
|
||||
struct net_device *upper_dev, bool master,
|
||||
void *private)
|
||||
void *upper_priv, void *upper_info)
|
||||
{
|
||||
struct netdev_notifier_changeupper_info changeupper_info;
|
||||
struct netdev_adjacent *i, *j, *to_i, *to_j;
|
||||
@@ -5375,6 +5648,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
|
||||
changeupper_info.upper_dev = upper_dev;
|
||||
changeupper_info.master = master;
|
||||
changeupper_info.linking = true;
|
||||
changeupper_info.upper_info = upper_info;
|
||||
|
||||
ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev,
|
||||
&changeupper_info.info);
|
||||
@@ -5382,7 +5656,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, private,
|
||||
ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
|
||||
master);
|
||||
if (ret)
|
||||
return ret;
|
||||
@@ -5420,8 +5694,12 @@ static int __netdev_upper_dev_link(struct net_device *dev,
|
||||
goto rollback_lower_mesh;
|
||||
}
|
||||
|
||||
call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
|
||||
&changeupper_info.info);
|
||||
ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
|
||||
&changeupper_info.info);
|
||||
ret = notifier_to_errno(ret);
|
||||
if (ret)
|
||||
goto rollback_lower_mesh;
|
||||
|
||||
return 0;
|
||||
|
||||
rollback_lower_mesh:
|
||||
@@ -5475,7 +5753,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
|
||||
int netdev_upper_dev_link(struct net_device *dev,
|
||||
struct net_device *upper_dev)
|
||||
{
|
||||
return __netdev_upper_dev_link(dev, upper_dev, false, NULL);
|
||||
return __netdev_upper_dev_link(dev, upper_dev, false, NULL, NULL);
|
||||
}
|
||||
EXPORT_SYMBOL(netdev_upper_dev_link);
|
||||
|
||||
@@ -5483,6 +5761,8 @@ EXPORT_SYMBOL(netdev_upper_dev_link);
|
||||
* netdev_master_upper_dev_link - Add a master link to the upper device
|
||||
* @dev: device
|
||||
* @upper_dev: new upper device
|
||||
* @upper_priv: upper device private
|
||||
* @upper_info: upper info to be passed down via notifier
|
||||
*
|
||||
* Adds a link to device which is upper to this one. In this case, only
|
||||
* one master upper device can be linked, although other non-master devices
|
||||
@@ -5491,20 +5771,14 @@ EXPORT_SYMBOL(netdev_upper_dev_link);
|
||||
* counts are adjusted and the function returns zero.
|
||||
*/
|
||||
int netdev_master_upper_dev_link(struct net_device *dev,
|
||||
struct net_device *upper_dev)
|
||||
struct net_device *upper_dev,
|
||||
void *upper_priv, void *upper_info)
|
||||
{
|
||||
return __netdev_upper_dev_link(dev, upper_dev, true, NULL);
|
||||
return __netdev_upper_dev_link(dev, upper_dev, true,
|
||||
upper_priv, upper_info);
|
||||
}
|
||||
EXPORT_SYMBOL(netdev_master_upper_dev_link);
|
||||
|
||||
int netdev_master_upper_dev_link_private(struct net_device *dev,
|
||||
struct net_device *upper_dev,
|
||||
void *private)
|
||||
{
|
||||
return __netdev_upper_dev_link(dev, upper_dev, true, private);
|
||||
}
|
||||
EXPORT_SYMBOL(netdev_master_upper_dev_link_private);
|
||||
|
||||
/**
|
||||
* netdev_upper_dev_unlink - Removes a link to upper device
|
||||
* @dev: device
|
||||
@@ -5663,7 +5937,7 @@ EXPORT_SYMBOL(netdev_lower_dev_get_private);
|
||||
|
||||
|
||||
int dev_get_nest_level(struct net_device *dev,
|
||||
bool (*type_check)(struct net_device *dev))
|
||||
bool (*type_check)(const struct net_device *dev))
|
||||
{
|
||||
struct net_device *lower = NULL;
|
||||
struct list_head *iter;
|
||||
@@ -5685,6 +5959,26 @@ int dev_get_nest_level(struct net_device *dev,
|
||||
}
|
||||
EXPORT_SYMBOL(dev_get_nest_level);
|
||||
|
||||
/**
|
||||
* netdev_lower_change - Dispatch event about lower device state change
|
||||
* @lower_dev: device
|
||||
* @lower_state_info: state to dispatch
|
||||
*
|
||||
* Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
|
||||
* The caller must hold the RTNL lock.
|
||||
*/
|
||||
void netdev_lower_state_changed(struct net_device *lower_dev,
|
||||
void *lower_state_info)
|
||||
{
|
||||
struct netdev_notifier_changelowerstate_info changelowerstate_info;
|
||||
|
||||
ASSERT_RTNL();
|
||||
changelowerstate_info.lower_state_info = lower_state_info;
|
||||
call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE, lower_dev,
|
||||
&changelowerstate_info.info);
|
||||
}
|
||||
EXPORT_SYMBOL(netdev_lower_state_changed);
|
||||
|
||||
static void dev_change_rx_flags(struct net_device *dev, int flags)
|
||||
{
|
||||
const struct net_device_ops *ops = dev->netdev_ops;
|
||||
@@ -6375,9 +6669,9 @@ static netdev_features_t netdev_fix_features(struct net_device *dev,
|
||||
/* UFO needs SG and checksumming */
|
||||
if (features & NETIF_F_UFO) {
|
||||
/* maybe split UFO into V4 and V6? */
|
||||
if (!((features & NETIF_F_GEN_CSUM) ||
|
||||
(features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
|
||||
== (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
|
||||
if (!(features & NETIF_F_HW_CSUM) &&
|
||||
((features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) !=
|
||||
(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))) {
|
||||
netdev_dbg(dev,
|
||||
"Dropping NETIF_F_UFO since no checksum offload features.\n");
|
||||
features &= ~NETIF_F_UFO;
|
||||
@@ -7164,11 +7458,13 @@ EXPORT_SYMBOL(alloc_netdev_mqs);
|
||||
* This function does the last stage of destroying an allocated device
|
||||
* interface. The reference to the device object is released.
|
||||
* If this is the last reference then it will be freed.
|
||||
* Must be called in process context.
|
||||
*/
|
||||
void free_netdev(struct net_device *dev)
|
||||
{
|
||||
struct napi_struct *p, *n;
|
||||
|
||||
might_sleep();
|
||||
netif_free_tx_queues(dev);
|
||||
#ifdef CONFIG_SYSFS
|
||||
kvfree(dev->_rx);
|
||||
@@ -7477,16 +7773,16 @@ static int dev_cpu_callback(struct notifier_block *nfb,
|
||||
netdev_features_t netdev_increment_features(netdev_features_t all,
|
||||
netdev_features_t one, netdev_features_t mask)
|
||||
{
|
||||
if (mask & NETIF_F_GEN_CSUM)
|
||||
mask |= NETIF_F_ALL_CSUM;
|
||||
if (mask & NETIF_F_HW_CSUM)
|
||||
mask |= NETIF_F_CSUM_MASK;
|
||||
mask |= NETIF_F_VLAN_CHALLENGED;
|
||||
|
||||
all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
|
||||
all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
|
||||
all &= one | ~NETIF_F_ALL_FOR_ALL;
|
||||
|
||||
/* If one device supports hw checksumming, set for all. */
|
||||
if (all & NETIF_F_GEN_CSUM)
|
||||
all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
|
||||
if (all & NETIF_F_HW_CSUM)
|
||||
all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);
|
||||
|
||||
return all;
|
||||
}
|
||||
|
||||
@@ -87,7 +87,7 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
|
||||
[NETIF_F_GSO_UDP_TUNNEL_BIT] = "tx-udp_tnl-segmentation",
|
||||
|
||||
[NETIF_F_FCOE_CRC_BIT] = "tx-checksum-fcoe-crc",
|
||||
[NETIF_F_SCTP_CSUM_BIT] = "tx-checksum-sctp",
|
||||
[NETIF_F_SCTP_CRC_BIT] = "tx-checksum-sctp",
|
||||
[NETIF_F_FCOE_MTU_BIT] = "fcoe-mtu",
|
||||
[NETIF_F_NTUPLE_BIT] = "rx-ntuple-filter",
|
||||
[NETIF_F_RXHASH_BIT] = "rx-hashing",
|
||||
@@ -191,6 +191,23 @@ static int ethtool_set_features(struct net_device *dev, void __user *useraddr)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int phy_get_sset_count(struct phy_device *phydev)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (phydev->drv->get_sset_count &&
|
||||
phydev->drv->get_strings &&
|
||||
phydev->drv->get_stats) {
|
||||
mutex_lock(&phydev->lock);
|
||||
ret = phydev->drv->get_sset_count(phydev);
|
||||
mutex_unlock(&phydev->lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
static int __ethtool_get_sset_count(struct net_device *dev, int sset)
|
||||
{
|
||||
const struct ethtool_ops *ops = dev->ethtool_ops;
|
||||
@@ -204,6 +221,13 @@ static int __ethtool_get_sset_count(struct net_device *dev, int sset)
|
||||
if (sset == ETH_SS_TUNABLES)
|
||||
return ARRAY_SIZE(tunable_strings);
|
||||
|
||||
if (sset == ETH_SS_PHY_STATS) {
|
||||
if (dev->phydev)
|
||||
return phy_get_sset_count(dev->phydev);
|
||||
else
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
if (ops->get_sset_count && ops->get_strings)
|
||||
return ops->get_sset_count(dev, sset);
|
||||
else
|
||||
@@ -223,7 +247,17 @@ static void __ethtool_get_strings(struct net_device *dev,
|
||||
sizeof(rss_hash_func_strings));
|
||||
else if (stringset == ETH_SS_TUNABLES)
|
||||
memcpy(data, tunable_strings, sizeof(tunable_strings));
|
||||
else
|
||||
else if (stringset == ETH_SS_PHY_STATS) {
|
||||
struct phy_device *phydev = dev->phydev;
|
||||
|
||||
if (phydev) {
|
||||
mutex_lock(&phydev->lock);
|
||||
phydev->drv->get_strings(phydev, data);
|
||||
mutex_unlock(&phydev->lock);
|
||||
} else {
|
||||
return;
|
||||
}
|
||||
} else
|
||||
/* ops->get_strings is valid because checked earlier */
|
||||
ops->get_strings(dev, stringset, data);
|
||||
}
|
||||
@@ -235,7 +269,7 @@ static netdev_features_t ethtool_get_feature_mask(u32 eth_cmd)
|
||||
switch (eth_cmd) {
|
||||
case ETHTOOL_GTXCSUM:
|
||||
case ETHTOOL_STXCSUM:
|
||||
return NETIF_F_ALL_CSUM | NETIF_F_SCTP_CSUM;
|
||||
return NETIF_F_CSUM_MASK | NETIF_F_SCTP_CRC;
|
||||
case ETHTOOL_GRXCSUM:
|
||||
case ETHTOOL_SRXCSUM:
|
||||
return NETIF_F_RXCSUM;
|
||||
@@ -1401,6 +1435,47 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr)
|
||||
{
|
||||
struct ethtool_stats stats;
|
||||
struct phy_device *phydev = dev->phydev;
|
||||
u64 *data;
|
||||
int ret, n_stats;
|
||||
|
||||
if (!phydev)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
n_stats = phy_get_sset_count(phydev);
|
||||
|
||||
if (n_stats < 0)
|
||||
return n_stats;
|
||||
WARN_ON(n_stats == 0);
|
||||
|
||||
if (copy_from_user(&stats, useraddr, sizeof(stats)))
|
||||
return -EFAULT;
|
||||
|
||||
stats.n_stats = n_stats;
|
||||
data = kmalloc_array(n_stats, sizeof(u64), GFP_USER);
|
||||
if (!data)
|
||||
return -ENOMEM;
|
||||
|
||||
mutex_lock(&phydev->lock);
|
||||
phydev->drv->get_stats(phydev, &stats, data);
|
||||
mutex_unlock(&phydev->lock);
|
||||
|
||||
ret = -EFAULT;
|
||||
if (copy_to_user(useraddr, &stats, sizeof(stats)))
|
||||
goto out;
|
||||
useraddr += sizeof(stats);
|
||||
if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64)))
|
||||
goto out;
|
||||
ret = 0;
|
||||
|
||||
out:
|
||||
kfree(data);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ethtool_get_perm_addr(struct net_device *dev, void __user *useraddr)
|
||||
{
|
||||
struct ethtool_perm_addr epaddr;
|
||||
@@ -1779,6 +1854,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
|
||||
case ETHTOOL_GSSET_INFO:
|
||||
case ETHTOOL_GSTRINGS:
|
||||
case ETHTOOL_GSTATS:
|
||||
case ETHTOOL_GPHYSTATS:
|
||||
case ETHTOOL_GTSO:
|
||||
case ETHTOOL_GPERMADDR:
|
||||
case ETHTOOL_GUFO:
|
||||
@@ -1991,6 +2067,9 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
|
||||
case ETHTOOL_STUNABLE:
|
||||
rc = ethtool_set_tunable(dev, useraddr);
|
||||
break;
|
||||
case ETHTOOL_GPHYSTATS:
|
||||
rc = ethtool_get_phy_stats(dev, useraddr);
|
||||
break;
|
||||
default:
|
||||
rc = -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
@@ -50,6 +50,7 @@
|
||||
#include <net/cls_cgroup.h>
|
||||
#include <net/dst_metadata.h>
|
||||
#include <net/dst.h>
|
||||
#include <net/sock_reuseport.h>
|
||||
|
||||
/**
|
||||
* sk_filter - run a packet through a socket filter
|
||||
@@ -348,12 +349,6 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
|
||||
* jump offsets, 2nd pass remapping:
|
||||
* new_prog = kmalloc(sizeof(struct bpf_insn) * new_len);
|
||||
* bpf_convert_filter(old_prog, old_len, new_prog, &new_len);
|
||||
*
|
||||
* User BPF's register A is mapped to our BPF register 6, user BPF
|
||||
* register X is mapped to BPF register 7; frame pointer is always
|
||||
* register 10; Context 'void *ctx' is stored in register 1, that is,
|
||||
* for socket filters: ctx == 'struct sk_buff *', for seccomp:
|
||||
* ctx == 'struct seccomp_data *'.
|
||||
*/
|
||||
static int bpf_convert_filter(struct sock_filter *prog, int len,
|
||||
struct bpf_insn *new_prog, int *new_len)
|
||||
@@ -381,9 +376,22 @@ static int bpf_convert_filter(struct sock_filter *prog, int len,
|
||||
new_insn = new_prog;
|
||||
fp = prog;
|
||||
|
||||
if (new_insn)
|
||||
*new_insn = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
|
||||
new_insn++;
|
||||
/* Classic BPF related prologue emission. */
|
||||
if (new_insn) {
|
||||
/* Classic BPF expects A and X to be reset first. These need
|
||||
* to be guaranteed to be the first two instructions.
|
||||
*/
|
||||
*new_insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
|
||||
*new_insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_X, BPF_REG_X);
|
||||
|
||||
/* All programs must keep CTX in callee saved BPF_REG_CTX.
|
||||
* In eBPF case it's done by the compiler, here we need to
|
||||
* do this ourself. Initial CTX is present in BPF_REG_ARG1.
|
||||
*/
|
||||
*new_insn++ = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
|
||||
} else {
|
||||
new_insn += 3;
|
||||
}
|
||||
|
||||
for (i = 0; i < len; fp++, i++) {
|
||||
struct bpf_insn tmp_insns[6] = { };
|
||||
@@ -777,6 +785,11 @@ static int bpf_check_classic(const struct sock_filter *filter,
|
||||
if (ftest->k == 0)
|
||||
return -EINVAL;
|
||||
break;
|
||||
case BPF_ALU | BPF_LSH | BPF_K:
|
||||
case BPF_ALU | BPF_RSH | BPF_K:
|
||||
if (ftest->k >= 32)
|
||||
return -EINVAL;
|
||||
break;
|
||||
case BPF_LD | BPF_MEM:
|
||||
case BPF_LDX | BPF_MEM:
|
||||
case BPF_ST:
|
||||
@@ -1160,6 +1173,68 @@ static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __reuseport_attach_prog(struct bpf_prog *prog, struct sock *sk)
|
||||
{
|
||||
struct bpf_prog *old_prog;
|
||||
int err;
|
||||
|
||||
if (bpf_prog_size(prog->len) > sysctl_optmem_max)
|
||||
return -ENOMEM;
|
||||
|
||||
if (sk_unhashed(sk)) {
|
||||
err = reuseport_alloc(sk);
|
||||
if (err)
|
||||
return err;
|
||||
} else if (!rcu_access_pointer(sk->sk_reuseport_cb)) {
|
||||
/* The socket wasn't bound with SO_REUSEPORT */
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
old_prog = reuseport_attach_prog(sk, prog);
|
||||
if (old_prog)
|
||||
bpf_prog_destroy(old_prog);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static
|
||||
struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
|
||||
{
|
||||
unsigned int fsize = bpf_classic_proglen(fprog);
|
||||
unsigned int bpf_fsize = bpf_prog_size(fprog->len);
|
||||
struct bpf_prog *prog;
|
||||
int err;
|
||||
|
||||
if (sock_flag(sk, SOCK_FILTER_LOCKED))
|
||||
return ERR_PTR(-EPERM);
|
||||
|
||||
/* Make sure new filter is there and in the right amounts. */
|
||||
if (fprog->filter == NULL)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
prog = bpf_prog_alloc(bpf_fsize, 0);
|
||||
if (!prog)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
if (copy_from_user(prog->insns, fprog->filter, fsize)) {
|
||||
__bpf_prog_free(prog);
|
||||
return ERR_PTR(-EFAULT);
|
||||
}
|
||||
|
||||
prog->len = fprog->len;
|
||||
|
||||
err = bpf_prog_store_orig_filter(prog, fprog);
|
||||
if (err) {
|
||||
__bpf_prog_free(prog);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
/* bpf_prepare_filter() already takes care of freeing
|
||||
* memory in case something goes wrong.
|
||||
*/
|
||||
return bpf_prepare_filter(prog, NULL);
|
||||
}
|
||||
|
||||
/**
|
||||
* sk_attach_filter - attach a socket filter
|
||||
* @fprog: the filter program
|
||||
@@ -1172,39 +1247,9 @@ static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
|
||||
*/
|
||||
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
|
||||
{
|
||||
unsigned int fsize = bpf_classic_proglen(fprog);
|
||||
unsigned int bpf_fsize = bpf_prog_size(fprog->len);
|
||||
struct bpf_prog *prog;
|
||||
struct bpf_prog *prog = __get_filter(fprog, sk);
|
||||
int err;
|
||||
|
||||
if (sock_flag(sk, SOCK_FILTER_LOCKED))
|
||||
return -EPERM;
|
||||
|
||||
/* Make sure new filter is there and in the right amounts. */
|
||||
if (fprog->filter == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
prog = bpf_prog_alloc(bpf_fsize, 0);
|
||||
if (!prog)
|
||||
return -ENOMEM;
|
||||
|
||||
if (copy_from_user(prog->insns, fprog->filter, fsize)) {
|
||||
__bpf_prog_free(prog);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
prog->len = fprog->len;
|
||||
|
||||
err = bpf_prog_store_orig_filter(prog, fprog);
|
||||
if (err) {
|
||||
__bpf_prog_free(prog);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/* bpf_prepare_filter() already takes care of freeing
|
||||
* memory in case something goes wrong.
|
||||
*/
|
||||
prog = bpf_prepare_filter(prog, NULL);
|
||||
if (IS_ERR(prog))
|
||||
return PTR_ERR(prog);
|
||||
|
||||
@@ -1218,23 +1263,50 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(sk_attach_filter);
|
||||
|
||||
int sk_attach_bpf(u32 ufd, struct sock *sk)
|
||||
int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
|
||||
{
|
||||
struct bpf_prog *prog;
|
||||
struct bpf_prog *prog = __get_filter(fprog, sk);
|
||||
int err;
|
||||
|
||||
if (sock_flag(sk, SOCK_FILTER_LOCKED))
|
||||
return -EPERM;
|
||||
|
||||
prog = bpf_prog_get(ufd);
|
||||
if (IS_ERR(prog))
|
||||
return PTR_ERR(prog);
|
||||
|
||||
err = __reuseport_attach_prog(prog, sk);
|
||||
if (err < 0) {
|
||||
__bpf_prog_release(prog);
|
||||
return err;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct bpf_prog *__get_bpf(u32 ufd, struct sock *sk)
|
||||
{
|
||||
struct bpf_prog *prog;
|
||||
|
||||
if (sock_flag(sk, SOCK_FILTER_LOCKED))
|
||||
return ERR_PTR(-EPERM);
|
||||
|
||||
prog = bpf_prog_get(ufd);
|
||||
if (IS_ERR(prog))
|
||||
return prog;
|
||||
|
||||
if (prog->type != BPF_PROG_TYPE_SOCKET_FILTER) {
|
||||
bpf_prog_put(prog);
|
||||
return -EINVAL;
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
return prog;
|
||||
}
|
||||
|
||||
int sk_attach_bpf(u32 ufd, struct sock *sk)
|
||||
{
|
||||
struct bpf_prog *prog = __get_bpf(ufd, sk);
|
||||
int err;
|
||||
|
||||
if (IS_ERR(prog))
|
||||
return PTR_ERR(prog);
|
||||
|
||||
err = __sk_attach_prog(prog, sk);
|
||||
if (err < 0) {
|
||||
bpf_prog_put(prog);
|
||||
@@ -1244,7 +1316,24 @@ int sk_attach_bpf(u32 ufd, struct sock *sk)
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define BPF_RECOMPUTE_CSUM(flags) ((flags) & 1)
|
||||
int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk)
|
||||
{
|
||||
struct bpf_prog *prog = __get_bpf(ufd, sk);
|
||||
int err;
|
||||
|
||||
if (IS_ERR(prog))
|
||||
return PTR_ERR(prog);
|
||||
|
||||
err = __reuseport_attach_prog(prog, sk);
|
||||
if (err < 0) {
|
||||
bpf_prog_put(prog);
|
||||
return err;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define BPF_LDST_LEN 16U
|
||||
|
||||
static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
|
||||
{
|
||||
@@ -1252,9 +1341,12 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
|
||||
int offset = (int) r2;
|
||||
void *from = (void *) (long) r3;
|
||||
unsigned int len = (unsigned int) r4;
|
||||
char buf[16];
|
||||
char buf[BPF_LDST_LEN];
|
||||
void *ptr;
|
||||
|
||||
if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM)))
|
||||
return -EINVAL;
|
||||
|
||||
/* bpf verifier guarantees that:
|
||||
* 'from' pointer points to bpf program stack
|
||||
* 'len' bytes of it were initialized
|
||||
@@ -1274,7 +1366,7 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
|
||||
if (unlikely(!ptr))
|
||||
return -EFAULT;
|
||||
|
||||
if (BPF_RECOMPUTE_CSUM(flags))
|
||||
if (flags & BPF_F_RECOMPUTE_CSUM)
|
||||
skb_postpull_rcsum(skb, ptr, len);
|
||||
|
||||
memcpy(ptr, from, len);
|
||||
@@ -1283,8 +1375,9 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
|
||||
/* skb_store_bits cannot return -EFAULT here */
|
||||
skb_store_bits(skb, offset, ptr, len);
|
||||
|
||||
if (BPF_RECOMPUTE_CSUM(flags) && skb->ip_summed == CHECKSUM_COMPLETE)
|
||||
skb->csum = csum_add(skb->csum, csum_partial(ptr, len, 0));
|
||||
if (flags & BPF_F_RECOMPUTE_CSUM)
|
||||
skb_postpush_rcsum(skb, ptr, len);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1299,8 +1392,35 @@ const struct bpf_func_proto bpf_skb_store_bytes_proto = {
|
||||
.arg5_type = ARG_ANYTHING,
|
||||
};
|
||||
|
||||
#define BPF_HEADER_FIELD_SIZE(flags) ((flags) & 0x0f)
|
||||
#define BPF_IS_PSEUDO_HEADER(flags) ((flags) & 0x10)
|
||||
static u64 bpf_skb_load_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
|
||||
{
|
||||
const struct sk_buff *skb = (const struct sk_buff *)(unsigned long) r1;
|
||||
int offset = (int) r2;
|
||||
void *to = (void *)(unsigned long) r3;
|
||||
unsigned int len = (unsigned int) r4;
|
||||
void *ptr;
|
||||
|
||||
if (unlikely((u32) offset > 0xffff || len > BPF_LDST_LEN))
|
||||
return -EFAULT;
|
||||
|
||||
ptr = skb_header_pointer(skb, offset, len, to);
|
||||
if (unlikely(!ptr))
|
||||
return -EFAULT;
|
||||
if (ptr != to)
|
||||
memcpy(to, ptr, len);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
const struct bpf_func_proto bpf_skb_load_bytes_proto = {
|
||||
.func = bpf_skb_load_bytes,
|
||||
.gpl_only = false,
|
||||
.ret_type = RET_INTEGER,
|
||||
.arg1_type = ARG_PTR_TO_CTX,
|
||||
.arg2_type = ARG_ANYTHING,
|
||||
.arg3_type = ARG_PTR_TO_STACK,
|
||||
.arg4_type = ARG_CONST_STACK_SIZE,
|
||||
};
|
||||
|
||||
static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
|
||||
{
|
||||
@@ -1308,6 +1428,8 @@ static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
|
||||
int offset = (int) r2;
|
||||
__sum16 sum, *ptr;
|
||||
|
||||
if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK)))
|
||||
return -EINVAL;
|
||||
if (unlikely((u32) offset > 0xffff))
|
||||
return -EFAULT;
|
||||
|
||||
@@ -1319,7 +1441,7 @@ static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
|
||||
if (unlikely(!ptr))
|
||||
return -EFAULT;
|
||||
|
||||
switch (BPF_HEADER_FIELD_SIZE(flags)) {
|
||||
switch (flags & BPF_F_HDR_FIELD_MASK) {
|
||||
case 2:
|
||||
csum_replace2(ptr, from, to);
|
||||
break;
|
||||
@@ -1351,10 +1473,12 @@ const struct bpf_func_proto bpf_l3_csum_replace_proto = {
|
||||
static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
|
||||
{
|
||||
struct sk_buff *skb = (struct sk_buff *) (long) r1;
|
||||
bool is_pseudo = !!BPF_IS_PSEUDO_HEADER(flags);
|
||||
bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
|
||||
int offset = (int) r2;
|
||||
__sum16 sum, *ptr;
|
||||
|
||||
if (unlikely(flags & ~(BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK)))
|
||||
return -EINVAL;
|
||||
if (unlikely((u32) offset > 0xffff))
|
||||
return -EFAULT;
|
||||
|
||||
@@ -1366,7 +1490,7 @@ static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
|
||||
if (unlikely(!ptr))
|
||||
return -EFAULT;
|
||||
|
||||
switch (BPF_HEADER_FIELD_SIZE(flags)) {
|
||||
switch (flags & BPF_F_HDR_FIELD_MASK) {
|
||||
case 2:
|
||||
inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo);
|
||||
break;
|
||||
@@ -1395,13 +1519,14 @@ const struct bpf_func_proto bpf_l4_csum_replace_proto = {
|
||||
.arg5_type = ARG_ANYTHING,
|
||||
};
|
||||
|
||||
#define BPF_IS_REDIRECT_INGRESS(flags) ((flags) & 1)
|
||||
|
||||
static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
|
||||
{
|
||||
struct sk_buff *skb = (struct sk_buff *) (long) r1, *skb2;
|
||||
struct net_device *dev;
|
||||
|
||||
if (unlikely(flags & ~(BPF_F_INGRESS)))
|
||||
return -EINVAL;
|
||||
|
||||
dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex);
|
||||
if (unlikely(!dev))
|
||||
return -EINVAL;
|
||||
@@ -1410,8 +1535,12 @@ static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
|
||||
if (unlikely(!skb2))
|
||||
return -ENOMEM;
|
||||
|
||||
if (BPF_IS_REDIRECT_INGRESS(flags))
|
||||
if (flags & BPF_F_INGRESS) {
|
||||
if (skb_at_tc_ingress(skb2))
|
||||
skb_postpush_rcsum(skb2, skb_mac_header(skb2),
|
||||
skb2->mac_len);
|
||||
return dev_forward_skb(dev, skb2);
|
||||
}
|
||||
|
||||
skb2->dev = dev;
|
||||
skb_sender_cpu_clear(skb2);
|
||||
@@ -1433,12 +1562,17 @@ struct redirect_info {
|
||||
};
|
||||
|
||||
static DEFINE_PER_CPU(struct redirect_info, redirect_info);
|
||||
|
||||
static u64 bpf_redirect(u64 ifindex, u64 flags, u64 r3, u64 r4, u64 r5)
|
||||
{
|
||||
struct redirect_info *ri = this_cpu_ptr(&redirect_info);
|
||||
|
||||
if (unlikely(flags & ~(BPF_F_INGRESS)))
|
||||
return TC_ACT_SHOT;
|
||||
|
||||
ri->ifindex = ifindex;
|
||||
ri->flags = flags;
|
||||
|
||||
return TC_ACT_REDIRECT;
|
||||
}
|
||||
|
||||
@@ -1454,8 +1588,12 @@ int skb_do_redirect(struct sk_buff *skb)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (BPF_IS_REDIRECT_INGRESS(ri->flags))
|
||||
if (ri->flags & BPF_F_INGRESS) {
|
||||
if (skb_at_tc_ingress(skb))
|
||||
skb_postpush_rcsum(skb, skb_mac_header(skb),
|
||||
skb->mac_len);
|
||||
return dev_forward_skb(dev, skb);
|
||||
}
|
||||
|
||||
skb->dev = dev;
|
||||
skb_sender_cpu_clear(skb);
|
||||
@@ -1547,19 +1685,49 @@ bool bpf_helper_changes_skb_data(void *func)
|
||||
return false;
|
||||
}
|
||||
|
||||
static unsigned short bpf_tunnel_key_af(u64 flags)
|
||||
{
|
||||
return flags & BPF_F_TUNINFO_IPV6 ? AF_INET6 : AF_INET;
|
||||
}
|
||||
|
||||
static u64 bpf_skb_get_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
|
||||
{
|
||||
struct sk_buff *skb = (struct sk_buff *) (long) r1;
|
||||
struct bpf_tunnel_key *to = (struct bpf_tunnel_key *) (long) r2;
|
||||
struct ip_tunnel_info *info = skb_tunnel_info(skb);
|
||||
const struct ip_tunnel_info *info = skb_tunnel_info(skb);
|
||||
u8 compat[sizeof(struct bpf_tunnel_key)];
|
||||
|
||||
if (unlikely(size != sizeof(struct bpf_tunnel_key) || flags || !info))
|
||||
return -EINVAL;
|
||||
if (ip_tunnel_info_af(info) != AF_INET)
|
||||
if (unlikely(!info || (flags & ~(BPF_F_TUNINFO_IPV6))))
|
||||
return -EINVAL;
|
||||
if (ip_tunnel_info_af(info) != bpf_tunnel_key_af(flags))
|
||||
return -EPROTO;
|
||||
if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
|
||||
switch (size) {
|
||||
case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
|
||||
/* Fixup deprecated structure layouts here, so we have
|
||||
* a common path later on.
|
||||
*/
|
||||
if (ip_tunnel_info_af(info) != AF_INET)
|
||||
return -EINVAL;
|
||||
to = (struct bpf_tunnel_key *)compat;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
to->tunnel_id = be64_to_cpu(info->key.tun_id);
|
||||
to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src);
|
||||
to->tunnel_tos = info->key.tos;
|
||||
to->tunnel_ttl = info->key.ttl;
|
||||
|
||||
if (flags & BPF_F_TUNINFO_IPV6)
|
||||
memcpy(to->remote_ipv6, &info->key.u.ipv6.src,
|
||||
sizeof(to->remote_ipv6));
|
||||
else
|
||||
to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src);
|
||||
|
||||
if (unlikely(size != sizeof(struct bpf_tunnel_key)))
|
||||
memcpy((void *)(long) r2, to, size);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -1581,10 +1749,25 @@ static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
|
||||
struct sk_buff *skb = (struct sk_buff *) (long) r1;
|
||||
struct bpf_tunnel_key *from = (struct bpf_tunnel_key *) (long) r2;
|
||||
struct metadata_dst *md = this_cpu_ptr(md_dst);
|
||||
u8 compat[sizeof(struct bpf_tunnel_key)];
|
||||
struct ip_tunnel_info *info;
|
||||
|
||||
if (unlikely(size != sizeof(struct bpf_tunnel_key) || flags))
|
||||
if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6)))
|
||||
return -EINVAL;
|
||||
if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
|
||||
switch (size) {
|
||||
case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
|
||||
/* Fixup deprecated structure layouts here, so we have
|
||||
* a common path later on.
|
||||
*/
|
||||
memcpy(compat, from, size);
|
||||
memset(compat + size, 0, sizeof(compat) - size);
|
||||
from = (struct bpf_tunnel_key *)compat;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
skb_dst_drop(skb);
|
||||
dst_hold((struct dst_entry *) md);
|
||||
@@ -1592,9 +1775,19 @@ static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
|
||||
|
||||
info = &md->u.tun_info;
|
||||
info->mode = IP_TUNNEL_INFO_TX;
|
||||
|
||||
info->key.tun_flags = TUNNEL_KEY;
|
||||
info->key.tun_id = cpu_to_be64(from->tunnel_id);
|
||||
info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4);
|
||||
info->key.tos = from->tunnel_tos;
|
||||
info->key.ttl = from->tunnel_ttl;
|
||||
|
||||
if (flags & BPF_F_TUNINFO_IPV6) {
|
||||
info->mode |= IP_TUNNEL_INFO_IPV6;
|
||||
memcpy(&info->key.u.ipv6.dst, from->remote_ipv6,
|
||||
sizeof(from->remote_ipv6));
|
||||
} else {
|
||||
info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -1654,6 +1847,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
|
||||
switch (func_id) {
|
||||
case BPF_FUNC_skb_store_bytes:
|
||||
return &bpf_skb_store_bytes_proto;
|
||||
case BPF_FUNC_skb_load_bytes:
|
||||
return &bpf_skb_load_bytes_proto;
|
||||
case BPF_FUNC_l3_csum_replace:
|
||||
return &bpf_l3_csum_replace_proto;
|
||||
case BPF_FUNC_l4_csum_replace:
|
||||
|
||||
@@ -471,6 +471,7 @@ static ssize_t phys_switch_id_show(struct device *dev,
|
||||
|
||||
if (dev_isalive(netdev)) {
|
||||
struct switchdev_attr attr = {
|
||||
.orig_dev = netdev,
|
||||
.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
|
||||
.flags = SWITCHDEV_F_NO_RECURSE,
|
||||
};
|
||||
@@ -1452,8 +1453,8 @@ static void netdev_release(struct device *d)
|
||||
|
||||
static const void *net_namespace(struct device *d)
|
||||
{
|
||||
struct net_device *dev;
|
||||
dev = container_of(d, struct net_device, dev);
|
||||
struct net_device *dev = to_net_dev(d);
|
||||
|
||||
return dev_net(dev);
|
||||
}
|
||||
|
||||
|
||||
@@ -32,6 +32,10 @@
|
||||
#include <trace/events/sock.h>
|
||||
#include <trace/events/udp.h>
|
||||
#include <trace/events/fib.h>
|
||||
#if IS_ENABLED(CONFIG_IPV6)
|
||||
#include <trace/events/fib6.h>
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(fib6_table_lookup);
|
||||
#endif
|
||||
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(kfree_skb);
|
||||
|
||||
|
||||
@@ -61,9 +61,12 @@ static int update_classid_sock(const void *v, struct file *file, unsigned n)
|
||||
int err;
|
||||
struct socket *sock = sock_from_file(file, &err);
|
||||
|
||||
if (sock)
|
||||
sock->sk->sk_classid = (u32)(unsigned long)v;
|
||||
|
||||
if (sock) {
|
||||
spin_lock(&cgroup_sk_update_lock);
|
||||
sock_cgroup_set_classid(&sock->sk->sk_cgrp_data,
|
||||
(unsigned long)v);
|
||||
spin_unlock(&cgroup_sk_update_lock);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -100,6 +103,8 @@ static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
|
||||
{
|
||||
struct cgroup_cls_state *cs = css_cls_state(css);
|
||||
|
||||
cgroup_sk_alloc_disable();
|
||||
|
||||
cs->classid = (u32)value;
|
||||
|
||||
update_classid(css, (void *)(unsigned long)cs->classid);
|
||||
|
||||
@@ -27,6 +27,12 @@
|
||||
|
||||
#include <linux/fdtable.h>
|
||||
|
||||
/*
|
||||
* netprio allocates per-net_device priomap array which is indexed by
|
||||
* css->id. Limiting css ID to 16bits doesn't lose anything.
|
||||
*/
|
||||
#define NETPRIO_ID_MAX USHRT_MAX
|
||||
|
||||
#define PRIOMAP_MIN_SZ 128
|
||||
|
||||
/*
|
||||
@@ -144,6 +150,9 @@ static int cgrp_css_online(struct cgroup_subsys_state *css)
|
||||
struct net_device *dev;
|
||||
int ret = 0;
|
||||
|
||||
if (css->id > NETPRIO_ID_MAX)
|
||||
return -ENOSPC;
|
||||
|
||||
if (!parent_css)
|
||||
return 0;
|
||||
|
||||
@@ -200,6 +209,8 @@ static ssize_t write_priomap(struct kernfs_open_file *of,
|
||||
if (!dev)
|
||||
return -ENODEV;
|
||||
|
||||
cgroup_sk_alloc_disable();
|
||||
|
||||
rtnl_lock();
|
||||
|
||||
ret = netprio_set_prio(of_css(of), dev, prio);
|
||||
@@ -213,8 +224,12 @@ static int update_netprio(const void *v, struct file *file, unsigned n)
|
||||
{
|
||||
int err;
|
||||
struct socket *sock = sock_from_file(file, &err);
|
||||
if (sock)
|
||||
sock->sk->sk_cgrp_prioidx = (u32)(unsigned long)v;
|
||||
if (sock) {
|
||||
spin_lock(&cgroup_sk_update_lock);
|
||||
sock_cgroup_set_prioidx(&sock->sk->sk_cgrp_data,
|
||||
(unsigned long)v);
|
||||
spin_unlock(&cgroup_sk_update_lock);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
@@ -2787,7 +2787,9 @@ static struct sk_buff *pktgen_alloc_skb(struct net_device *dev,
|
||||
} else {
|
||||
skb = __netdev_alloc_skb(dev, size, GFP_NOWAIT);
|
||||
}
|
||||
skb_reserve(skb, LL_RESERVED_SPACE(dev));
|
||||
|
||||
if (likely(skb))
|
||||
skb_reserve(skb, LL_RESERVED_SPACE(dev));
|
||||
|
||||
return skb;
|
||||
}
|
||||
@@ -2898,7 +2900,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
|
||||
|
||||
if (!(pkt_dev->flags & F_UDPCSUM)) {
|
||||
skb->ip_summed = CHECKSUM_NONE;
|
||||
} else if (odev->features & NETIF_F_V4_CSUM) {
|
||||
} else if (odev->features & (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM)) {
|
||||
skb->ip_summed = CHECKSUM_PARTIAL;
|
||||
skb->csum = 0;
|
||||
udp4_hwcsum(skb, iph->saddr, iph->daddr);
|
||||
@@ -3032,7 +3034,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
|
||||
|
||||
if (!(pkt_dev->flags & F_UDPCSUM)) {
|
||||
skb->ip_summed = CHECKSUM_NONE;
|
||||
} else if (odev->features & NETIF_F_V6_CSUM) {
|
||||
} else if (odev->features & (NETIF_F_HW_CSUM | NETIF_F_IPV6_CSUM)) {
|
||||
skb->ip_summed = CHECKSUM_PARTIAL;
|
||||
skb->csum_start = skb_transport_header(skb) - skb->head;
|
||||
skb->csum_offset = offsetof(struct udphdr, check);
|
||||
|
||||
@@ -1027,6 +1027,7 @@ static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
|
||||
{
|
||||
int err;
|
||||
struct switchdev_attr attr = {
|
||||
.orig_dev = dev,
|
||||
.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
|
||||
.flags = SWITCHDEV_F_NO_RECURSE,
|
||||
};
|
||||
@@ -2563,7 +2564,7 @@ static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
|
||||
struct net_device *dev,
|
||||
u8 *addr, u16 vid, u32 pid, u32 seq,
|
||||
int type, unsigned int flags,
|
||||
int nlflags)
|
||||
int nlflags, u16 ndm_state)
|
||||
{
|
||||
struct nlmsghdr *nlh;
|
||||
struct ndmsg *ndm;
|
||||
@@ -2579,7 +2580,7 @@ static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
|
||||
ndm->ndm_flags = flags;
|
||||
ndm->ndm_type = 0;
|
||||
ndm->ndm_ifindex = dev->ifindex;
|
||||
ndm->ndm_state = NUD_PERMANENT;
|
||||
ndm->ndm_state = ndm_state;
|
||||
|
||||
if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
|
||||
goto nla_put_failure;
|
||||
@@ -2600,7 +2601,8 @@ static inline size_t rtnl_fdb_nlmsg_size(void)
|
||||
return NLMSG_ALIGN(sizeof(struct ndmsg)) + nla_total_size(ETH_ALEN);
|
||||
}
|
||||
|
||||
static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type)
|
||||
static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
|
||||
u16 ndm_state)
|
||||
{
|
||||
struct net *net = dev_net(dev);
|
||||
struct sk_buff *skb;
|
||||
@@ -2611,7 +2613,7 @@ static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type)
|
||||
goto errout;
|
||||
|
||||
err = nlmsg_populate_fdb_fill(skb, dev, addr, vid,
|
||||
0, 0, type, NTF_SELF, 0);
|
||||
0, 0, type, NTF_SELF, 0, ndm_state);
|
||||
if (err < 0) {
|
||||
kfree_skb(skb);
|
||||
goto errout;
|
||||
@@ -2746,7 +2748,8 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
|
||||
nlh->nlmsg_flags);
|
||||
|
||||
if (!err) {
|
||||
rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH);
|
||||
rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH,
|
||||
ndm->ndm_state);
|
||||
ndm->ndm_flags &= ~NTF_SELF;
|
||||
}
|
||||
}
|
||||
@@ -2847,7 +2850,8 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
|
||||
err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);
|
||||
|
||||
if (!err) {
|
||||
rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH);
|
||||
rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH,
|
||||
ndm->ndm_state);
|
||||
ndm->ndm_flags &= ~NTF_SELF;
|
||||
}
|
||||
}
|
||||
@@ -2875,7 +2879,7 @@ static int nlmsg_populate_fdb(struct sk_buff *skb,
|
||||
err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0,
|
||||
portid, seq,
|
||||
RTM_NEWNEIGH, NTF_SELF,
|
||||
NLM_F_MULTI);
|
||||
NLM_F_MULTI, NUD_PERMANENT);
|
||||
if (err < 0)
|
||||
return err;
|
||||
skip:
|
||||
@@ -3347,7 +3351,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
|
||||
{
|
||||
struct net *net = sock_net(skb->sk);
|
||||
rtnl_doit_func doit;
|
||||
int sz_idx, kind;
|
||||
int kind;
|
||||
int family;
|
||||
int type;
|
||||
int err;
|
||||
@@ -3363,7 +3367,6 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
|
||||
return 0;
|
||||
|
||||
family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
|
||||
sz_idx = type>>2;
|
||||
kind = type&3;
|
||||
|
||||
if (kind != 2 && !netlink_net_capable(skb, CAP_NET_ADMIN))
|
||||
|
||||
@@ -289,8 +289,8 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
|
||||
/* Bump the usage count and install the file. */
|
||||
sock = sock_from_file(fp[i], &err);
|
||||
if (sock) {
|
||||
sock_update_netprioidx(sock->sk);
|
||||
sock_update_classid(sock->sk);
|
||||
sock_update_netprioidx(&sock->sk->sk_cgrp_data);
|
||||
sock_update_classid(&sock->sk->sk_cgrp_data);
|
||||
}
|
||||
fd_install(new_fd, get_file(fp[i]));
|
||||
}
|
||||
|
||||
@@ -134,6 +134,7 @@
|
||||
#include <linux/sock_diag.h>
|
||||
|
||||
#include <linux/filter.h>
|
||||
#include <net/sock_reuseport.h>
|
||||
|
||||
#include <trace/events/sock.h>
|
||||
|
||||
@@ -932,6 +933,32 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
|
||||
}
|
||||
break;
|
||||
|
||||
case SO_ATTACH_REUSEPORT_CBPF:
|
||||
ret = -EINVAL;
|
||||
if (optlen == sizeof(struct sock_fprog)) {
|
||||
struct sock_fprog fprog;
|
||||
|
||||
ret = -EFAULT;
|
||||
if (copy_from_user(&fprog, optval, sizeof(fprog)))
|
||||
break;
|
||||
|
||||
ret = sk_reuseport_attach_filter(&fprog, sk);
|
||||
}
|
||||
break;
|
||||
|
||||
case SO_ATTACH_REUSEPORT_EBPF:
|
||||
ret = -EINVAL;
|
||||
if (optlen == sizeof(u32)) {
|
||||
u32 ufd;
|
||||
|
||||
ret = -EFAULT;
|
||||
if (copy_from_user(&ufd, optval, sizeof(ufd)))
|
||||
break;
|
||||
|
||||
ret = sk_reuseport_attach_bpf(ufd, sk);
|
||||
}
|
||||
break;
|
||||
|
||||
case SO_DETACH_FILTER:
|
||||
ret = sk_detach_filter(sk);
|
||||
break;
|
||||
@@ -1362,6 +1389,7 @@ static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
|
||||
if (!try_module_get(prot->owner))
|
||||
goto out_free_sec;
|
||||
sk_tx_queue_clear(sk);
|
||||
cgroup_sk_alloc(&sk->sk_cgrp_data);
|
||||
}
|
||||
|
||||
return sk;
|
||||
@@ -1384,6 +1412,7 @@ static void sk_prot_free(struct proto *prot, struct sock *sk)
|
||||
owner = prot->owner;
|
||||
slab = prot->slab;
|
||||
|
||||
cgroup_sk_free(&sk->sk_cgrp_data);
|
||||
security_sk_free(sk);
|
||||
if (slab != NULL)
|
||||
kmem_cache_free(slab, sk);
|
||||
@@ -1392,17 +1421,6 @@ static void sk_prot_free(struct proto *prot, struct sock *sk)
|
||||
module_put(owner);
|
||||
}
|
||||
|
||||
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
|
||||
void sock_update_netprioidx(struct sock *sk)
|
||||
{
|
||||
if (in_interrupt())
|
||||
return;
|
||||
|
||||
sk->sk_cgrp_prioidx = task_netprioidx(current);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(sock_update_netprioidx);
|
||||
#endif
|
||||
|
||||
/**
|
||||
* sk_alloc - All socket objects are allocated here
|
||||
* @net: the applicable net namespace
|
||||
@@ -1431,8 +1449,8 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
|
||||
sock_net_set(sk, net);
|
||||
atomic_set(&sk->sk_wmem_alloc, 1);
|
||||
|
||||
sock_update_classid(sk);
|
||||
sock_update_netprioidx(sk);
|
||||
sock_update_classid(&sk->sk_cgrp_data);
|
||||
sock_update_netprioidx(&sk->sk_cgrp_data);
|
||||
}
|
||||
|
||||
return sk;
|
||||
@@ -1452,6 +1470,8 @@ void sk_destruct(struct sock *sk)
|
||||
sk_filter_uncharge(sk, filter);
|
||||
RCU_INIT_POINTER(sk->sk_filter, NULL);
|
||||
}
|
||||
if (rcu_access_pointer(sk->sk_reuseport_cb))
|
||||
reuseport_detach_sock(sk);
|
||||
|
||||
sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
|
||||
|
||||
@@ -2281,7 +2301,7 @@ static void sock_def_wakeup(struct sock *sk)
|
||||
|
||||
rcu_read_lock();
|
||||
wq = rcu_dereference(sk->sk_wq);
|
||||
if (wq_has_sleeper(wq))
|
||||
if (skwq_has_sleeper(wq))
|
||||
wake_up_interruptible_all(&wq->wait);
|
||||
rcu_read_unlock();
|
||||
}
|
||||
@@ -2292,7 +2312,7 @@ static void sock_def_error_report(struct sock *sk)
|
||||
|
||||
rcu_read_lock();
|
||||
wq = rcu_dereference(sk->sk_wq);
|
||||
if (wq_has_sleeper(wq))
|
||||
if (skwq_has_sleeper(wq))
|
||||
wake_up_interruptible_poll(&wq->wait, POLLERR);
|
||||
sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
|
||||
rcu_read_unlock();
|
||||
@@ -2304,7 +2324,7 @@ static void sock_def_readable(struct sock *sk)
|
||||
|
||||
rcu_read_lock();
|
||||
wq = rcu_dereference(sk->sk_wq);
|
||||
if (wq_has_sleeper(wq))
|
||||
if (skwq_has_sleeper(wq))
|
||||
wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
|
||||
POLLRDNORM | POLLRDBAND);
|
||||
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
|
||||
@@ -2322,7 +2342,7 @@ static void sock_def_write_space(struct sock *sk)
|
||||
*/
|
||||
if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
|
||||
wq = rcu_dereference(sk->sk_wq);
|
||||
if (wq_has_sleeper(wq))
|
||||
if (skwq_has_sleeper(wq))
|
||||
wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
|
||||
POLLWRNORM | POLLWRBAND);
|
||||
|
||||
|
||||
@@ -214,7 +214,7 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(sock_diag_unregister);
|
||||
|
||||
static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
|
||||
static int __sock_diag_cmd(struct sk_buff *skb, struct nlmsghdr *nlh)
|
||||
{
|
||||
int err;
|
||||
struct sock_diag_req *req = nlmsg_data(nlh);
|
||||
@@ -234,8 +234,12 @@ static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
|
||||
hndl = sock_diag_handlers[req->sdiag_family];
|
||||
if (hndl == NULL)
|
||||
err = -ENOENT;
|
||||
else
|
||||
else if (nlh->nlmsg_type == SOCK_DIAG_BY_FAMILY)
|
||||
err = hndl->dump(skb, nlh);
|
||||
else if (nlh->nlmsg_type == SOCK_DESTROY && hndl->destroy)
|
||||
err = hndl->destroy(skb, nlh);
|
||||
else
|
||||
err = -EOPNOTSUPP;
|
||||
mutex_unlock(&sock_diag_table_mutex);
|
||||
|
||||
return err;
|
||||
@@ -261,7 +265,8 @@ static int sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
|
||||
|
||||
return ret;
|
||||
case SOCK_DIAG_BY_FAMILY:
|
||||
return __sock_diag_rcv_msg(skb, nlh);
|
||||
case SOCK_DESTROY:
|
||||
return __sock_diag_cmd(skb, nlh);
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
@@ -295,6 +300,18 @@ static int sock_diag_bind(struct net *net, int group)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int sock_diag_destroy(struct sock *sk, int err)
|
||||
{
|
||||
if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
|
||||
return -EPERM;
|
||||
|
||||
if (!sk->sk_prot->diag_destroy)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
return sk->sk_prot->diag_destroy(sk, err);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(sock_diag_destroy);
|
||||
|
||||
static int __net_init diag_net_init(struct net *net)
|
||||
{
|
||||
struct netlink_kernel_cfg cfg = {
|
||||
|
||||
251
net/core/sock_reuseport.c
Normal file
251
net/core/sock_reuseport.c
Normal file
@@ -0,0 +1,251 @@
|
||||
/*
|
||||
* To speed up listener socket lookup, create an array to store all sockets
|
||||
* listening on the same port. This allows a decision to be made after finding
|
||||
* the first socket. An optional BPF program can also be configured for
|
||||
* selecting the socket index from the array of available sockets.
|
||||
*/
|
||||
|
||||
#include <net/sock_reuseport.h>
|
||||
#include <linux/bpf.h>
|
||||
#include <linux/rcupdate.h>
|
||||
|
||||
#define INIT_SOCKS 128
|
||||
|
||||
static DEFINE_SPINLOCK(reuseport_lock);
|
||||
|
||||
static struct sock_reuseport *__reuseport_alloc(u16 max_socks)
|
||||
{
|
||||
size_t size = sizeof(struct sock_reuseport) +
|
||||
sizeof(struct sock *) * max_socks;
|
||||
struct sock_reuseport *reuse = kzalloc(size, GFP_ATOMIC);
|
||||
|
||||
if (!reuse)
|
||||
return NULL;
|
||||
|
||||
reuse->max_socks = max_socks;
|
||||
|
||||
RCU_INIT_POINTER(reuse->prog, NULL);
|
||||
return reuse;
|
||||
}
|
||||
|
||||
int reuseport_alloc(struct sock *sk)
|
||||
{
|
||||
struct sock_reuseport *reuse;
|
||||
|
||||
/* bh lock used since this function call may precede hlist lock in
|
||||
* soft irq of receive path or setsockopt from process context
|
||||
*/
|
||||
spin_lock_bh(&reuseport_lock);
|
||||
WARN_ONCE(rcu_dereference_protected(sk->sk_reuseport_cb,
|
||||
lockdep_is_held(&reuseport_lock)),
|
||||
"multiple allocations for the same socket");
|
||||
reuse = __reuseport_alloc(INIT_SOCKS);
|
||||
if (!reuse) {
|
||||
spin_unlock_bh(&reuseport_lock);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
reuse->socks[0] = sk;
|
||||
reuse->num_socks = 1;
|
||||
rcu_assign_pointer(sk->sk_reuseport_cb, reuse);
|
||||
|
||||
spin_unlock_bh(&reuseport_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(reuseport_alloc);
|
||||
|
||||
static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
|
||||
{
|
||||
struct sock_reuseport *more_reuse;
|
||||
u32 more_socks_size, i;
|
||||
|
||||
more_socks_size = reuse->max_socks * 2U;
|
||||
if (more_socks_size > U16_MAX)
|
||||
return NULL;
|
||||
|
||||
more_reuse = __reuseport_alloc(more_socks_size);
|
||||
if (!more_reuse)
|
||||
return NULL;
|
||||
|
||||
more_reuse->max_socks = more_socks_size;
|
||||
more_reuse->num_socks = reuse->num_socks;
|
||||
more_reuse->prog = reuse->prog;
|
||||
|
||||
memcpy(more_reuse->socks, reuse->socks,
|
||||
reuse->num_socks * sizeof(struct sock *));
|
||||
|
||||
for (i = 0; i < reuse->num_socks; ++i)
|
||||
rcu_assign_pointer(reuse->socks[i]->sk_reuseport_cb,
|
||||
more_reuse);
|
||||
|
||||
/* Note: we use kfree_rcu here instead of reuseport_free_rcu so
|
||||
* that reuse and more_reuse can temporarily share a reference
|
||||
* to prog.
|
||||
*/
|
||||
kfree_rcu(reuse, rcu);
|
||||
return more_reuse;
|
||||
}
|
||||
|
||||
/**
|
||||
* reuseport_add_sock - Add a socket to the reuseport group of another.
|
||||
* @sk: New socket to add to the group.
|
||||
* @sk2: Socket belonging to the existing reuseport group.
|
||||
* May return ENOMEM and not add socket to group under memory pressure.
|
||||
*/
|
||||
int reuseport_add_sock(struct sock *sk, const struct sock *sk2)
|
||||
{
|
||||
struct sock_reuseport *reuse;
|
||||
|
||||
spin_lock_bh(&reuseport_lock);
|
||||
reuse = rcu_dereference_protected(sk2->sk_reuseport_cb,
|
||||
lockdep_is_held(&reuseport_lock)),
|
||||
WARN_ONCE(rcu_dereference_protected(sk->sk_reuseport_cb,
|
||||
lockdep_is_held(&reuseport_lock)),
|
||||
"socket already in reuseport group");
|
||||
|
||||
if (reuse->num_socks == reuse->max_socks) {
|
||||
reuse = reuseport_grow(reuse);
|
||||
if (!reuse) {
|
||||
spin_unlock_bh(&reuseport_lock);
|
||||
return -ENOMEM;
|
||||
}
|
||||
}
|
||||
|
||||
reuse->socks[reuse->num_socks] = sk;
|
||||
/* paired with smp_rmb() in reuseport_select_sock() */
|
||||
smp_wmb();
|
||||
reuse->num_socks++;
|
||||
rcu_assign_pointer(sk->sk_reuseport_cb, reuse);
|
||||
|
||||
spin_unlock_bh(&reuseport_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(reuseport_add_sock);
|
||||
|
||||
static void reuseport_free_rcu(struct rcu_head *head)
|
||||
{
|
||||
struct sock_reuseport *reuse;
|
||||
|
||||
reuse = container_of(head, struct sock_reuseport, rcu);
|
||||
if (reuse->prog)
|
||||
bpf_prog_destroy(reuse->prog);
|
||||
kfree(reuse);
|
||||
}
|
||||
|
||||
void reuseport_detach_sock(struct sock *sk)
|
||||
{
|
||||
struct sock_reuseport *reuse;
|
||||
int i;
|
||||
|
||||
spin_lock_bh(&reuseport_lock);
|
||||
reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
|
||||
lockdep_is_held(&reuseport_lock));
|
||||
rcu_assign_pointer(sk->sk_reuseport_cb, NULL);
|
||||
|
||||
for (i = 0; i < reuse->num_socks; i++) {
|
||||
if (reuse->socks[i] == sk) {
|
||||
reuse->socks[i] = reuse->socks[reuse->num_socks - 1];
|
||||
reuse->num_socks--;
|
||||
if (reuse->num_socks == 0)
|
||||
call_rcu(&reuse->rcu, reuseport_free_rcu);
|
||||
break;
|
||||
}
|
||||
}
|
||||
spin_unlock_bh(&reuseport_lock);
|
||||
}
|
||||
EXPORT_SYMBOL(reuseport_detach_sock);
|
||||
|
||||
static struct sock *run_bpf(struct sock_reuseport *reuse, u16 socks,
|
||||
struct bpf_prog *prog, struct sk_buff *skb,
|
||||
int hdr_len)
|
||||
{
|
||||
struct sk_buff *nskb = NULL;
|
||||
u32 index;
|
||||
|
||||
if (skb_shared(skb)) {
|
||||
nskb = skb_clone(skb, GFP_ATOMIC);
|
||||
if (!nskb)
|
||||
return NULL;
|
||||
skb = nskb;
|
||||
}
|
||||
|
||||
/* temporarily advance data past protocol header */
|
||||
if (!pskb_pull(skb, hdr_len)) {
|
||||
kfree_skb(nskb);
|
||||
return NULL;
|
||||
}
|
||||
index = bpf_prog_run_save_cb(prog, skb);
|
||||
__skb_push(skb, hdr_len);
|
||||
|
||||
consume_skb(nskb);
|
||||
|
||||
if (index >= socks)
|
||||
return NULL;
|
||||
|
||||
return reuse->socks[index];
|
||||
}
|
||||
|
||||
/**
|
||||
* reuseport_select_sock - Select a socket from an SO_REUSEPORT group.
|
||||
* @sk: First socket in the group.
|
||||
* @hash: When no BPF filter is available, use this hash to select.
|
||||
* @skb: skb to run through BPF filter.
|
||||
* @hdr_len: BPF filter expects skb data pointer at payload data. If
|
||||
* the skb does not yet point at the payload, this parameter represents
|
||||
* how far the pointer needs to advance to reach the payload.
|
||||
* Returns a socket that should receive the packet (or NULL on error).
|
||||
*/
|
||||
struct sock *reuseport_select_sock(struct sock *sk,
|
||||
u32 hash,
|
||||
struct sk_buff *skb,
|
||||
int hdr_len)
|
||||
{
|
||||
struct sock_reuseport *reuse;
|
||||
struct bpf_prog *prog;
|
||||
struct sock *sk2 = NULL;
|
||||
u16 socks;
|
||||
|
||||
rcu_read_lock();
|
||||
reuse = rcu_dereference(sk->sk_reuseport_cb);
|
||||
|
||||
/* if memory allocation failed or add call is not yet complete */
|
||||
if (!reuse)
|
||||
goto out;
|
||||
|
||||
prog = rcu_dereference(reuse->prog);
|
||||
socks = READ_ONCE(reuse->num_socks);
|
||||
if (likely(socks)) {
|
||||
/* paired with smp_wmb() in reuseport_add_sock() */
|
||||
smp_rmb();
|
||||
|
||||
if (prog && skb)
|
||||
sk2 = run_bpf(reuse, socks, prog, skb, hdr_len);
|
||||
else
|
||||
sk2 = reuse->socks[reciprocal_scale(hash, socks)];
|
||||
}
|
||||
|
||||
out:
|
||||
rcu_read_unlock();
|
||||
return sk2;
|
||||
}
|
||||
EXPORT_SYMBOL(reuseport_select_sock);
|
||||
|
||||
struct bpf_prog *
|
||||
reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog)
|
||||
{
|
||||
struct sock_reuseport *reuse;
|
||||
struct bpf_prog *old_prog;
|
||||
|
||||
spin_lock_bh(&reuseport_lock);
|
||||
reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
|
||||
lockdep_is_held(&reuseport_lock));
|
||||
old_prog = rcu_dereference_protected(reuse->prog,
|
||||
lockdep_is_held(&reuseport_lock));
|
||||
rcu_assign_pointer(reuse->prog, prog);
|
||||
spin_unlock_bh(&reuseport_lock);
|
||||
|
||||
return old_prog;
|
||||
}
|
||||
EXPORT_SYMBOL(reuseport_attach_prog);
|
||||
@@ -35,7 +35,7 @@ void sk_stream_write_space(struct sock *sk)
|
||||
|
||||
rcu_read_lock();
|
||||
wq = rcu_dereference(sk->sk_wq);
|
||||
if (wq_has_sleeper(wq))
|
||||
if (skwq_has_sleeper(wq))
|
||||
wake_up_interruptible_poll(&wq->wait, POLLOUT |
|
||||
POLLWRNORM | POLLWRBAND);
|
||||
if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
|
||||
|
||||
@@ -201,7 +201,7 @@ void dccp_write_space(struct sock *sk)
|
||||
|
||||
rcu_read_lock();
|
||||
wq = rcu_dereference(sk->sk_wq);
|
||||
if (wq_has_sleeper(wq))
|
||||
if (skwq_has_sleeper(wq))
|
||||
wake_up_interruptible(&wq->wait);
|
||||
/* Should agree with poll, otherwise some programs break */
|
||||
if (sock_writeable(sk))
|
||||
|
||||
@@ -21,8 +21,10 @@
|
||||
#include <linux/of_mdio.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/of_net.h>
|
||||
#include <linux/of_gpio.h>
|
||||
#include <linux/sysfs.h>
|
||||
#include <linux/phy_fixed.h>
|
||||
#include <linux/gpio/consumer.h>
|
||||
#include "dsa_priv.h"
|
||||
|
||||
char dsa_driver_version[] = "0.1";
|
||||
@@ -437,7 +439,7 @@ static void dsa_switch_destroy(struct dsa_switch *ds)
|
||||
if (of_phy_is_fixed_link(port_dn)) {
|
||||
phydev = of_phy_find_device(port_dn);
|
||||
if (phydev) {
|
||||
int addr = phydev->addr;
|
||||
int addr = phydev->mdio.addr;
|
||||
|
||||
phy_device_free(phydev);
|
||||
of_node_put(port_dn);
|
||||
@@ -454,8 +456,7 @@ static void dsa_switch_destroy(struct dsa_switch *ds)
|
||||
if (!ds->ports[port])
|
||||
continue;
|
||||
|
||||
unregister_netdev(ds->ports[port]);
|
||||
free_netdev(ds->ports[port]);
|
||||
dsa_slave_destroy(ds->ports[port]);
|
||||
}
|
||||
|
||||
mdiobus_unregister(ds->slave_mii_bus);
|
||||
@@ -506,33 +507,6 @@ static int dsa_switch_resume(struct dsa_switch *ds)
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
/* link polling *************************************************************/
|
||||
static void dsa_link_poll_work(struct work_struct *ugly)
|
||||
{
|
||||
struct dsa_switch_tree *dst;
|
||||
int i;
|
||||
|
||||
dst = container_of(ugly, struct dsa_switch_tree, link_poll_work);
|
||||
|
||||
for (i = 0; i < dst->pd->nr_chips; i++) {
|
||||
struct dsa_switch *ds = dst->ds[i];
|
||||
|
||||
if (ds != NULL && ds->drv->poll_link != NULL)
|
||||
ds->drv->poll_link(ds);
|
||||
}
|
||||
|
||||
mod_timer(&dst->link_poll_timer, round_jiffies(jiffies + HZ));
|
||||
}
|
||||
|
||||
static void dsa_link_poll_timer(unsigned long _dst)
|
||||
{
|
||||
struct dsa_switch_tree *dst = (void *)_dst;
|
||||
|
||||
schedule_work(&dst->link_poll_work);
|
||||
}
|
||||
|
||||
|
||||
/* platform driver init and cleanup *****************************************/
|
||||
static int dev_is_class(struct device *dev, void *class)
|
||||
{
|
||||
@@ -688,6 +662,9 @@ static int dsa_of_probe(struct device *dev)
|
||||
const char *port_name;
|
||||
int chip_index, port_index;
|
||||
const unsigned int *sw_addr, *port_reg;
|
||||
int gpio;
|
||||
enum of_gpio_flags of_flags;
|
||||
unsigned long flags;
|
||||
u32 eeprom_len;
|
||||
int ret;
|
||||
|
||||
@@ -766,6 +743,19 @@ static int dsa_of_probe(struct device *dev)
|
||||
put_device(cd->host_dev);
|
||||
cd->host_dev = &mdio_bus_switch->dev;
|
||||
}
|
||||
gpio = of_get_named_gpio_flags(child, "reset-gpios", 0,
|
||||
&of_flags);
|
||||
if (gpio_is_valid(gpio)) {
|
||||
flags = (of_flags == OF_GPIO_ACTIVE_LOW ?
|
||||
GPIOF_ACTIVE_LOW : 0);
|
||||
ret = devm_gpio_request_one(dev, gpio, flags,
|
||||
"switch_reset");
|
||||
if (ret)
|
||||
goto out_free_chip;
|
||||
|
||||
cd->reset = gpio_to_desc(gpio);
|
||||
gpiod_direction_output(cd->reset, 0);
|
||||
}
|
||||
|
||||
for_each_available_child_of_node(child, port) {
|
||||
port_reg = of_get_property(port, "reg", NULL);
|
||||
@@ -859,8 +849,6 @@ static int dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev,
|
||||
}
|
||||
|
||||
dst->ds[i] = ds;
|
||||
if (ds->drv->poll_link != NULL)
|
||||
dst->link_poll_needed = 1;
|
||||
|
||||
++configured;
|
||||
}
|
||||
@@ -879,15 +867,6 @@ static int dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev,
|
||||
wmb();
|
||||
dev->dsa_ptr = (void *)dst;
|
||||
|
||||
if (dst->link_poll_needed) {
|
||||
INIT_WORK(&dst->link_poll_work, dsa_link_poll_work);
|
||||
init_timer(&dst->link_poll_timer);
|
||||
dst->link_poll_timer.data = (unsigned long)dst;
|
||||
dst->link_poll_timer.function = dsa_link_poll_timer;
|
||||
dst->link_poll_timer.expires = round_jiffies(jiffies + HZ);
|
||||
add_timer(&dst->link_poll_timer);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -939,8 +918,10 @@ static int dsa_probe(struct platform_device *pdev)
|
||||
platform_set_drvdata(pdev, dst);
|
||||
|
||||
ret = dsa_setup_dst(dst, dev, &pdev->dev, pd);
|
||||
if (ret)
|
||||
if (ret) {
|
||||
dev_put(dev);
|
||||
goto out;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
@@ -954,17 +935,14 @@ static void dsa_remove_dst(struct dsa_switch_tree *dst)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (dst->link_poll_needed)
|
||||
del_timer_sync(&dst->link_poll_timer);
|
||||
|
||||
flush_work(&dst->link_poll_work);
|
||||
|
||||
for (i = 0; i < dst->pd->nr_chips; i++) {
|
||||
struct dsa_switch *ds = dst->ds[i];
|
||||
|
||||
if (ds)
|
||||
dsa_switch_destroy(ds);
|
||||
}
|
||||
|
||||
dev_put(dst->master_netdev);
|
||||
}
|
||||
|
||||
static int dsa_remove(struct platform_device *pdev)
|
||||
@@ -1010,6 +988,14 @@ static int dsa_suspend(struct device *d)
|
||||
struct dsa_switch_tree *dst = platform_get_drvdata(pdev);
|
||||
int i, ret = 0;
|
||||
|
||||
dst->master_netdev->dsa_ptr = NULL;
|
||||
|
||||
/* If we used a tagging format that doesn't have an ethertype
|
||||
* field, make sure that all packets from this point get sent
|
||||
* without the tag and go through the regular receive path.
|
||||
*/
|
||||
wmb();
|
||||
|
||||
for (i = 0; i < dst->pd->nr_chips; i++) {
|
||||
struct dsa_switch *ds = dst->ds[i];
|
||||
|
||||
|
||||
@@ -61,6 +61,7 @@ extern const struct dsa_device_ops notag_netdev_ops;
|
||||
void dsa_slave_mii_bus_init(struct dsa_switch *ds);
|
||||
int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
|
||||
int port, char *name);
|
||||
void dsa_slave_destroy(struct net_device *slave_dev);
|
||||
int dsa_slave_suspend(struct net_device *slave_dev);
|
||||
int dsa_slave_resume(struct net_device *slave_dev);
|
||||
int dsa_slave_netdevice_event(struct notifier_block *unused,
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
#include <linux/phy_fixed.h>
|
||||
#include <linux/of_net.h>
|
||||
#include <linux/of_mdio.h>
|
||||
#include <linux/mdio.h>
|
||||
#include <net/rtnetlink.h>
|
||||
#include <net/switchdev.h>
|
||||
#include <linux/if_bridge.h>
|
||||
@@ -997,7 +998,7 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p,
|
||||
{
|
||||
struct dsa_switch *ds = p->parent;
|
||||
|
||||
p->phy = ds->slave_mii_bus->phy_map[addr];
|
||||
p->phy = mdiobus_get_phy(ds->slave_mii_bus, addr);
|
||||
if (!p->phy) {
|
||||
netdev_err(slave_dev, "no phy at %d\n", addr);
|
||||
return -ENODEV;
|
||||
@@ -1080,11 +1081,10 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
|
||||
netdev_err(slave_dev, "failed to connect to port %d: %d\n", p->port, ret);
|
||||
return ret;
|
||||
}
|
||||
} else {
|
||||
netdev_info(slave_dev, "attached PHY at address %d [%s]\n",
|
||||
p->phy->addr, p->phy->drv->name);
|
||||
}
|
||||
|
||||
phy_attached_info(p->phy);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1189,13 +1189,6 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
|
||||
p->old_link = -1;
|
||||
p->old_duplex = -1;
|
||||
|
||||
ret = dsa_slave_phy_setup(p, slave_dev);
|
||||
if (ret) {
|
||||
netdev_err(master, "error %d setting up slave phy\n", ret);
|
||||
free_netdev(slave_dev);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ds->ports[port] = slave_dev;
|
||||
ret = register_netdev(slave_dev);
|
||||
if (ret) {
|
||||
@@ -1209,9 +1202,27 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
|
||||
|
||||
netif_carrier_off(slave_dev);
|
||||
|
||||
ret = dsa_slave_phy_setup(p, slave_dev);
|
||||
if (ret) {
|
||||
netdev_err(master, "error %d setting up slave phy\n", ret);
|
||||
free_netdev(slave_dev);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void dsa_slave_destroy(struct net_device *slave_dev)
|
||||
{
|
||||
struct dsa_slave_priv *p = netdev_priv(slave_dev);
|
||||
|
||||
netif_carrier_off(slave_dev);
|
||||
if (p->phy)
|
||||
phy_disconnect(p->phy);
|
||||
unregister_netdev(slave_dev);
|
||||
free_netdev(slave_dev);
|
||||
}
|
||||
|
||||
static bool dsa_slave_dev_check(struct net_device *dev)
|
||||
{
|
||||
return dev->netdev_ops == &dsa_slave_netdev_ops;
|
||||
|
||||
@@ -52,6 +52,8 @@
|
||||
#include <linux/errno.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/if_ether.h>
|
||||
#include <linux/of_net.h>
|
||||
#include <linux/pci.h>
|
||||
#include <net/dst.h>
|
||||
#include <net/arp.h>
|
||||
#include <net/sock.h>
|
||||
@@ -485,3 +487,32 @@ static int __init eth_offload_init(void)
|
||||
}
|
||||
|
||||
fs_initcall(eth_offload_init);
|
||||
|
||||
unsigned char * __weak arch_get_platform_mac_address(void)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr)
|
||||
{
|
||||
const unsigned char *addr;
|
||||
struct device_node *dp;
|
||||
|
||||
if (dev_is_pci(dev))
|
||||
dp = pci_device_to_OF_node(to_pci_dev(dev));
|
||||
else
|
||||
dp = dev->of_node;
|
||||
|
||||
addr = NULL;
|
||||
if (dp)
|
||||
addr = of_get_mac_address(dp);
|
||||
if (!addr)
|
||||
addr = arch_get_platform_mac_address();
|
||||
|
||||
if (!addr)
|
||||
return -ENODEV;
|
||||
|
||||
ether_addr_copy(mac_addr, addr);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(eth_platform_get_mac_address);
|
||||
|
||||
@@ -161,9 +161,7 @@ static int lowpan_newlink(struct net *src_net, struct net_device *ldev,
|
||||
wdev->needed_headroom;
|
||||
ldev->needed_tailroom = wdev->needed_tailroom;
|
||||
|
||||
lowpan_netdev_setup(ldev, LOWPAN_LLTYPE_IEEE802154);
|
||||
|
||||
ret = register_netdevice(ldev);
|
||||
ret = lowpan_register_netdevice(ldev, LOWPAN_LLTYPE_IEEE802154);
|
||||
if (ret < 0) {
|
||||
dev_put(wdev);
|
||||
return ret;
|
||||
@@ -180,7 +178,7 @@ static void lowpan_dellink(struct net_device *ldev, struct list_head *head)
|
||||
ASSERT_RTNL();
|
||||
|
||||
wdev->ieee802154_ptr->lowpan_dev = NULL;
|
||||
unregister_netdevice(ldev);
|
||||
lowpan_unregister_netdevice(ldev);
|
||||
dev_put(wdev);
|
||||
}
|
||||
|
||||
|
||||
@@ -624,7 +624,6 @@ int __init lowpan_net_frag_init(void)
|
||||
lowpan_frags.hashfn = lowpan_hashfn;
|
||||
lowpan_frags.constructor = lowpan_frag_init;
|
||||
lowpan_frags.destructor = NULL;
|
||||
lowpan_frags.skb_free = NULL;
|
||||
lowpan_frags.qsize = sizeof(struct frag_queue);
|
||||
lowpan_frags.match = lowpan_frag_match;
|
||||
lowpan_frags.frag_expire = lowpan_frag_expire;
|
||||
|
||||
@@ -436,6 +436,19 @@ config INET_UDP_DIAG
|
||||
Support for UDP socket monitoring interface used by the ss tool.
|
||||
If unsure, say Y.
|
||||
|
||||
config INET_DIAG_DESTROY
|
||||
bool "INET: allow privileged process to administratively close sockets"
|
||||
depends on INET_DIAG
|
||||
default n
|
||||
---help---
|
||||
Provides a SOCK_DESTROY operation that allows privileged processes
|
||||
(e.g., a connection manager or a network administration tool such as
|
||||
ss) to close sockets opened by other processes. Closing a socket in
|
||||
this way interrupts any blocking read/write/connect operations on
|
||||
the socket and causes future socket calls to behave as if the socket
|
||||
had been disconnected.
|
||||
If unsure, say N.
|
||||
|
||||
menuconfig TCP_CONG_ADVANCED
|
||||
bool "TCP: advanced congestion control"
|
||||
---help---
|
||||
|
||||
@@ -498,7 +498,7 @@ static int fou_create(struct net *net, struct fou_cfg *cfg,
|
||||
sk->sk_allocation = GFP_ATOMIC;
|
||||
|
||||
if (cfg->udp_config.family == AF_INET) {
|
||||
err = udp_add_offload(&fou->udp_offloads);
|
||||
err = udp_add_offload(net, &fou->udp_offloads);
|
||||
if (err)
|
||||
goto error;
|
||||
}
|
||||
|
||||
@@ -350,17 +350,12 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
|
||||
nlmsg_flags, unlh);
|
||||
}
|
||||
|
||||
int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
|
||||
struct sk_buff *in_skb,
|
||||
const struct nlmsghdr *nlh,
|
||||
const struct inet_diag_req_v2 *req)
|
||||
struct sock *inet_diag_find_one_icsk(struct net *net,
|
||||
struct inet_hashinfo *hashinfo,
|
||||
const struct inet_diag_req_v2 *req)
|
||||
{
|
||||
struct net *net = sock_net(in_skb->sk);
|
||||
struct sk_buff *rep;
|
||||
struct sock *sk;
|
||||
int err;
|
||||
|
||||
err = -EINVAL;
|
||||
if (req->sdiag_family == AF_INET)
|
||||
sk = inet_lookup(net, hashinfo, req->id.idiag_dst[0],
|
||||
req->id.idiag_dport, req->id.idiag_src[0],
|
||||
@@ -375,15 +370,33 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
|
||||
req->id.idiag_if);
|
||||
#endif
|
||||
else
|
||||
goto out_nosk;
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
err = -ENOENT;
|
||||
if (!sk)
|
||||
goto out_nosk;
|
||||
return ERR_PTR(-ENOENT);
|
||||
|
||||
err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
|
||||
if (err)
|
||||
goto out;
|
||||
if (sock_diag_check_cookie(sk, req->id.idiag_cookie)) {
|
||||
sock_gen_put(sk);
|
||||
return ERR_PTR(-ENOENT);
|
||||
}
|
||||
|
||||
return sk;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(inet_diag_find_one_icsk);
|
||||
|
||||
int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
|
||||
struct sk_buff *in_skb,
|
||||
const struct nlmsghdr *nlh,
|
||||
const struct inet_diag_req_v2 *req)
|
||||
{
|
||||
struct net *net = sock_net(in_skb->sk);
|
||||
struct sk_buff *rep;
|
||||
struct sock *sk;
|
||||
int err;
|
||||
|
||||
sk = inet_diag_find_one_icsk(net, hashinfo, req);
|
||||
if (IS_ERR(sk))
|
||||
return PTR_ERR(sk);
|
||||
|
||||
rep = nlmsg_new(inet_sk_attr_size(), GFP_KERNEL);
|
||||
if (!rep) {
|
||||
@@ -409,12 +422,11 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
|
||||
if (sk)
|
||||
sock_gen_put(sk);
|
||||
|
||||
out_nosk:
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(inet_diag_dump_one_icsk);
|
||||
|
||||
static int inet_diag_get_exact(struct sk_buff *in_skb,
|
||||
static int inet_diag_cmd_exact(int cmd, struct sk_buff *in_skb,
|
||||
const struct nlmsghdr *nlh,
|
||||
const struct inet_diag_req_v2 *req)
|
||||
{
|
||||
@@ -424,8 +436,12 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
|
||||
handler = inet_diag_lock_handler(req->sdiag_protocol);
|
||||
if (IS_ERR(handler))
|
||||
err = PTR_ERR(handler);
|
||||
else
|
||||
else if (cmd == SOCK_DIAG_BY_FAMILY)
|
||||
err = handler->dump_one(in_skb, nlh, req);
|
||||
else if (cmd == SOCK_DESTROY && handler->destroy)
|
||||
err = handler->destroy(in_skb, req);
|
||||
else
|
||||
err = -EOPNOTSUPP;
|
||||
inet_diag_unlock_handler(handler);
|
||||
|
||||
return err;
|
||||
@@ -938,7 +954,7 @@ static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
|
||||
req.idiag_states = rc->idiag_states;
|
||||
req.id = rc->id;
|
||||
|
||||
return inet_diag_get_exact(in_skb, nlh, &req);
|
||||
return inet_diag_cmd_exact(SOCK_DIAG_BY_FAMILY, in_skb, nlh, &req);
|
||||
}
|
||||
|
||||
static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
|
||||
@@ -972,7 +988,7 @@ static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
|
||||
return inet_diag_get_exact_compat(skb, nlh);
|
||||
}
|
||||
|
||||
static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
|
||||
static int inet_diag_handler_cmd(struct sk_buff *skb, struct nlmsghdr *h)
|
||||
{
|
||||
int hdrlen = sizeof(struct inet_diag_req_v2);
|
||||
struct net *net = sock_net(skb->sk);
|
||||
@@ -980,7 +996,8 @@ static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
|
||||
if (nlmsg_len(h) < hdrlen)
|
||||
return -EINVAL;
|
||||
|
||||
if (h->nlmsg_flags & NLM_F_DUMP) {
|
||||
if (h->nlmsg_type == SOCK_DIAG_BY_FAMILY &&
|
||||
h->nlmsg_flags & NLM_F_DUMP) {
|
||||
if (nlmsg_attrlen(h, hdrlen)) {
|
||||
struct nlattr *attr;
|
||||
|
||||
@@ -999,7 +1016,7 @@ static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
|
||||
}
|
||||
}
|
||||
|
||||
return inet_diag_get_exact(skb, h, nlmsg_data(h));
|
||||
return inet_diag_cmd_exact(h->nlmsg_type, skb, h, nlmsg_data(h));
|
||||
}
|
||||
|
||||
static
|
||||
@@ -1050,14 +1067,16 @@ int inet_diag_handler_get_info(struct sk_buff *skb, struct sock *sk)
|
||||
|
||||
static const struct sock_diag_handler inet_diag_handler = {
|
||||
.family = AF_INET,
|
||||
.dump = inet_diag_handler_dump,
|
||||
.dump = inet_diag_handler_cmd,
|
||||
.get_info = inet_diag_handler_get_info,
|
||||
.destroy = inet_diag_handler_cmd,
|
||||
};
|
||||
|
||||
static const struct sock_diag_handler inet6_diag_handler = {
|
||||
.family = AF_INET6,
|
||||
.dump = inet_diag_handler_dump,
|
||||
.dump = inet_diag_handler_cmd,
|
||||
.get_info = inet_diag_handler_get_info,
|
||||
.destroy = inet_diag_handler_cmd,
|
||||
};
|
||||
|
||||
int inet_diag_register(const struct inet_diag_handler *h)
|
||||
|
||||
@@ -285,14 +285,6 @@ void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
|
||||
}
|
||||
EXPORT_SYMBOL(inet_frag_kill);
|
||||
|
||||
static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
if (f->skb_free)
|
||||
f->skb_free(skb);
|
||||
kfree_skb(skb);
|
||||
}
|
||||
|
||||
void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
|
||||
{
|
||||
struct sk_buff *fp;
|
||||
@@ -309,7 +301,7 @@ void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
|
||||
struct sk_buff *xp = fp->next;
|
||||
|
||||
sum_truesize += fp->truesize;
|
||||
frag_kfree_skb(nf, f, fp);
|
||||
kfree_skb(fp);
|
||||
fp = xp;
|
||||
}
|
||||
sum = sum_truesize + f->qsize;
|
||||
|
||||
@@ -891,7 +891,6 @@ void __init ipfrag_init(void)
|
||||
ip4_frags.hashfn = ip4_hashfn;
|
||||
ip4_frags.constructor = ip4_frag_init;
|
||||
ip4_frags.destructor = ip4_frag_free;
|
||||
ip4_frags.skb_free = NULL;
|
||||
ip4_frags.qsize = sizeof(struct ipq);
|
||||
ip4_frags.match = ip4_frag_match;
|
||||
ip4_frags.frag_expire = ip_expire;
|
||||
|
||||
@@ -24,7 +24,6 @@
|
||||
#include <linux/tcp.h>
|
||||
#include <linux/udp.h>
|
||||
#include <linux/if_arp.h>
|
||||
#include <linux/mroute.h>
|
||||
#include <linux/if_vlan.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/in6.h>
|
||||
@@ -562,10 +561,9 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
tunnel_id_to_key(tun_info->key.tun_id), 0);
|
||||
|
||||
df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
|
||||
err = iptunnel_xmit(skb->sk, rt, skb, fl.saddr,
|
||||
key->u.ipv4.dst, IPPROTO_GRE,
|
||||
key->tos, key->ttl, df, false);
|
||||
iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
|
||||
|
||||
iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
|
||||
key->tos, key->ttl, df, false);
|
||||
return;
|
||||
|
||||
err_free_rt:
|
||||
|
||||
@@ -76,7 +76,6 @@
|
||||
#include <linux/igmp.h>
|
||||
#include <linux/netfilter_ipv4.h>
|
||||
#include <linux/netfilter_bridge.h>
|
||||
#include <linux/mroute.h>
|
||||
#include <linux/netlink.h>
|
||||
#include <linux/tcp.h>
|
||||
|
||||
@@ -912,7 +911,7 @@ static int __ip_append_data(struct sock *sk,
|
||||
*/
|
||||
if (transhdrlen &&
|
||||
length + fragheaderlen <= mtu &&
|
||||
rt->dst.dev->features & NETIF_F_V4_CSUM &&
|
||||
rt->dst.dev->features & (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM) &&
|
||||
!(flags & MSG_MORE) &&
|
||||
!exthdrlen)
|
||||
csummode = CHECKSUM_PARTIAL;
|
||||
@@ -921,7 +920,7 @@ static int __ip_append_data(struct sock *sk,
|
||||
if (((length > mtu) || (skb && skb_is_gso(skb))) &&
|
||||
(sk->sk_protocol == IPPROTO_UDP) &&
|
||||
(rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
|
||||
(sk->sk_type == SOCK_DGRAM)) {
|
||||
(sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
|
||||
err = ip_ufo_append_data(sk, queue, getfrag, from, length,
|
||||
hh_len, fragheaderlen, transhdrlen,
|
||||
maxfraglen, flags);
|
||||
|
||||
@@ -30,7 +30,6 @@
|
||||
#include <linux/tcp.h>
|
||||
#include <linux/udp.h>
|
||||
#include <linux/if_arp.h>
|
||||
#include <linux/mroute.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/in6.h>
|
||||
#include <linux/inetdevice.h>
|
||||
@@ -657,7 +656,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
|
||||
struct rtable *rt; /* Route to the other host */
|
||||
unsigned int max_headroom; /* The extra header space needed */
|
||||
__be32 dst;
|
||||
int err;
|
||||
bool connected;
|
||||
|
||||
inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
|
||||
@@ -795,10 +793,8 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
|
||||
return;
|
||||
}
|
||||
|
||||
err = iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, protocol,
|
||||
tos, ttl, df, !net_eq(tunnel->net, dev_net(dev)));
|
||||
iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
|
||||
|
||||
iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, protocol, tos, ttl,
|
||||
df, !net_eq(tunnel->net, dev_net(dev)));
|
||||
return;
|
||||
|
||||
#if IS_ENABLED(CONFIG_IPV6)
|
||||
|
||||
@@ -24,7 +24,6 @@
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/in.h>
|
||||
#include <linux/if_arp.h>
|
||||
#include <linux/mroute.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/in6.h>
|
||||
#include <linux/inetdevice.h>
|
||||
@@ -48,12 +47,13 @@
|
||||
#include <net/rtnetlink.h>
|
||||
#include <net/dst_metadata.h>
|
||||
|
||||
int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
|
||||
__be32 src, __be32 dst, __u8 proto,
|
||||
__u8 tos, __u8 ttl, __be16 df, bool xnet)
|
||||
void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
|
||||
__be32 src, __be32 dst, __u8 proto,
|
||||
__u8 tos, __u8 ttl, __be16 df, bool xnet)
|
||||
{
|
||||
int pkt_len = skb->len - skb_inner_network_offset(skb);
|
||||
struct net *net = dev_net(rt->dst.dev);
|
||||
struct net_device *dev = skb->dev;
|
||||
struct iphdr *iph;
|
||||
int err;
|
||||
|
||||
@@ -82,7 +82,7 @@ int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
|
||||
err = ip_local_out(net, sk, skb);
|
||||
if (unlikely(net_xmit_eval(err)))
|
||||
pkt_len = 0;
|
||||
return pkt_len;
|
||||
iptunnel_xmit_stats(dev, pkt_len);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(iptunnel_xmit);
|
||||
|
||||
@@ -251,7 +251,7 @@ static int ip_tun_build_state(struct net_device *dev, struct nlattr *attr,
|
||||
tun_info = lwt_tun_info(new_state);
|
||||
|
||||
if (tb[LWTUNNEL_IP_ID])
|
||||
tun_info->key.tun_id = nla_get_u64(tb[LWTUNNEL_IP_ID]);
|
||||
tun_info->key.tun_id = nla_get_be64(tb[LWTUNNEL_IP_ID]);
|
||||
|
||||
if (tb[LWTUNNEL_IP_DST])
|
||||
tun_info->key.u.ipv4.dst = nla_get_be32(tb[LWTUNNEL_IP_DST]);
|
||||
@@ -266,7 +266,7 @@ static int ip_tun_build_state(struct net_device *dev, struct nlattr *attr,
|
||||
tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP_TOS]);
|
||||
|
||||
if (tb[LWTUNNEL_IP_FLAGS])
|
||||
tun_info->key.tun_flags = nla_get_u16(tb[LWTUNNEL_IP_FLAGS]);
|
||||
tun_info->key.tun_flags = nla_get_be16(tb[LWTUNNEL_IP_FLAGS]);
|
||||
|
||||
tun_info->mode = IP_TUNNEL_INFO_TX;
|
||||
tun_info->options_len = 0;
|
||||
@@ -281,12 +281,12 @@ static int ip_tun_fill_encap_info(struct sk_buff *skb,
|
||||
{
|
||||
struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);
|
||||
|
||||
if (nla_put_u64(skb, LWTUNNEL_IP_ID, tun_info->key.tun_id) ||
|
||||
if (nla_put_be64(skb, LWTUNNEL_IP_ID, tun_info->key.tun_id) ||
|
||||
nla_put_be32(skb, LWTUNNEL_IP_DST, tun_info->key.u.ipv4.dst) ||
|
||||
nla_put_be32(skb, LWTUNNEL_IP_SRC, tun_info->key.u.ipv4.src) ||
|
||||
nla_put_u8(skb, LWTUNNEL_IP_TOS, tun_info->key.tos) ||
|
||||
nla_put_u8(skb, LWTUNNEL_IP_TTL, tun_info->key.ttl) ||
|
||||
nla_put_u16(skb, LWTUNNEL_IP_FLAGS, tun_info->key.tun_flags))
|
||||
nla_put_be16(skb, LWTUNNEL_IP_FLAGS, tun_info->key.tun_flags))
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
@@ -346,7 +346,7 @@ static int ip6_tun_build_state(struct net_device *dev, struct nlattr *attr,
|
||||
tun_info = lwt_tun_info(new_state);
|
||||
|
||||
if (tb[LWTUNNEL_IP6_ID])
|
||||
tun_info->key.tun_id = nla_get_u64(tb[LWTUNNEL_IP6_ID]);
|
||||
tun_info->key.tun_id = nla_get_be64(tb[LWTUNNEL_IP6_ID]);
|
||||
|
||||
if (tb[LWTUNNEL_IP6_DST])
|
||||
tun_info->key.u.ipv6.dst = nla_get_in6_addr(tb[LWTUNNEL_IP6_DST]);
|
||||
@@ -361,7 +361,7 @@ static int ip6_tun_build_state(struct net_device *dev, struct nlattr *attr,
|
||||
tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP6_TC]);
|
||||
|
||||
if (tb[LWTUNNEL_IP6_FLAGS])
|
||||
tun_info->key.tun_flags = nla_get_u16(tb[LWTUNNEL_IP6_FLAGS]);
|
||||
tun_info->key.tun_flags = nla_get_be16(tb[LWTUNNEL_IP6_FLAGS]);
|
||||
|
||||
tun_info->mode = IP_TUNNEL_INFO_TX | IP_TUNNEL_INFO_IPV6;
|
||||
tun_info->options_len = 0;
|
||||
@@ -376,12 +376,12 @@ static int ip6_tun_fill_encap_info(struct sk_buff *skb,
|
||||
{
|
||||
struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);
|
||||
|
||||
if (nla_put_u64(skb, LWTUNNEL_IP6_ID, tun_info->key.tun_id) ||
|
||||
if (nla_put_be64(skb, LWTUNNEL_IP6_ID, tun_info->key.tun_id) ||
|
||||
nla_put_in6_addr(skb, LWTUNNEL_IP6_DST, &tun_info->key.u.ipv6.dst) ||
|
||||
nla_put_in6_addr(skb, LWTUNNEL_IP6_SRC, &tun_info->key.u.ipv6.src) ||
|
||||
nla_put_u8(skb, LWTUNNEL_IP6_HOPLIMIT, tun_info->key.tos) ||
|
||||
nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.ttl) ||
|
||||
nla_put_u16(skb, LWTUNNEL_IP6_FLAGS, tun_info->key.tun_flags))
|
||||
nla_put_be16(skb, LWTUNNEL_IP6_FLAGS, tun_info->key.tun_flags))
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
|
||||
@@ -30,7 +30,6 @@
|
||||
#include <linux/tcp.h>
|
||||
#include <linux/udp.h>
|
||||
#include <linux/if_arp.h>
|
||||
#include <linux/mroute.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/netfilter_ipv4.h>
|
||||
#include <linux/if_ether.h>
|
||||
@@ -200,7 +199,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
|
||||
err = dst_output(tunnel->net, skb->sk, skb);
|
||||
if (net_xmit_eval(err) == 0)
|
||||
err = skb->len;
|
||||
iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
|
||||
iptunnel_xmit_stats(dev, err);
|
||||
return NETDEV_TX_OK;
|
||||
|
||||
tx_error_icmp:
|
||||
|
||||
@@ -65,15 +65,6 @@
|
||||
#include <net/checksum.h>
|
||||
#include <asm/processor.h>
|
||||
|
||||
/* Define this to allow debugging output */
|
||||
#undef IPCONFIG_DEBUG
|
||||
|
||||
#ifdef IPCONFIG_DEBUG
|
||||
#define DBG(x) printk x
|
||||
#else
|
||||
#define DBG(x) do { } while(0)
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_IP_PNP_DHCP)
|
||||
#define IPCONFIG_DHCP
|
||||
#endif
|
||||
@@ -227,7 +218,7 @@ static int __init ic_open_devs(void)
|
||||
if (dev->mtu >= 364)
|
||||
able |= IC_BOOTP;
|
||||
else
|
||||
pr_warn("DHCP/BOOTP: Ignoring device %s, MTU %d too small",
|
||||
pr_warn("DHCP/BOOTP: Ignoring device %s, MTU %d too small\n",
|
||||
dev->name, dev->mtu);
|
||||
if (!(dev->flags & IFF_NOARP))
|
||||
able |= IC_RARP;
|
||||
@@ -254,8 +245,8 @@ static int __init ic_open_devs(void)
|
||||
else
|
||||
d->xid = 0;
|
||||
ic_proto_have_if |= able;
|
||||
DBG(("IP-Config: %s UP (able=%d, xid=%08x)\n",
|
||||
dev->name, able, d->xid));
|
||||
pr_debug("IP-Config: %s UP (able=%d, xid=%08x)\n",
|
||||
dev->name, able, d->xid);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -311,7 +302,7 @@ static void __init ic_close_devs(void)
|
||||
next = d->next;
|
||||
dev = d->dev;
|
||||
if (dev != ic_dev && !netdev_uses_dsa(dev)) {
|
||||
DBG(("IP-Config: Downing %s\n", dev->name));
|
||||
pr_debug("IP-Config: Downing %s\n", dev->name);
|
||||
dev_change_flags(dev, d->flags);
|
||||
}
|
||||
kfree(d);
|
||||
@@ -464,7 +455,8 @@ static int __init ic_defaults(void)
|
||||
&ic_myaddr);
|
||||
return -1;
|
||||
}
|
||||
printk("IP-Config: Guessing netmask %pI4\n", &ic_netmask);
|
||||
pr_notice("IP-Config: Guessing netmask %pI4\n",
|
||||
&ic_netmask);
|
||||
}
|
||||
|
||||
return 0;
|
||||
@@ -675,9 +667,7 @@ ic_dhcp_init_options(u8 *options)
|
||||
u8 *e = options;
|
||||
int len;
|
||||
|
||||
#ifdef IPCONFIG_DEBUG
|
||||
printk("DHCP: Sending message type %d\n", mt);
|
||||
#endif
|
||||
pr_debug("DHCP: Sending message type %d\n", mt);
|
||||
|
||||
memcpy(e, ic_bootp_cookie, 4); /* RFC1048 Magic Cookie */
|
||||
e += 4;
|
||||
@@ -847,7 +837,8 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d
|
||||
else if (dev->type == ARPHRD_FDDI)
|
||||
b->htype = ARPHRD_ETHER;
|
||||
else {
|
||||
printk("Unknown ARP type 0x%04x for device %s\n", dev->type, dev->name);
|
||||
pr_warn("Unknown ARP type 0x%04x for device %s\n", dev->type,
|
||||
dev->name);
|
||||
b->htype = dev->type; /* can cause undefined behavior */
|
||||
}
|
||||
|
||||
@@ -904,14 +895,12 @@ static void __init ic_do_bootp_ext(u8 *ext)
|
||||
int i;
|
||||
__be16 mtu;
|
||||
|
||||
#ifdef IPCONFIG_DEBUG
|
||||
u8 *c;
|
||||
|
||||
printk("DHCP/BOOTP: Got extension %d:",*ext);
|
||||
pr_debug("DHCP/BOOTP: Got extension %d:", *ext);
|
||||
for (c=ext+2; c<ext+2+ext[1]; c++)
|
||||
printk(" %02x", *c);
|
||||
printk("\n");
|
||||
#endif
|
||||
pr_debug(" %02x", *c);
|
||||
pr_debug("\n");
|
||||
|
||||
switch (*ext++) {
|
||||
case 1: /* Subnet mask */
|
||||
@@ -1080,9 +1069,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef IPCONFIG_DEBUG
|
||||
printk("DHCP: Got message type %d\n", mt);
|
||||
#endif
|
||||
pr_debug("DHCP: Got message type %d\n", mt);
|
||||
|
||||
switch (mt) {
|
||||
case DHCPOFFER:
|
||||
@@ -1095,10 +1082,8 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
|
||||
/* Let's accept that offer. */
|
||||
ic_myaddr = b->your_ip;
|
||||
ic_servaddr = server_id;
|
||||
#ifdef IPCONFIG_DEBUG
|
||||
printk("DHCP: Offered address %pI4 by server %pI4\n",
|
||||
&ic_myaddr, &b->iph.saddr);
|
||||
#endif
|
||||
pr_debug("DHCP: Offered address %pI4 by server %pI4\n",
|
||||
&ic_myaddr, &b->iph.saddr);
|
||||
/* The DHCP indicated server address takes
|
||||
* precedence over the bootp header one if
|
||||
* they are different.
|
||||
@@ -1295,11 +1280,10 @@ static int __init ic_dynamic(void)
|
||||
return -1;
|
||||
}
|
||||
|
||||
printk("IP-Config: Got %s answer from %pI4, ",
|
||||
pr_info("IP-Config: Got %s answer from %pI4, my address is %pI4\n",
|
||||
((ic_got_reply & IC_RARP) ? "RARP"
|
||||
: (ic_proto_enabled & IC_USE_DHCP) ? "DHCP" : "BOOTP"),
|
||||
&ic_addrservaddr);
|
||||
pr_cont("my address is %pI4\n", &ic_myaddr);
|
||||
: (ic_proto_enabled & IC_USE_DHCP) ? "DHCP" : "BOOTP"),
|
||||
&ic_addrservaddr, &ic_myaddr);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -1426,7 +1410,7 @@ static int __init ip_auto_config(void)
|
||||
if (!ic_enable)
|
||||
return 0;
|
||||
|
||||
DBG(("IP-Config: Entered.\n"));
|
||||
pr_debug("IP-Config: Entered.\n");
|
||||
#ifdef IPCONFIG_DYNAMIC
|
||||
try_try_again:
|
||||
#endif
|
||||
@@ -1542,7 +1526,7 @@ static int __init ip_auto_config(void)
|
||||
pr_cont(", mtu=%d", ic_dev_mtu);
|
||||
for (i = 0; i < CONF_NAMESERVERS_MAX; i++)
|
||||
if (ic_nameservers[i] != NONE) {
|
||||
pr_info(" nameserver%u=%pI4",
|
||||
pr_cont(" nameserver%u=%pI4",
|
||||
i, &ic_nameservers[i]);
|
||||
break;
|
||||
}
|
||||
@@ -1585,7 +1569,7 @@ static int __init ic_proto_name(char *name)
|
||||
return 1;
|
||||
*v = 0;
|
||||
if (kstrtou8(client_id, 0, dhcp_client_identifier))
|
||||
DBG("DHCP: Invalid client identifier type\n");
|
||||
pr_debug("DHCP: Invalid client identifier type\n");
|
||||
strncpy(dhcp_client_identifier + 1, v + 1, 251);
|
||||
*v = ',';
|
||||
}
|
||||
@@ -1644,7 +1628,7 @@ static int __init ip_auto_config_setup(char *addrs)
|
||||
if ((cp = strchr(ip, ':')))
|
||||
*cp++ = '\0';
|
||||
if (strlen(ip) > 0) {
|
||||
DBG(("IP-Config: Parameter #%d: `%s'\n", num, ip));
|
||||
pr_debug("IP-Config: Parameter #%d: `%s'\n", num, ip);
|
||||
switch (num) {
|
||||
case 0:
|
||||
if ((ic_myaddr = in_aton(ip)) == ANY)
|
||||
@@ -1716,7 +1700,7 @@ static int __init vendor_class_identifier_setup(char *addrs)
|
||||
if (strlcpy(vendor_class_identifier, addrs,
|
||||
sizeof(vendor_class_identifier))
|
||||
>= sizeof(vendor_class_identifier))
|
||||
pr_warn("DHCP: vendorclass too long, truncated to \"%s\"",
|
||||
pr_warn("DHCP: vendorclass too long, truncated to \"%s\"\n",
|
||||
vendor_class_identifier);
|
||||
return 1;
|
||||
}
|
||||
|
||||
@@ -103,7 +103,6 @@
|
||||
#include <linux/tcp.h>
|
||||
#include <linux/udp.h>
|
||||
#include <linux/if_arp.h>
|
||||
#include <linux/mroute.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/netfilter_ipv4.h>
|
||||
#include <linux/if_ether.h>
|
||||
|
||||
745
net/ipv4/ipmr.c
745
net/ipv4/ipmr.c
File diff suppressed because it is too large
Load Diff
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user