mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-15 21:21:49 -04:00
Merge tag 'net-7.1-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Pull networking fixes from Paolo Abeni:
"Including fixes from netfilter.
Previous releases - regressions:
- ethtool: fix NULL pointer dereference in phy_reply_size
- netfilter:
- allocate hook ops while under mutex
- close dangling table module init race
- restore nf_conntrack helper propagation via expectation
- tcp:
- fix potential UAF in reqsk_timer_handler()
- fix out-of-bounds access for twsk in tcp_ao_established_key()
- vsock: fix empty payload in tap skb for non-linear buffers
- hsr: fix NULL pointer dereference in hsr_get_node_data()
- eth:
- cortina: fix RX drop accounting
- ice: fix locking in ice_dcb_rebuild()
Previous releases - always broken:
- napi: avoid gro timer misfiring at end of busypoll
- sched:
- dualpi2: initialize timer earlier in dualpi2_init()
- sch_cbs: Call qdisc_reset for child qdisc
- shaper:
- fix ordering issue in net_shaper_commit()
- reject handle IDs exceeding internal bit-width
- ipv6: flowlabel: enforce per-netns limit for unprivileged callers
- tls: fix off-by-one in sg_chain entry count for wrapped sk_msg ring
- smc: avoid NULL deref of conn->lnk in smc_msg_event tracepoint
- sctp: revalidate list cursor after sctp_sendmsg_to_asoc() in SCTP_SENDALL
- batman-adv:
- reject new tp_meter sessions during teardown
- purge non-released claims
- eth:
- i40e: cleanup PTP registration on probe failure
- idpf: fix double free and use-after-free in aux device error paths
- ena: fix potential use-after-free in get_timestamp"
* tag 'net-7.1-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (88 commits)
net: phy: DP83TC811: add reading of abilities
net: tls: prevent chain-after-chain in plain text SG
net: tls: fix off-by-one in sg_chain entry count for wrapped sk_msg ring
net/smc: reject CHID-0 ACCEPT that matches an empty ism_dev slot
macsec: use rcu_work to defer TX SA crypto cleanup out of softirq
macsec: use rcu_work to defer RX SA crypto cleanup out of softirq
macsec: introduce dedicated workqueue for SA crypto cleanup
net: net_failover: Fix the deadlock in slave register
MAINTAINERS: update atlantic driver maintainer
selftests/tc-testing: Add QFQ/CBS qlen underflow test
net/sched: sch_cbs: Call qdisc_reset for child qdisc
FDDI: defza: Sanitise the reset safety timer
net: ethernet: ravb: Do not check URAM suspension when WoL is active
ethtool: fix ethnl_bitmap32_not_zero() bit interval semantics
net/smc: avoid NULL deref of conn->lnk in smc_msg_event tracepoint
net/smc: fix sleep-inside-lock in __smc_setsockopt() causing local DoS
net: atm: fix skb leak in sigd_send() default branch
net: ethtool: phy: avoid NULL deref when PHY driver is unbound
net: atlantic: preserve PCI wake-from-D3 on shutdown when WOL enabled
net: shaper: reject QUEUE scope handle with missing id
...
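The macsec entries above move SA teardown from a call_rcu() callback into an rcu_work item so the sleeping crypto cleanup runs in process context after the grace period. A minimal sketch of that general pattern follows; the names are hypothetical and it is not taken from the patches themselves:

#include <linux/workqueue.h>
#include <linux/slab.h>

/* Hypothetical object whose teardown may sleep (e.g. frees a crypto tfm). */
struct demo_sa {
	struct rcu_work destroy_work;	/* replaces a plain struct rcu_head */
};

static void demo_sa_destroy(struct work_struct *work)
{
	struct demo_sa *sa =
		container_of(to_rcu_work(work), struct demo_sa, destroy_work);

	/* Runs from a workqueue after an RCU grace period, so sleeping
	 * cleanup (crypto_free_aead() and the like) is safe here.
	 */
	kfree(sa);
}

/* Init:      INIT_RCU_WORK(&sa->destroy_work, demo_sa_destroy);
 * Last put:  queue_rcu_work(system_wq, &sa->destroy_work);
 */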
@@ -69,6 +69,15 @@ properties:
header:
description: For C-compatible languages, header which already defines this value.
type: string
scope:
description: |
Visibility of this definition. "uapi" (default) renders into
the uAPI header, "kernel" renders into the kernel-side
generated header, "user" renders into the user-side
generated header. When combined with `header:`, the
definition is not rendered, and the named header is
included only by code matching the scope.
enum: [ uapi, kernel, user ]
type:
enum: [ const, enum, flags ]
doc:

@@ -83,6 +83,15 @@ properties:
header:
description: For C-compatible languages, header which already defines this value.
type: string
scope:
description: |
Visibility of this definition. "uapi" (default) renders into
the uAPI header, "kernel" renders into the kernel-side
generated header, "user" renders into the user-side
generated header. When combined with `header:`, the
definition is not rendered, and the named header is
included only by code matching the scope.
enum: [ uapi, kernel, user ]
type:
enum: [ const, enum, flags, struct ] # Trim
doc:

@@ -55,6 +55,15 @@ properties:
header:
description: For C-compatible languages, header which already defines this value.
type: string
scope:
description: |
Visibility of this definition. "uapi" (default) renders into
the uAPI header, "kernel" renders into the kernel-side
generated header, "user" renders into the user-side
generated header. When combined with `header:`, the
definition is not rendered, and the named header is
included only by code matching the scope.
enum: [ uapi, kernel, user ]
type:
enum: [ const, enum, flags ]
doc:

@@ -87,6 +87,15 @@ properties:
header:
description: For C-compatible languages, header which already defines this value.
type: string
scope:
description: |
Visibility of this definition. "uapi" (default) renders into
the uAPI header, "kernel" renders into the kernel-side
generated header, "user" renders into the user-side
generated header. When combined with `header:`, the
definition is not rendered, and the named header is
included only by code matching the scope.
enum: [ uapi, kernel, user ]
type:
enum: [ const, enum, flags, struct ] # Trim
doc:

@@ -33,6 +33,11 @@ doc: |
@cap-get operation.

definitions:
-
type: const
name: max-handle-id
value: 0x3fffffe
scope: kernel
-
type: enum
name: scope

@@ -140,6 +145,8 @@ attribute-sets:
-
name: id
type: u32
checks:
max: max-handle-id
doc: |
Numeric identifier of a shaper. The id semantic depends on
the scope. For @queue scope it's the queue id and for @node

MAINTAINERS
@@ -68,6 +68,12 @@ Maintainers List
first. When adding to this list, please keep the entries in
alphabetical order.

3C509 NETWORK DRIVER
M: "Maciej W. Rozycki" <macro@orcam.me.uk>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/3com/3c509.c

3C59X NETWORK DRIVER
M: Steffen Klassert <klassert@kernel.org>
L: netdev@vger.kernel.org

@@ -2015,7 +2021,7 @@ F: Documentation/hwmon/aquacomputer_d5next.rst
F: drivers/hwmon/aquacomputer_d5next.c

AQUANTIA ETHERNET DRIVER (atlantic)
M: Igor Russkikh <irusskikh@marvell.com>
M: Sukhdeep Singh <sukhdeeps@marvell.com>
L: netdev@vger.kernel.org
S: Maintained
W: https://www.marvell.com/

@@ -2024,7 +2030,7 @@ F: Documentation/networking/device_drivers/ethernet/aquantia/atlantic.rst
F: drivers/net/ethernet/aquantia/atlantic/

AQUANTIA ETHERNET DRIVER PTP SUBSYSTEM
M: Egor Pomozov <epomozov@marvell.com>
M: Sukhdeep Singh <sukhdeeps@marvell.com>
L: netdev@vger.kernel.org
S: Maintained
W: http://www.aquantia.com

@@ -4181,8 +4187,8 @@ F: include/uapi/linux/sonet.h
F: net/atm/

ATMEL MACB ETHERNET DRIVER
M: Nicolas Ferre <nicolas.ferre@microchip.com>
M: Claudiu Beznea <claudiu.beznea@tuxon.dev>
M: Théo Lebrun <theo.lebrun@bootlin.com>
R: Conor Dooley <conor.dooley@microchip.com>
S: Maintained
F: drivers/net/ethernet/cadence/
@@ -1782,20 +1782,23 @@ void ena_com_phc_destroy(struct ena_com_dev *ena_dev)

int ena_com_phc_get_timestamp(struct ena_com_dev *ena_dev, u64 *timestamp)
{
volatile struct ena_admin_phc_resp *resp = ena_dev->phc.virt_addr;
const ktime_t zero_system_time = ktime_set(0, 0);
struct ena_com_phc_info *phc = &ena_dev->phc;
volatile struct ena_admin_phc_resp *resp;
ktime_t expire_time;
ktime_t block_time;
unsigned long flags = 0;
int ret = 0;

spin_lock_irqsave(&phc->lock, flags);

if (!phc->active) {
spin_unlock_irqrestore(&phc->lock, flags);
netdev_err(ena_dev->net_device, "PHC feature is not active in the device\n");
return -EOPNOTSUPP;
}

spin_lock_irqsave(&phc->lock, flags);
resp = ena_dev->phc.virt_addr;

/* Check if PHC is in blocked state */
if (unlikely(ktime_compare(phc->system_time, zero_system_time))) {

@@ -46,9 +46,12 @@ static int ena_phc_gettimex64(struct ptp_clock_info *clock_info,

spin_unlock_irqrestore(&phc_info->lock, flags);

if (rc)
return rc;

*ts = ns_to_timespec64(timestamp_nsec);

return rc;
return 0;
}

static int ena_phc_settime64(struct ptp_clock_info *clock_info,

@@ -910,7 +910,9 @@ static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata,
return -ENXIO;
}

return of_mdiobus_register(mdio, mdio_np);
ret = of_mdiobus_register(mdio, mdio_np);
of_node_put(mdio_np);
return ret;
}

/* Mask out all PHYs from auto probing. */
@@ -371,7 +371,7 @@ static void aq_pci_shutdown(struct pci_dev *pdev)
pci_disable_device(pdev);

if (system_state == SYSTEM_POWER_OFF) {
pci_wake_from_d3(pdev, false);
pci_wake_from_d3(pdev, self->aq_hw->aq_nic_cfg->wol);
pci_set_power_state(pdev, PCI_D3hot);
}
}

@@ -1271,7 +1271,6 @@ static const struct net_device_ops net_ops = {

static void __init reset_chip(struct net_device *dev)
{
#if !defined(CONFIG_MACH_MX31ADS)
struct net_local *lp = netdev_priv(dev);
unsigned long reset_start_time;

@@ -1298,7 +1297,6 @@ static void __init reset_chip(struct net_device *dev)
while ((readreg(dev, PP_SelfST) & INIT_DONE) == 0 &&
time_before(jiffies, reset_start_time + 2))
;
#endif /* !CONFIG_MACH_MX31ADS */
}

/* This is the real probe routine.
@@ -122,6 +122,9 @@ struct gemini_ethernet_port {
struct napi_struct napi;
struct hrtimer rx_coalesce_timer;
unsigned int rx_coalesce_nsecs;
struct sk_buff *rx_skb;
unsigned int rx_frag_nr;

unsigned int freeq_refill;
struct gmac_txq txq[TX_QUEUE_NUM];
unsigned int txq_order;

@@ -1442,10 +1445,11 @@ static unsigned int gmac_rx(struct net_device *netdev, unsigned int budget)
unsigned short m = (1 << port->rxq_order) - 1;
struct gemini_ethernet *geth = port->geth;
void __iomem *ptr_reg = port->rxq_rwptr;
unsigned int frag_nr = port->rx_frag_nr;
struct sk_buff *skb = port->rx_skb;
unsigned int frame_len, frag_len;
struct gmac_rxdesc *rx = NULL;
struct gmac_queue_page *gpage;
static struct sk_buff *skb;
union gmac_rxdesc_0 word0;
union gmac_rxdesc_1 word1;
union gmac_rxdesc_3 word3;

@@ -1455,7 +1459,6 @@ static unsigned int gmac_rx(struct net_device *netdev, unsigned int budget)
unsigned short r, w;
union dma_rwptr rw;
dma_addr_t mapping;
int frag_nr = 0;

spin_lock_irqsave(&geth->irq_lock, flags);
rw.bits32 = readl(ptr_reg);

@@ -1491,10 +1494,11 @@ static unsigned int gmac_rx(struct net_device *netdev, unsigned int budget)
gpage = gmac_get_queue_page(geth, port, mapping + PAGE_SIZE);
if (!gpage) {
dev_err(geth->dev, "could not find mapping\n");
port->stats.rx_dropped++;
if (skb) {
napi_free_frags(&port->napi);
port->stats.rx_dropped++;
skb = NULL;
frag_nr = 0;
}
continue;
}

@@ -1504,6 +1508,8 @@ static unsigned int gmac_rx(struct net_device *netdev, unsigned int budget)
if (skb) {
napi_free_frags(&port->napi);
port->stats.rx_dropped++;
skb = NULL;
frag_nr = 0;
}

skb = gmac_skb_if_good_frame(port, word0, frame_len);

@@ -1538,6 +1544,7 @@ static unsigned int gmac_rx(struct net_device *netdev, unsigned int budget)
if (word3.bits32 & EOF_BIT) {
napi_gro_frags(&port->napi);
skb = NULL;
frag_nr = 0;
--budget;
}
continue;

@@ -1546,6 +1553,7 @@ static unsigned int gmac_rx(struct net_device *netdev, unsigned int budget)
if (skb) {
napi_free_frags(&port->napi);
skb = NULL;
frag_nr = 0;
}

if (mapping)

@@ -1554,6 +1562,8 @@ static unsigned int gmac_rx(struct net_device *netdev, unsigned int budget)
port->stats.rx_dropped++;
}

port->rx_skb = skb;
port->rx_frag_nr = frag_nr;
writew(r, ptr_reg);
return budget;
}

@@ -1881,6 +1891,8 @@ static int gmac_stop(struct net_device *netdev)
gmac_disable_tx_rx(netdev);
gmac_stop_dma(port);
napi_disable(&port->napi);
port->rx_skb = NULL;
port->rx_frag_nr = 0;

gmac_enable_irq(netdev, 0);
gmac_cleanup_rxq(netdev);
@@ -1318,6 +1318,7 @@ void i40e_ptp_restore_hw_time(struct i40e_pf *pf);
void i40e_ptp_init(struct i40e_pf *pf);
void i40e_ptp_stop(struct i40e_pf *pf);
int i40e_ptp_alloc_pins(struct i40e_pf *pf);
void i40e_ptp_free_pins(struct i40e_pf *pf);
int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset);
int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi);
int i40e_get_partition_bw_setting(struct i40e_pf *pf);

@@ -16108,9 +16108,11 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Unwind what we've done if something failed in the setup */
err_vsis:
set_bit(__I40E_DOWN, pf->state);
i40e_ptp_stop(pf);
i40e_clear_interrupt_scheme(pf);
kfree(pf->vsi);
err_switch_setup:
i40e_ptp_free_pins(pf);
i40e_reset_interrupt_capability(pf);
timer_shutdown_sync(&pf->service_timer);
err_mac_addr:

@@ -940,12 +940,13 @@ int i40e_ptp_hwtstamp_get(struct net_device *netdev,
*
* Release memory allocated for PTP pins.
**/
static void i40e_ptp_free_pins(struct i40e_pf *pf)
void i40e_ptp_free_pins(struct i40e_pf *pf)
{
if (i40e_is_ptp_pin_dev(&pf->hw)) {
kfree(pf->ptp_pins);
kfree(pf->ptp_caps.pin_config);
pf->ptp_pins = NULL;
pf->ptp_caps.pin_config = NULL;
}
}
@@ -537,14 +537,14 @@ void ice_dcb_rebuild(struct ice_pf *pf)
struct ice_dcbx_cfg *err_cfg;
int ret;

mutex_lock(&pf->tc_mutex);

ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
if (ret) {
dev_err(dev, "Query Port ETS failed\n");
goto dcb_error;
}

mutex_lock(&pf->tc_mutex);

if (!pf->hw.port_info->qos_cfg.is_sw_lldp)
ice_cfg_etsrec_defaults(pf->hw.port_info);

@@ -2523,6 +2523,8 @@ ice_dpll_rclk_state_on_pin_set(const struct dpll_pin *pin, void *pin_priv,
if (hw_idx < 0)
goto unlock;
hw_idx -= pf->dplls.base_rclk_idx;
if (hw_idx >= ICE_DPLL_RCLK_NUM_MAX)
goto unlock;

if ((enable && p->state[hw_idx] == DPLL_PIN_STATE_CONNECTED) ||
(!enable && p->state[hw_idx] == DPLL_PIN_STATE_DISCONNECTED)) {

@@ -2586,6 +2588,9 @@ ice_dpll_rclk_state_on_pin_get(const struct dpll_pin *pin, void *pin_priv,
hw_idx = ice_dpll_pin_get_parent_idx(p, parent_pin);
if (hw_idx < 0)
goto unlock;
hw_idx -= pf->dplls.base_rclk_idx;
if (hw_idx >= ICE_DPLL_RCLK_NUM_MAX)
goto unlock;

ret = ice_dpll_pin_state_update(pf, p, ICE_DPLL_PIN_TYPE_RCLK_INPUT,
extack);
@@ -8,6 +8,22 @@

#define ICE_DPLL_RCLK_NUM_MAX 4

#define ICE_CGU_R10 0x28
#define ICE_CGU_R10_SYNCE_CLKO_SEL GENMASK(8, 5)
#define ICE_CGU_R10_SYNCE_CLKODIV_M1 GENMASK(13, 9)
#define ICE_CGU_R10_SYNCE_CLKODIV_LOAD BIT(14)
#define ICE_CGU_R10_SYNCE_DCK_RST BIT(15)
#define ICE_CGU_R10_SYNCE_ETHCLKO_SEL GENMASK(18, 16)
#define ICE_CGU_R10_SYNCE_ETHDIV_M1 GENMASK(23, 19)
#define ICE_CGU_R10_SYNCE_ETHDIV_LOAD BIT(24)
#define ICE_CGU_R10_SYNCE_DCK2_RST BIT(25)
#define ICE_CGU_R10_SYNCE_S_REF_CLK GENMASK(31, 27)

#define ICE_CGU_R11 0x2C
#define ICE_CGU_R11_SYNCE_S_BYP_CLK GENMASK(6, 1)

#define ICE_CGU_BYPASS_MUX_OFFSET_E825C 3

/**
* enum ice_dpll_pin_sw - enumerate ice software pin indices:
* @ICE_DPLL_PIN_SW_1_IDX: index of first SW pin

@@ -157,19 +173,3 @@ static inline void ice_dpll_deinit(struct ice_pf *pf) { }
#endif

#endif

#define ICE_CGU_R10 0x28
#define ICE_CGU_R10_SYNCE_CLKO_SEL GENMASK(8, 5)
#define ICE_CGU_R10_SYNCE_CLKODIV_M1 GENMASK(13, 9)
#define ICE_CGU_R10_SYNCE_CLKODIV_LOAD BIT(14)
#define ICE_CGU_R10_SYNCE_DCK_RST BIT(15)
#define ICE_CGU_R10_SYNCE_ETHCLKO_SEL GENMASK(18, 16)
#define ICE_CGU_R10_SYNCE_ETHDIV_M1 GENMASK(23, 19)
#define ICE_CGU_R10_SYNCE_ETHDIV_LOAD BIT(24)
#define ICE_CGU_R10_SYNCE_DCK2_RST BIT(25)
#define ICE_CGU_R10_SYNCE_S_REF_CLK GENMASK(31, 27)

#define ICE_CGU_R11 0x2C
#define ICE_CGU_R11_SYNCE_S_BYP_CLK GENMASK(6, 1)

#define ICE_CGU_BYPASS_MUX_OFFSET_E825C 3

@@ -8046,7 +8046,7 @@ int ice_set_rss_hfunc(struct ice_vsi *vsi, u8 hfunc)
ctx->info.q_opt_rss |=
FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hfunc);
ctx->info.q_opt_tc = vsi->info.q_opt_tc;
ctx->info.q_opt_flags = vsi->info.q_opt_rss;
ctx->info.q_opt_flags = vsi->info.q_opt_flags;

err = ice_update_vsi(hw, vsi->idx, ctx, NULL);
if (err) {
@@ -90,7 +90,10 @@ static int idpf_plug_vport_aux_dev(struct iidc_rdma_core_dev_info *cdev_info,
return 0;

err_aux_dev_add:
ida_free(&idpf_idc_ida, adev->id);
vdev_info->adev = NULL;
auxiliary_device_uninit(adev);
return ret;
err_aux_dev_init:
ida_free(&idpf_idc_ida, adev->id);
err_ida_alloc:

@@ -228,7 +231,10 @@ static int idpf_plug_core_aux_dev(struct iidc_rdma_core_dev_info *cdev_info)
return 0;

err_aux_dev_add:
ida_free(&idpf_idc_ida, adev->id);
cdev_info->adev = NULL;
auxiliary_device_uninit(adev);
return ret;
err_aux_dev_init:
ida_free(&idpf_idc_ida, adev->id);
err_ida_alloc:

@@ -952,6 +952,8 @@ int idpf_ptp_init(struct idpf_adapter *adapter)
goto free_ptp;
}

spin_lock_init(&adapter->ptp->read_dev_clk_lock);

err = idpf_ptp_create_clock(adapter);
if (err)
goto free_ptp;

@@ -977,8 +979,6 @@ int idpf_ptp_init(struct idpf_adapter *adapter)
goto remove_clock;
}

spin_lock_init(&adapter->ptp->read_dev_clk_lock);

pci_dbg(adapter->pdev, "PTP init successful\n");

return 0;
@@ -749,11 +749,10 @@ static void lan966x_cleanup_ports(struct lan966x *lan966x)

for (p = 0; p < lan966x->num_phys_ports; p++) {
port = lan966x->ports[p];
if (!port)
if (!port || !port->dev)
continue;

if (port->dev)
unregister_netdev(port->dev);
unregister_netdev(port->dev);

lan966x_xdp_port_deinit(port);
if (lan966x->fdma && lan966x->fdma_ndev == port->dev)

@@ -873,6 +872,9 @@ static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
err = register_netdev(dev);
if (err) {
dev_err(lan966x->dev, "register_netdev failed\n");
phylink_destroy(phylink);
port->phylink = NULL;
port->dev = NULL;
return err;
}

@@ -5103,6 +5103,13 @@ static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
return -EINVAL;
}

/* All vports are already or become configured, nothing to distribute */
if (non_requested_count == 0) {
p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
p_hwfn->qm_info.wfq_data[vport_id].configured = true;
return 0;
}

total_left_rate = min_pf_rate - total_req_min_rate;

left_rate_per_vp = total_left_rate / non_requested_count;
@@ -1108,9 +1108,12 @@ static int ravb_stop_dma(struct net_device *ndev)

/* Request for transmission suspension */
ravb_modify(ndev, CCC, CCC_DTSR, CCC_DTSR);
error = ravb_wait(ndev, CSR, CSR_DTS, CSR_DTS);
if (error)
netdev_err(ndev, "failed to stop AXI BUS\n");
/* Access to URAM will not be suspended if WoL is enabled. */
if (!priv->wol_enabled) {
error = ravb_wait(ndev, CSR, CSR_DTS, CSR_DTS);
if (error)
netdev_err(ndev, "failed to stop AXI BUS\n");
}

/* Stop AVB-DMAC process */
return ravb_set_opmode(ndev, CCC_OPC_CONFIG);

@@ -1825,6 +1825,7 @@ static int icssm_prueth_probe(struct platform_device *pdev)
dev_err(dev, "%pOF error reading port_id %d\n",
eth_node, ret);
of_node_put(eth_node);
of_node_put(eth_ports_node);
return ret;
}
@@ -984,7 +984,7 @@ static irqreturn_t fza_interrupt(int irq, void *dev_id)

case FZA_STATE_UNINITIALIZED:
netif_carrier_off(dev);
timer_delete_sync(&fp->reset_timer);
timer_delete_sync_try(&fp->reset_timer);
fp->ring_cmd_index = 0;
fp->ring_uns_index = 0;
fp->ring_rmc_tx_index = 0;

@@ -1018,7 +1018,9 @@ static irqreturn_t fza_interrupt(int irq, void *dev_id)
fp->queue_active = 0;
netif_stop_queue(dev);
pr_debug("%s: queue stopped\n", fp->name);
timer_delete_sync(&fp->reset_timer);

spin_lock(&fp->lock);
timer_delete(&fp->reset_timer);
pr_warn("%s: halted, reason: %x\n", fp->name,
FZA_STATUS_GET_HALT(status));
fza_regs_dump(fp);

@@ -1027,6 +1029,8 @@ static irqreturn_t fza_interrupt(int irq, void *dev_id)
fp->timer_state = 0;
fp->reset_timer.expires = jiffies + 45 * HZ;
add_timer(&fp->reset_timer);
spin_unlock(&fp->lock);

break;

default:

@@ -1046,7 +1050,9 @@ static irqreturn_t fza_interrupt(int irq, void *dev_id)
static void fza_reset_timer(struct timer_list *t)
{
struct fza_private *fp = timer_container_of(fp, t, reset_timer);
unsigned long flags;

spin_lock_irqsave(&fp->lock, flags);
if (!fp->timer_state) {
pr_err("%s: RESET timed out!\n", fp->name);
pr_info("%s: trying harder...\n", fp->name);

@@ -1069,6 +1075,7 @@ static void fza_reset_timer(struct timer_list *t)
fp->reset_timer.expires = jiffies + 45 * HZ;
}
add_timer(&fp->reset_timer);
spin_unlock_irqrestore(&fp->lock, flags);
}

static int fza_set_mac_address(struct net_device *dev, void *addr)
@@ -26,6 +26,8 @@

#include <uapi/linux/if_macsec.h>

static struct workqueue_struct *macsec_wq;

/* SecTAG length = macsec_eth_header without the optional SCI */
#define MACSEC_TAG_LEN 6

@@ -174,9 +176,10 @@ static void macsec_rxsc_put(struct macsec_rx_sc *sc)
call_rcu(&sc->rcu_head, free_rx_sc_rcu);
}

static void free_rxsa(struct rcu_head *head)
static void free_rxsa_work(struct work_struct *work)
{
struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);
struct macsec_rx_sa *sa =
container_of(to_rcu_work(work), struct macsec_rx_sa, destroy_work);

crypto_free_aead(sa->key.tfm);
free_percpu(sa->stats);

@@ -186,7 +189,7 @@ static void free_rxsa(struct rcu_head *head)
static void macsec_rxsa_put(struct macsec_rx_sa *sa)
{
if (refcount_dec_and_test(&sa->refcnt))
call_rcu(&sa->rcu, free_rxsa);
queue_rcu_work(macsec_wq, &sa->destroy_work);
}

static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)

@@ -202,9 +205,10 @@ static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
return sa;
}

static void free_txsa(struct rcu_head *head)
static void free_txsa_work(struct work_struct *work)
{
struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);
struct macsec_tx_sa *sa =
container_of(to_rcu_work(work), struct macsec_tx_sa, destroy_work);

crypto_free_aead(sa->key.tfm);
free_percpu(sa->stats);

@@ -214,7 +218,7 @@ static void free_txsa(struct rcu_head *head)
static void macsec_txsa_put(struct macsec_tx_sa *sa)
{
if (refcount_dec_and_test(&sa->refcnt))
call_rcu(&sa->rcu, free_txsa);
queue_rcu_work(macsec_wq, &sa->destroy_work);
}

static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)

@@ -1407,6 +1411,7 @@ static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
rx_sa->next_pn = 1;
refcount_set(&rx_sa->refcnt, 1);
spin_lock_init(&rx_sa->lock);
INIT_RCU_WORK(&rx_sa->destroy_work, free_rxsa_work);

return 0;
}

@@ -1506,6 +1511,7 @@ static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
tx_sa->active = false;
refcount_set(&tx_sa->refcnt, 1);
spin_lock_init(&tx_sa->lock);
INIT_RCU_WORK(&tx_sa->destroy_work, free_txsa_work);

return 0;
}

@@ -4505,25 +4511,35 @@ static int __init macsec_init(void)
{
int err;

macsec_wq = alloc_workqueue("macsec", WQ_UNBOUND, 0);
if (!macsec_wq)
return -ENOMEM;

pr_info("MACsec IEEE 802.1AE\n");
err = register_netdevice_notifier(&macsec_notifier);
if (err)
return err;
goto err_destroy_wq;

err = rtnl_link_register(&macsec_link_ops);
if (err)
goto notifier;
goto err_notifier;

err = genl_register_family(&macsec_fam);
if (err)
goto rtnl;
goto err_rtnl;

return 0;

rtnl:
err_rtnl:
rtnl_link_unregister(&macsec_link_ops);
notifier:
err_notifier:
unregister_netdevice_notifier(&macsec_notifier);
err_destroy_wq:
/* Precautionary, mirrors macsec_exit() to stay safe if work
* ever becomes queueable before this point in the future.
*/
rcu_barrier();
destroy_workqueue(macsec_wq);
return err;
}

@@ -4533,6 +4549,7 @@ static void __exit macsec_exit(void)
rtnl_link_unregister(&macsec_link_ops);
unregister_netdevice_notifier(&macsec_notifier);
rcu_barrier();
destroy_workqueue(macsec_wq);
}

module_init(macsec_init);
@@ -502,7 +502,7 @@ static int net_failover_slave_register(struct net_device *slave_dev,

/* Align MTU of slave with failover dev */
orig_mtu = slave_dev->mtu;
err = dev_set_mtu(slave_dev, failover_dev->mtu);
err = netif_set_mtu(slave_dev, failover_dev->mtu);
if (err) {
netdev_err(failover_dev, "unable to change mtu of %s to %u register failed\n",
slave_dev->name, failover_dev->mtu);

@@ -512,11 +512,11 @@ static int net_failover_slave_register(struct net_device *slave_dev,
dev_hold(slave_dev);

if (netif_running(failover_dev)) {
err = dev_open(slave_dev, NULL);
err = netif_open(slave_dev, NULL);
if (err && (err != -EBUSY)) {
netdev_err(failover_dev, "Opening slave %s failed err:%d\n",
slave_dev->name, err);
goto err_dev_open;
goto err_netif_open;
}
}

@@ -562,10 +562,10 @@ static int net_failover_slave_register(struct net_device *slave_dev,
err_vlan_add:
dev_uc_unsync(slave_dev, failover_dev);
dev_mc_unsync(slave_dev, failover_dev);
dev_close(slave_dev);
err_dev_open:
netif_close(slave_dev);
err_netif_open:
dev_put(slave_dev);
dev_set_mtu(slave_dev, orig_mtu);
netif_set_mtu(slave_dev, orig_mtu);
done:
return err;
}
@@ -393,6 +393,7 @@ static struct phy_driver dp83811_driver[] = {
.config_init = dp83811_config_init,
.config_aneg = dp83811_config_aneg,
.soft_reset = dp83811_phy_reset,
.get_features = genphy_c45_pma_read_ext_abilities,
.get_wol = dp83811_get_wol,
.set_wol = dp83811_set_wol,
.config_intr = dp83811_config_intr,

@@ -740,6 +740,8 @@ static int uhdlc_open(struct net_device *dev)

static void uhdlc_memclean(struct ucc_hdlc_private *priv)
{
int i;

qe_muram_free(ioread16be(&priv->ucc_pram->riptr));
qe_muram_free(ioread16be(&priv->ucc_pram->tiptr));

@@ -770,6 +772,11 @@ static void uhdlc_memclean(struct ucc_hdlc_private *priv)
kfree(priv->rx_skbuff);
priv->rx_skbuff = NULL;

for (i = 0; i < TX_BD_RING_LEN; i++) {
dev_kfree_skb(priv->tx_skbuff[i]);
priv->tx_skbuff[i] = NULL;
}

kfree(priv->tx_skbuff);
priv->tx_skbuff = NULL;
@@ -305,9 +305,11 @@ struct xt_counters *xt_counters_alloc(unsigned int counters);

struct xt_table *xt_register_table(struct net *net,
const struct xt_table *table,
const struct nf_hook_ops *template_ops,
struct xt_table_info *bootstrap,
struct xt_table_info *newinfo);
void *xt_unregister_table(struct xt_table *table);
void xt_unregister_table_pre_exit(struct net *net, u8 af, const char *name);
struct xt_table *xt_unregister_table_exit(struct net *net, u8 af, const char *name);

struct xt_table_info *xt_replace_table(struct xt_table *table,
unsigned int num_counters,

@@ -53,7 +53,6 @@ int arpt_register_table(struct net *net, const struct xt_table *table,
const struct arpt_replace *repl,
const struct nf_hook_ops *ops);
void arpt_unregister_table(struct net *net, const char *name);
void arpt_unregister_table_pre_exit(struct net *net, const char *name);
extern unsigned int arpt_do_table(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state);

@@ -26,7 +26,6 @@ int ipt_register_table(struct net *net, const struct xt_table *table,
const struct ipt_replace *repl,
const struct nf_hook_ops *ops);

void ipt_unregister_table_pre_exit(struct net *net, const char *name);
void ipt_unregister_table_exit(struct net *net, const char *name);

/* Standard entry. */

@@ -27,7 +27,6 @@ extern void *ip6t_alloc_initial_table(const struct xt_table *);
int ip6t_register_table(struct net *net, const struct xt_table *table,
const struct ip6t_replace *repl,
const struct nf_hook_ops *ops);
void ip6t_unregister_table_pre_exit(struct net *net, const char *name);
void ip6t_unregister_table_exit(struct net *net, const char *name);
extern unsigned int ip6t_do_table(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state);
@@ -489,8 +489,10 @@ genlmsg_multicast_netns_filtered(const struct genl_family *family,
netlink_filter_fn filter,
void *filter_data)
{
if (WARN_ON_ONCE(group >= family->n_mcgrps))
if (WARN_ON_ONCE(group >= family->n_mcgrps)) {
nlmsg_free(skb);
return -EINVAL;
}
group = family->mcgrp_offset + group;
return nlmsg_multicast_filtered(net->genl_sock, skb, portid, group,
flags, filter, filter_data);
@@ -9,6 +9,7 @@

#include <linux/u64_stats_sync.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <uapi/linux/if_link.h>
#include <uapi/linux/if_macsec.h>

@@ -123,6 +124,7 @@ struct macsec_dev_stats {
* @key: key structure
* @ssci: short secure channel identifier
* @stats: per-SA stats
* @destroy_work: deferred work to free the SA in process context after RCU grace period
*/
struct macsec_rx_sa {
struct macsec_key key;

@@ -136,7 +138,7 @@ struct macsec_rx_sa {
bool active;
struct macsec_rx_sa_stats __percpu *stats;
struct macsec_rx_sc *sc;
struct rcu_head rcu;
struct rcu_work destroy_work;
};

struct pcpu_rx_sc_stats {

@@ -174,6 +176,7 @@ struct macsec_rx_sc {
* @key: key structure
* @ssci: short secure channel identifier
* @stats: per-SA stats
* @destroy_work: deferred work to free the SA in process context after RCU grace period
*/
struct macsec_tx_sa {
struct macsec_key key;

@@ -186,7 +189,7 @@ struct macsec_tx_sa {
refcount_t refcnt;
bool active;
struct macsec_tx_sa_stats __percpu *stats;
struct rcu_head rcu;
struct rcu_work destroy_work;
};

/**
@@ -45,9 +45,12 @@ struct nf_conntrack_expect {
void (*expectfn)(struct nf_conn *new,
struct nf_conntrack_expect *this);

/* Helper to assign to new connection */
/* Helper that created this expectation */
struct nf_conntrack_helper __rcu *helper;

/* Helper to assign to new connection */
struct nf_conntrack_helper __rcu *assign_helper;

/* The conntrack of the master connection */
struct nf_conn *master;

@@ -119,6 +119,7 @@ struct netns_ipv6 {
struct fib_notifier_ops *notifier_ops;
struct fib_notifier_ops *ip6mr_notifier_ops;
atomic_t ipmr_seq;
int flowlabel_count;
struct {
struct hlist_head head;
spinlock_t lock;
@@ -247,10 +247,10 @@ struct nshhdr {
#define NSH_M_TYPE1_LEN 24

/* NSH header maximum Length. */
#define NSH_HDR_MAX_LEN 256
#define NSH_HDR_MAX_LEN ((NSH_LEN_MASK >> NSH_LEN_SHIFT) * 4)

/* NSH context headers maximum Length. */
#define NSH_CTX_HDRS_MAX_LEN 248
#define NSH_CTX_HDRS_MAX_LEN (NSH_HDR_MAX_LEN - NSH_BASE_HDR_LEN)

static inline struct nshhdr *nsh_hdr(struct sk_buff *skb)
{

@@ -179,6 +179,7 @@ static int sigd_send(struct atm_vcc *vcc, struct sk_buff *skb)
break;
default:
pr_alert("bad message type %d\n", (int)msg->type);
dev_kfree_skb(skb);
/* Paired with find_get_vcc(msg->vcc) above */
sock_put(sk);
return -EINVAL;
@@ -173,19 +173,12 @@ batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const u8 *addr)
static struct batadv_neigh_node *
batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface,
const u8 *neigh_addr,
struct batadv_orig_node *orig_node,
struct batadv_orig_node *orig_neigh)
struct batadv_orig_node *orig_node)
{
struct batadv_neigh_node *neigh_node;

neigh_node = batadv_neigh_node_get_or_create(orig_node,
hard_iface, neigh_addr);
if (!neigh_node)
goto out;

neigh_node->orig_node = orig_neigh;

out:
return neigh_node;
}

@@ -335,7 +328,7 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
struct batadv_priv *bat_priv = netdev_priv(hard_iface->mesh_iface);
const char *fwd_str;
u8 packet_num;
s16 buff_pos;
int buff_pos;
struct batadv_ogm_packet *batadv_ogm_packet;
struct sk_buff *skb;
u8 *packet_pos;

@@ -906,6 +899,31 @@ static u8 batadv_iv_orig_ifinfo_sum(struct batadv_orig_node *orig_node,
return sum;
}

/**
* batadv_iv_ogm_neigh_ifinfo_sum() - Get bcast_own sum for a last-hop neighbor
* @bat_priv: the bat priv with all the mesh interface information
* @neigh_node: last-hop neighbor of an originator
*
* Return: Number of replied (rebroadcasted) OGMs for the originator currently
* announced by the neighbor. Returns 0 if the neighbor's originator entry is
* not available anymore.
*/
static u8 batadv_iv_ogm_neigh_ifinfo_sum(struct batadv_priv *bat_priv,
const struct batadv_neigh_node *neigh_node)
{
struct batadv_orig_node *orig_neigh;
u8 sum;

orig_neigh = batadv_orig_hash_find(bat_priv, neigh_node->addr);
if (!orig_neigh)
return 0;

sum = batadv_iv_orig_ifinfo_sum(orig_neigh, neigh_node->if_incoming);
batadv_orig_node_put(orig_neigh);

return sum;
}

/**
* batadv_iv_ogm_orig_update() - use OGM to update corresponding data in an
* originator

@@ -975,17 +993,9 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
}

if (!neigh_node) {
struct batadv_orig_node *orig_tmp;

orig_tmp = batadv_iv_ogm_orig_get(bat_priv, ethhdr->h_source);
if (!orig_tmp)
goto unlock;

neigh_node = batadv_iv_ogm_neigh_new(if_incoming,
ethhdr->h_source,
orig_node, orig_tmp);

batadv_orig_node_put(orig_tmp);
orig_node);
if (!neigh_node)
goto unlock;
} else {

@@ -1037,10 +1047,9 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
*/
if (router_ifinfo &&
neigh_ifinfo->bat_iv.tq_avg == router_ifinfo->bat_iv.tq_avg) {
sum_orig = batadv_iv_orig_ifinfo_sum(router->orig_node,
router->if_incoming);
sum_neigh = batadv_iv_orig_ifinfo_sum(neigh_node->orig_node,
neigh_node->if_incoming);
sum_orig = batadv_iv_ogm_neigh_ifinfo_sum(bat_priv, router);
sum_neigh = batadv_iv_ogm_neigh_ifinfo_sum(bat_priv,
neigh_node);
if (sum_orig >= sum_neigh)
goto out;
}

@@ -1106,7 +1115,6 @@ static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
if (!neigh_node)
neigh_node = batadv_iv_ogm_neigh_new(if_incoming,
orig_neigh_node->orig,
orig_neigh_node,
orig_neigh_node);

if (!neigh_node)

@@ -1302,6 +1310,32 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
return ret;
}

/**
* batadv_orig_to_direct_router() - get direct next hop neighbor to an orig address
* @bat_priv: the bat priv with all the mesh interface information
* @orig_addr: the originator MAC address to search the best next hop router for
* @if_outgoing: the interface where the OGM should be sent to
*
* Return: A neighbor node which is the best router towards the given originator
* address. Bonding candidates are ignored.
*/
static struct batadv_neigh_node *
batadv_orig_to_direct_router(struct batadv_priv *bat_priv, u8 *orig_addr,
struct batadv_hard_iface *if_outgoing)
{
struct batadv_neigh_node *neigh_node;
struct batadv_orig_node *orig_node;

orig_node = batadv_orig_hash_find(bat_priv, orig_addr);
if (!orig_node)
return NULL;

neigh_node = batadv_orig_router_get(orig_node, if_outgoing);
batadv_orig_node_put(orig_node);

return neigh_node;
}

/**
* batadv_iv_ogm_process_per_outif() - process a batman iv OGM for an outgoing
* interface

@@ -1372,8 +1406,9 @@ batadv_iv_ogm_process_per_outif(const struct sk_buff *skb, int ogm_offset,

router = batadv_orig_router_get(orig_node, if_outgoing);
if (router) {
router_router = batadv_orig_router_get(router->orig_node,
if_outgoing);
router_router = batadv_orig_to_direct_router(bat_priv,
router->addr,
if_outgoing);
router_ifinfo = batadv_neigh_ifinfo_get(router, if_outgoing);
}
@@ -318,8 +318,8 @@ batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
if (claim->backbone_gw != backbone_gw)
continue;

batadv_claim_put(claim);
hlist_del_rcu(&claim->hash_entry);
batadv_claim_put(claim);
}
spin_unlock_bh(list_lock);
}

@@ -723,6 +723,7 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,

if (unlikely(hash_added != 0)) {
/* only local changes happened. */
batadv_backbone_gw_put(backbone_gw);
kfree(claim);
return;
}

@@ -1288,6 +1289,13 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,

rcu_read_lock();
hlist_for_each_entry_rcu(claim, head, hash_entry) {
/* only purge claims not currently in the process of being released.
* Such claims could otherwise have a NULL-ptr backbone_gw set because
* they already went through batadv_claim_release()
*/
if (!kref_get_unless_zero(&claim->refcount))
continue;

backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
if (now)
goto purge_now;

@@ -1313,6 +1321,7 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
claim->addr, claim->vid);
skip:
batadv_backbone_gw_put(backbone_gw);
batadv_claim_put(claim);
}
rcu_read_unlock();
}

@@ -249,6 +249,7 @@ void batadv_mesh_free(struct net_device *mesh_iface)
atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);

batadv_purge_outstanding_packets(bat_priv, NULL);
batadv_tp_stop_all(bat_priv);

batadv_gw_node_free(bat_priv);
@@ -12,6 +12,7 @@
#include <linux/byteorder/generic.h>
#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/container_of.h>
#include <linux/err.h>
#include <linux/etherdevice.h>

@@ -365,23 +366,38 @@ static void batadv_tp_vars_put(struct batadv_tp_vars *tp_vars)
}

/**
* batadv_tp_sender_cleanup() - cleanup sender data and drop and timer
* @bat_priv: the bat priv with all the mesh interface information
* @tp_vars: the private data of the current TP meter session to cleanup
* batadv_tp_list_detach() - remove tp session from mesh session list once
* @tp_vars: the private data of the current TP meter session
*/
static void batadv_tp_sender_cleanup(struct batadv_priv *bat_priv,
struct batadv_tp_vars *tp_vars)
static void batadv_tp_list_detach(struct batadv_tp_vars *tp_vars)
{
cancel_delayed_work(&tp_vars->finish_work);
bool detached = false;

spin_lock_bh(&tp_vars->bat_priv->tp_list_lock);
hlist_del_rcu(&tp_vars->list);
if (!hlist_unhashed(&tp_vars->list)) {
hlist_del_init_rcu(&tp_vars->list);
detached = true;
}
spin_unlock_bh(&tp_vars->bat_priv->tp_list_lock);

if (!detached)
return;

atomic_dec(&tp_vars->bat_priv->tp_num);

/* drop list reference */
batadv_tp_vars_put(tp_vars);
}

atomic_dec(&tp_vars->bat_priv->tp_num);
/**
* batadv_tp_sender_cleanup() - cleanup sender data and drop and timer
* @tp_vars: the private data of the current TP meter session to cleanup
*/
static void batadv_tp_sender_cleanup(struct batadv_tp_vars *tp_vars)
{
cancel_delayed_work_sync(&tp_vars->finish_work);

batadv_tp_list_detach(tp_vars);

/* kill the timer and remove its reference */
timer_delete_sync(&tp_vars->timer);

@@ -886,7 +902,8 @@ static int batadv_tp_send(void *arg)
batadv_orig_node_put(orig_node);

batadv_tp_sender_end(bat_priv, tp_vars);
batadv_tp_sender_cleanup(bat_priv, tp_vars);
batadv_tp_sender_cleanup(tp_vars);
complete(&tp_vars->finished);

batadv_tp_vars_put(tp_vars);

@@ -918,7 +935,8 @@ static void batadv_tp_start_kthread(struct batadv_tp_vars *tp_vars)
batadv_tp_vars_put(tp_vars);

/* cleanup of failed tp meter variables */
batadv_tp_sender_cleanup(bat_priv, tp_vars);
batadv_tp_sender_cleanup(tp_vars);
complete(&tp_vars->finished);
return;
}

@@ -947,6 +965,13 @@ void batadv_tp_start(struct batadv_priv *bat_priv, const u8 *dst,

/* look for an already existing test towards this node */
spin_lock_bh(&bat_priv->tp_list_lock);
if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE) {
spin_unlock_bh(&bat_priv->tp_list_lock);
batadv_tp_batctl_error_notify(BATADV_TP_REASON_DST_UNREACHABLE,
dst, bat_priv, session_cookie);
return;
}

tp_vars = batadv_tp_list_find(bat_priv, dst);
if (tp_vars) {
spin_unlock_bh(&bat_priv->tp_list_lock);

@@ -969,6 +994,7 @@ void batadv_tp_start(struct batadv_priv *bat_priv, const u8 *dst,

tp_vars = kmalloc_obj(*tp_vars, GFP_ATOMIC);
if (!tp_vars) {
atomic_dec(&bat_priv->tp_num);
spin_unlock_bh(&bat_priv->tp_list_lock);
batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
"Meter: %s cannot allocate list elements\n",

@@ -1017,6 +1043,7 @@ void batadv_tp_start(struct batadv_priv *bat_priv, const u8 *dst,
tp_vars->start_time = jiffies;

init_waitqueue_head(&tp_vars->more_bytes);
init_completion(&tp_vars->finished);

spin_lock_init(&tp_vars->unacked_lock);
INIT_LIST_HEAD(&tp_vars->unacked_list);

@@ -1119,14 +1146,7 @@ static void batadv_tp_receiver_shutdown(struct timer_list *t)
"Shutting down for inactivity (more than %dms) from %pM\n",
BATADV_TP_RECV_TIMEOUT, tp_vars->other_end);

spin_lock_bh(&tp_vars->bat_priv->tp_list_lock);
hlist_del_rcu(&tp_vars->list);
spin_unlock_bh(&tp_vars->bat_priv->tp_list_lock);

/* drop list reference */
batadv_tp_vars_put(tp_vars);

atomic_dec(&bat_priv->tp_num);
batadv_tp_list_detach(tp_vars);

spin_lock_bh(&tp_vars->unacked_lock);
list_for_each_entry_safe(un, safe, &tp_vars->unacked_list, list) {

@@ -1329,9 +1349,12 @@ static struct batadv_tp_vars *
batadv_tp_init_recv(struct batadv_priv *bat_priv,
const struct batadv_icmp_tp_packet *icmp)
{
struct batadv_tp_vars *tp_vars;
struct batadv_tp_vars *tp_vars = NULL;

spin_lock_bh(&bat_priv->tp_list_lock);
if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
goto out_unlock;

tp_vars = batadv_tp_list_find_session(bat_priv, icmp->orig,
icmp->session);
if (tp_vars)

@@ -1344,8 +1367,10 @@ batadv_tp_init_recv(struct batadv_priv *bat_priv,
}

tp_vars = kmalloc_obj(*tp_vars, GFP_ATOMIC);
if (!tp_vars)
if (!tp_vars) {
atomic_dec(&bat_priv->tp_num);
goto out_unlock;
}

ether_addr_copy(tp_vars->other_end, icmp->orig);
tp_vars->role = BATADV_TP_RECEIVER;

@@ -1464,6 +1489,9 @@ void batadv_tp_meter_recv(struct batadv_priv *bat_priv, struct sk_buff *skb)
{
struct batadv_icmp_tp_packet *icmp;

if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
goto out;

icmp = (struct batadv_icmp_tp_packet *)skb->data;

switch (icmp->subtype) {

@@ -1478,9 +1506,57 @@ void batadv_tp_meter_recv(struct batadv_priv *bat_priv, struct sk_buff *skb)
"Received unknown TP Metric packet type %u\n",
icmp->subtype);
}

out:
consume_skb(skb);
}

/**
* batadv_tp_stop_all() - stop all currently running tp meter sessions
* @bat_priv: the bat priv with all the mesh interface information
*/
void batadv_tp_stop_all(struct batadv_priv *bat_priv)
{
struct batadv_tp_vars *tp_vars[BATADV_TP_MAX_NUM];
struct batadv_tp_vars *tp_var;
size_t count = 0;
size_t i;

spin_lock_bh(&bat_priv->tp_list_lock);
hlist_for_each_entry(tp_var, &bat_priv->tp_list, list) {
if (WARN_ON_ONCE(count >= BATADV_TP_MAX_NUM))
break;

if (!kref_get_unless_zero(&tp_var->refcount))
continue;

tp_vars[count++] = tp_var;
}
spin_unlock_bh(&bat_priv->tp_list_lock);

for (i = 0; i < count; i++) {
tp_var = tp_vars[i];

switch (tp_var->role) {
case BATADV_TP_SENDER:
batadv_tp_sender_shutdown(tp_var,
BATADV_TP_REASON_CANCEL);
wake_up(&tp_var->more_bytes);
wait_for_completion(&tp_var->finished);
break;
case BATADV_TP_RECEIVER:
batadv_tp_list_detach(tp_var);
if (timer_shutdown_sync(&tp_var->timer))
batadv_tp_vars_put(tp_var);
break;
}

batadv_tp_vars_put(tp_var);
}

synchronize_net();
}

/**
* batadv_tp_meter_init() - initialize global tp_meter structures
*/

@@ -17,6 +17,7 @@ void batadv_tp_start(struct batadv_priv *bat_priv, const u8 *dst,
u32 test_length, u32 *cookie);
void batadv_tp_stop(struct batadv_priv *bat_priv, const u8 *dst,
u8 return_value);
void batadv_tp_stop_all(struct batadv_priv *bat_priv);
void batadv_tp_meter_recv(struct batadv_priv *bat_priv, struct sk_buff *skb);

#endif /* _NET_BATMAN_ADV_TP_METER_H_ */
@@ -14,6 +14,7 @@
#include <linux/average.h>
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/kref.h>

@@ -1328,6 +1329,9 @@ struct batadv_tp_vars {
/** @finish_work: work item for the finishing procedure */
struct delayed_work finish_work;

/** @finished: completion signaled when a sender thread exits */
struct completion finished;

/** @test_length: test length in milliseconds */
u32 test_length;
@@ -112,24 +112,22 @@ static struct pernet_operations broute_net_ops = {

static int __init ebtable_broute_init(void)
{
int ret = ebt_register_template(&broute_table, broute_table_init);
int ret = register_pernet_subsys(&broute_net_ops);

if (ret)
return ret;

ret = register_pernet_subsys(&broute_net_ops);
if (ret) {
ebt_unregister_template(&broute_table);
return ret;
}
ret = ebt_register_template(&broute_table, broute_table_init);
if (ret)
unregister_pernet_subsys(&broute_net_ops);

return 0;
return ret;
}

static void __exit ebtable_broute_fini(void)
{
unregister_pernet_subsys(&broute_net_ops);
ebt_unregister_template(&broute_table);
unregister_pernet_subsys(&broute_net_ops);
}

module_init(ebtable_broute_init);

@@ -93,24 +93,22 @@ static struct pernet_operations frame_filter_net_ops = {

static int __init ebtable_filter_init(void)
{
int ret = ebt_register_template(&frame_filter, frame_filter_table_init);
int ret = register_pernet_subsys(&frame_filter_net_ops);

if (ret)
return ret;

ret = register_pernet_subsys(&frame_filter_net_ops);
if (ret) {
ebt_unregister_template(&frame_filter);
return ret;
}
ret = ebt_register_template(&frame_filter, frame_filter_table_init);
if (ret)
unregister_pernet_subsys(&frame_filter_net_ops);

return 0;
return ret;
}

static void __exit ebtable_filter_fini(void)
{
unregister_pernet_subsys(&frame_filter_net_ops);
ebt_unregister_template(&frame_filter);
unregister_pernet_subsys(&frame_filter_net_ops);
}

module_init(ebtable_filter_init);

@@ -93,24 +93,22 @@ static struct pernet_operations frame_nat_net_ops = {

static int __init ebtable_nat_init(void)
{
int ret = ebt_register_template(&frame_nat, frame_nat_table_init);
int ret = register_pernet_subsys(&frame_nat_net_ops);

if (ret)
return ret;

ret = register_pernet_subsys(&frame_nat_net_ops);
if (ret) {
ebt_unregister_template(&frame_nat);
return ret;
}
ret = ebt_register_template(&frame_nat, frame_nat_table_init);
if (ret)
unregister_pernet_subsys(&frame_nat_net_ops);

return ret;
}

static void __exit ebtable_nat_fini(void)
{
unregister_pernet_subsys(&frame_nat_net_ops);
ebt_unregister_template(&frame_nat);
unregister_pernet_subsys(&frame_nat_net_ops);
}

module_init(ebtable_nat_init);
@@ -42,6 +42,7 @@

struct ebt_pernet {
struct list_head tables;
struct list_head dead_tables;
};

struct ebt_template {

@@ -1162,11 +1163,6 @@ static int do_replace(struct net *net, sockptr_t arg, unsigned int len)

static void __ebt_unregister_table(struct net *net, struct ebt_table *table)
{
mutex_lock(&ebt_mutex);
list_del(&table->list);
mutex_unlock(&ebt_mutex);
audit_log_nfcfg(table->name, AF_BRIDGE, table->private->nentries,
AUDIT_XT_OP_UNREGISTER, GFP_KERNEL);
EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size,
ebt_cleanup_entry, net, NULL);
if (table->private->nentries)

@@ -1267,13 +1263,15 @@ int ebt_register_table(struct net *net, const struct ebt_table *input_table,
for (i = 0; i < num_ops; i++)
ops[i].priv = table;

list_add(&table->list, &ebt_net->tables);
mutex_unlock(&ebt_mutex);

table->ops = ops;
ret = nf_register_net_hooks(net, ops, num_ops);
if (ret)
if (ret) {
synchronize_rcu();
__ebt_unregister_table(net, table);
} else {
list_add(&table->list, &ebt_net->tables);
}
mutex_unlock(&ebt_mutex);

audit_log_nfcfg(repl->name, AF_BRIDGE, repl->nentries,
AUDIT_XT_OP_REGISTER, GFP_KERNEL);

@@ -1339,7 +1337,7 @@ void ebt_unregister_template(const struct ebt_table *t)
}
EXPORT_SYMBOL(ebt_unregister_template);

static struct ebt_table *__ebt_find_table(struct net *net, const char *name)
void ebt_unregister_table_pre_exit(struct net *net, const char *name)
{
struct ebt_pernet *ebt_net = net_generic(net, ebt_pernet_id);
struct ebt_table *t;

@@ -1348,30 +1346,36 @@ static struct ebt_table *__ebt_find_table(struct net *net, const char *name)

list_for_each_entry(t, &ebt_net->tables, list) {
if (strcmp(t->name, name) == 0) {
list_move(&t->list, &ebt_net->dead_tables);
mutex_unlock(&ebt_mutex);
return t;
nf_unregister_net_hooks(net, t->ops, hweight32(t->valid_hooks));
return;
}
}

mutex_unlock(&ebt_mutex);
return NULL;
}

void ebt_unregister_table_pre_exit(struct net *net, const char *name)
{
struct ebt_table *table = __ebt_find_table(net, name);

if (table)
nf_unregister_net_hooks(net, table->ops, hweight32(table->valid_hooks));
}
EXPORT_SYMBOL(ebt_unregister_table_pre_exit);

void ebt_unregister_table(struct net *net, const char *name)
{
struct ebt_table *table = __ebt_find_table(net, name);
struct ebt_pernet *ebt_net = net_generic(net, ebt_pernet_id);
struct ebt_table *t;

if (table)
__ebt_unregister_table(net, table);
mutex_lock(&ebt_mutex);

list_for_each_entry(t, &ebt_net->dead_tables, list) {
if (strcmp(t->name, name) == 0) {
list_del(&t->list);
audit_log_nfcfg(t->name, AF_BRIDGE, t->private->nentries,
AUDIT_XT_OP_UNREGISTER, GFP_KERNEL);
__ebt_unregister_table(net, t);
mutex_unlock(&ebt_mutex);
return;
}
}

mutex_unlock(&ebt_mutex);
}

/* userspace just supplied us with counters */

@@ -2556,11 +2560,21 @@ static int __net_init ebt_pernet_init(struct net *net)
struct ebt_pernet *ebt_net = net_generic(net, ebt_pernet_id);

INIT_LIST_HEAD(&ebt_net->tables);
INIT_LIST_HEAD(&ebt_net->dead_tables);
return 0;
}

static void __net_exit ebt_pernet_exit(struct net *net)
{
struct ebt_pernet *ebt_net = net_generic(net, ebt_pernet_id);

WARN_ON_ONCE(!list_empty(&ebt_net->tables));
WARN_ON_ONCE(!list_empty(&ebt_net->dead_tables));
}

static struct pernet_operations ebt_net_ops = {
.init = ebt_pernet_init,
.exit = ebt_pernet_exit,
.id = &ebt_pernet_id,
.size = sizeof(struct ebt_pernet),
};

@@ -2569,19 +2583,20 @@ static int __init ebtables_init(void)
{
int ret;

ret = xt_register_target(&ebt_standard_target);
ret = register_pernet_subsys(&ebt_net_ops);
if (ret < 0)
return ret;
ret = nf_register_sockopt(&ebt_sockopts);

ret = xt_register_target(&ebt_standard_target);
if (ret < 0) {
xt_unregister_target(&ebt_standard_target);
unregister_pernet_subsys(&ebt_net_ops);
return ret;
}

ret = register_pernet_subsys(&ebt_net_ops);
ret = nf_register_sockopt(&ebt_sockopts);
if (ret < 0) {
nf_unregister_sockopt(&ebt_sockopts);
xt_unregister_target(&ebt_standard_target);
unregister_pernet_subsys(&ebt_net_ops);
return ret;
}
@@ -6862,9 +6862,9 @@ static void skb_defer_free_flush(void)

#if defined(CONFIG_NET_RX_BUSY_POLL)

static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule)
static void __busy_poll_stop(struct napi_struct *napi, unsigned long timeout)
{
if (!skip_schedule) {
if (!timeout) {
gro_normal_list(&napi->gro);
__napi_schedule(napi);
return;
@@ -6874,6 +6874,8 @@ static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule)
gro_flush_normal(&napi->gro, HZ >= 1000);

clear_bit(NAPI_STATE_SCHED, &napi->state);
hrtimer_start(&napi->timer, ns_to_ktime(timeout),
HRTIMER_MODE_REL_PINNED);
}

enum {
@@ -6885,8 +6887,7 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock,
unsigned flags, u16 budget)
{
struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
bool skip_schedule = false;
unsigned long timeout;
unsigned long timeout = 0;
int rc;

/* Busy polling means there is a high chance device driver hard irq
@@ -6906,10 +6907,12 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock,

if (flags & NAPI_F_PREFER_BUSY_POLL) {
napi->defer_hard_irqs_count = napi_get_defer_hard_irqs(napi);
timeout = napi_get_gro_flush_timeout(napi);
if (napi->defer_hard_irqs_count && timeout) {
hrtimer_start(&napi->timer, ns_to_ktime(timeout), HRTIMER_MODE_REL_PINNED);
skip_schedule = true;
if (napi->defer_hard_irqs_count) {
/* A short enough gro flush timeout and long enough
* poll can result in timer firing too early.
* Timer will be armed later if necessary.
*/
timeout = napi_get_gro_flush_timeout(napi);
}
}

@@ -6924,7 +6927,7 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock,
trace_napi_poll(napi, rc, budget);
netpoll_poll_unlock(have_poll_lock);
if (rc == budget)
__busy_poll_stop(napi, skip_schedule);
__busy_poll_stop(napi, timeout);
bpf_net_ctx_clear(bpf_net_ctx);
local_bh_enable();
}

@@ -12,6 +12,7 @@
#include <uapi/linux/if_arp.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <net/netdev_lock.h>
#include <net/failover.h>

static LIST_HEAD(failover_list);
@@ -221,8 +222,11 @@ failover_existing_slave_register(struct net_device *failover_dev)
for_each_netdev(net, dev) {
if (netif_is_failover(dev))
continue;
if (ether_addr_equal(failover_dev->perm_addr, dev->perm_addr))
if (ether_addr_equal(failover_dev->perm_addr, dev->perm_addr)) {
netdev_lock_ops(dev);
failover_slave_register(dev);
netdev_unlock_ops(dev);
}
}
rtnl_unlock();
}

@@ -92,7 +92,7 @@ static bool ethnl_bitmap32_not_zero(const u32 *map, unsigned int start,
u32 mask;

if (end <= start)
return true;
return false;

if (start % 32) {
mask = ethnl_upper_bits(start);
@@ -105,11 +105,11 @@ static bool ethnl_bitmap32_not_zero(const u32 *map, unsigned int start,
start_word++;
}

if (!memchr_inv(map + start_word, '\0',
(end_word - start_word) * sizeof(u32)))
if (memchr_inv(map + start_word, '\0',
(end_word - start_word) * sizeof(u32)))
return true;
if (end % 32 == 0)
return true;
return false;
return map[end_word] & ethnl_lower_bits(end);
}

@@ -76,6 +76,7 @@ static int phy_prepare_data(const struct ethnl_req_info *req_info,
struct nlattr **tb = info->attrs;
struct phy_device_node *pdn;
struct phy_device *phydev;
int ret;

/* RTNL is held by the caller */
phydev = ethnl_req_get_phydev(req_info, tb, ETHTOOL_A_PHY_HEADER,
@@ -88,8 +89,19 @@ static int phy_prepare_data(const struct ethnl_req_info *req_info,
return -EOPNOTSUPP;

rep_data->phyindex = phydev->phyindex;

rep_data->name = kstrdup(dev_name(&phydev->mdio.dev), GFP_KERNEL);
rep_data->drvname = kstrdup(phydev->drv->name, GFP_KERNEL);
if (!rep_data->name)
return -ENOMEM;

if (phydev->drv) {
rep_data->drvname = kstrdup(phydev->drv->name, GFP_KERNEL);
if (!rep_data->drvname) {
ret = -ENOMEM;
goto err_free_name;
}
}

rep_data->upstream_type = pdn->upstream_type;

if (pdn->upstream_type == PHY_UPSTREAM_PHY) {
@@ -97,15 +109,33 @@ static int phy_prepare_data(const struct ethnl_req_info *req_info,
rep_data->upstream_index = upstream->phyindex;
}

if (pdn->parent_sfp_bus)
if (pdn->parent_sfp_bus) {
rep_data->upstream_sfp_name = kstrdup(sfp_get_name(pdn->parent_sfp_bus),
GFP_KERNEL);
if (!rep_data->upstream_sfp_name) {
ret = -ENOMEM;
goto err_free_drvname;
}
}

if (phydev->sfp_bus)
if (phydev->sfp_bus) {
rep_data->downstream_sfp_name = kstrdup(sfp_get_name(phydev->sfp_bus),
GFP_KERNEL);
if (!rep_data->downstream_sfp_name) {
ret = -ENOMEM;
goto err_free_upstream_sfp;
}
}

return 0;

err_free_upstream_sfp:
kfree(rep_data->upstream_sfp_name);
err_free_drvname:
kfree(rep_data->drvname);
err_free_name:
kfree(rep_data->name);
return ret;
}

static int phy_fill_reply(struct sk_buff *skb,

@@ -889,7 +889,10 @@ int hsr_get_node_data(struct hsr_priv *hsr,

if (node->addr_B_port != HSR_PT_NONE) {
port = hsr_port_get_hsr(hsr, node->addr_B_port);
*addr_b_ifindex = port->dev->ifindex;
if (port)
*addr_b_ifindex = port->dev->ifindex;
else
*addr_b_ifindex = -1;
} else {
*addr_b_ifindex = -1;
}

@@ -1108,7 +1108,7 @@ static void reqsk_timer_handler(struct timer_list *t)

if (!inet_ehash_insert(req_to_sk(nreq), req_to_sk(oreq), NULL)) {
/* delete timer */
__inet_csk_reqsk_queue_drop(sk_listener, nreq, true);
__inet_csk_reqsk_queue_drop(sk_listener, nreq, false);
goto no_ownership;
}

@@ -1134,7 +1134,7 @@ static void reqsk_timer_handler(struct timer_list *t)
}

drop:
__inet_csk_reqsk_queue_drop(sk_listener, oreq, true);
__inet_csk_reqsk_queue_drop(oreq->rsk_listener, oreq, true);
reqsk_put(oreq);
}

@@ -1501,13 +1501,11 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len

static void __arpt_unregister_table(struct net *net, struct xt_table *table)
{
struct xt_table_info *private;
void *loc_cpu_entry;
struct xt_table_info *private = table->private;
struct module *table_owner = table->me;
void *loc_cpu_entry;
struct arpt_entry *iter;

private = xt_unregister_table(table);

/* Decrease module usage counts and free resources */
loc_cpu_entry = private->entries;
xt_entry_foreach(iter, loc_cpu_entry, private->size)
@@ -1515,6 +1513,7 @@ static void __arpt_unregister_table(struct net *net, struct xt_table *table)
if (private->number > private->initial_entries)
module_put(table_owner);
xt_free_table_info(private);
kfree(table);
}

int arpt_register_table(struct net *net,
@@ -1522,13 +1521,11 @@ int arpt_register_table(struct net *net,
const struct arpt_replace *repl,
const struct nf_hook_ops *template_ops)
{
struct nf_hook_ops *ops;
unsigned int num_ops;
int ret, i;
struct xt_table_info *newinfo;
struct xt_table_info bootstrap = {0};
void *loc_cpu_entry;
struct xt_table_info *newinfo;
struct xt_table *new_table;
void *loc_cpu_entry;
int ret;

newinfo = xt_alloc_table_info(repl->size);
if (!newinfo)
@@ -1543,7 +1540,7 @@ int arpt_register_table(struct net *net,
return ret;
}

new_table = xt_register_table(net, table, &bootstrap, newinfo);
new_table = xt_register_table(net, table, template_ops, &bootstrap, newinfo);
if (IS_ERR(new_table)) {
struct arpt_entry *iter;

@@ -1553,46 +1550,12 @@ int arpt_register_table(struct net *net,
return PTR_ERR(new_table);
}

num_ops = hweight32(table->valid_hooks);
if (num_ops == 0) {
ret = -EINVAL;
goto out_free;
}

ops = kmemdup_array(template_ops, num_ops, sizeof(*ops), GFP_KERNEL);
if (!ops) {
ret = -ENOMEM;
goto out_free;
}

for (i = 0; i < num_ops; i++)
ops[i].priv = new_table;

new_table->ops = ops;

ret = nf_register_net_hooks(net, ops, num_ops);
if (ret != 0)
goto out_free;

return ret;

out_free:
__arpt_unregister_table(net, new_table);
return ret;
}

void arpt_unregister_table_pre_exit(struct net *net, const char *name)
{
struct xt_table *table = xt_find_table(net, NFPROTO_ARP, name);

if (table)
nf_unregister_net_hooks(net, table->ops, hweight32(table->valid_hooks));
}
EXPORT_SYMBOL(arpt_unregister_table_pre_exit);

void arpt_unregister_table(struct net *net, const char *name)
{
struct xt_table *table = xt_find_table(net, NFPROTO_ARP, name);
struct xt_table *table = xt_unregister_table_exit(net, NFPROTO_ARP, name);

if (table)
__arpt_unregister_table(net, table);

@@ -43,7 +43,7 @@ static int arptable_filter_table_init(struct net *net)

static void __net_exit arptable_filter_net_pre_exit(struct net *net)
{
arpt_unregister_table_pre_exit(net, "filter");
xt_unregister_table_pre_exit(net, NFPROTO_ARP, "filter");
}

static void __net_exit arptable_filter_net_exit(struct net *net)
@@ -58,32 +58,33 @@ static struct pernet_operations arptable_filter_net_ops = {

static int __init arptable_filter_init(void)
{
int ret = xt_register_template(&packet_filter,
arptable_filter_table_init);

if (ret < 0)
return ret;
int ret;

arpfilter_ops = xt_hook_ops_alloc(&packet_filter, arpt_do_table);
if (IS_ERR(arpfilter_ops)) {
xt_unregister_template(&packet_filter);
if (IS_ERR(arpfilter_ops))
return PTR_ERR(arpfilter_ops);
}

ret = register_pernet_subsys(&arptable_filter_net_ops);
if (ret < 0)
goto err_free;

ret = xt_register_template(&packet_filter,
arptable_filter_table_init);
if (ret < 0) {
xt_unregister_template(&packet_filter);
kfree(arpfilter_ops);
return ret;
unregister_pernet_subsys(&arptable_filter_net_ops);
goto err_free;
}

return 0;
err_free:
kfree(arpfilter_ops);
return ret;
}

static void __exit arptable_filter_fini(void)
{
unregister_pernet_subsys(&arptable_filter_net_ops);
xt_unregister_template(&packet_filter);
unregister_pernet_subsys(&arptable_filter_net_ops);
kfree(arpfilter_ops);
}

@@ -1704,12 +1704,10 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)

static void __ipt_unregister_table(struct net *net, struct xt_table *table)
{
struct xt_table_info *private;
void *loc_cpu_entry;
struct xt_table_info *private = table->private;
struct module *table_owner = table->me;
struct ipt_entry *iter;

private = xt_unregister_table(table);
void *loc_cpu_entry;

/* Decrease module usage counts and free resources */
loc_cpu_entry = private->entries;
@@ -1718,19 +1716,18 @@ static void __ipt_unregister_table(struct net *net, struct xt_table *table)
if (private->number > private->initial_entries)
module_put(table_owner);
xt_free_table_info(private);
kfree(table);
}

int ipt_register_table(struct net *net, const struct xt_table *table,
const struct ipt_replace *repl,
const struct nf_hook_ops *template_ops)
{
struct nf_hook_ops *ops;
unsigned int num_ops;
int ret, i;
struct xt_table_info *newinfo;
struct xt_table_info bootstrap = {0};
void *loc_cpu_entry;
struct xt_table_info *newinfo;
struct xt_table *new_table;
void *loc_cpu_entry;
int ret;

newinfo = xt_alloc_table_info(repl->size);
if (!newinfo)
@@ -1745,7 +1742,7 @@ int ipt_register_table(struct net *net, const struct xt_table *table,
return ret;
}

new_table = xt_register_table(net, table, &bootstrap, newinfo);
new_table = xt_register_table(net, table, template_ops, &bootstrap, newinfo);
if (IS_ERR(new_table)) {
struct ipt_entry *iter;

@@ -1755,51 +1752,12 @@ int ipt_register_table(struct net *net, const struct xt_table *table,
return PTR_ERR(new_table);
}

/* No template? No need to do anything. This is used by 'nat' table, it registers
* with the nat core instead of the netfilter core.
*/
if (!template_ops)
return 0;

num_ops = hweight32(table->valid_hooks);
if (num_ops == 0) {
ret = -EINVAL;
goto out_free;
}

ops = kmemdup_array(template_ops, num_ops, sizeof(*ops), GFP_KERNEL);
if (!ops) {
ret = -ENOMEM;
goto out_free;
}

for (i = 0; i < num_ops; i++)
ops[i].priv = new_table;

new_table->ops = ops;

ret = nf_register_net_hooks(net, ops, num_ops);
if (ret != 0)
goto out_free;

return ret;

out_free:
__ipt_unregister_table(net, new_table);
return ret;
}

void ipt_unregister_table_pre_exit(struct net *net, const char *name)
{
struct xt_table *table = xt_find_table(net, NFPROTO_IPV4, name);

if (table)
nf_unregister_net_hooks(net, table->ops, hweight32(table->valid_hooks));
}

void ipt_unregister_table_exit(struct net *net, const char *name)
{
struct xt_table *table = xt_find_table(net, NFPROTO_IPV4, name);
struct xt_table *table = xt_unregister_table_exit(net, NFPROTO_IPV4, name);

if (table)
__ipt_unregister_table(net, table);
@@ -1887,7 +1845,6 @@ static void __exit ip_tables_fini(void)
}

EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table_pre_exit);
EXPORT_SYMBOL(ipt_unregister_table_exit);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);

@@ -61,7 +61,7 @@ static int __net_init iptable_filter_net_init(struct net *net)

static void __net_exit iptable_filter_net_pre_exit(struct net *net)
{
ipt_unregister_table_pre_exit(net, "filter");
xt_unregister_table_pre_exit(net, NFPROTO_IPV4, "filter");
}

static void __net_exit iptable_filter_net_exit(struct net *net)
@@ -77,32 +77,33 @@ static struct pernet_operations iptable_filter_net_ops = {

static int __init iptable_filter_init(void)
{
int ret = xt_register_template(&packet_filter,
iptable_filter_table_init);

if (ret < 0)
return ret;
int ret;

filter_ops = xt_hook_ops_alloc(&packet_filter, ipt_do_table);
if (IS_ERR(filter_ops)) {
xt_unregister_template(&packet_filter);
if (IS_ERR(filter_ops))
return PTR_ERR(filter_ops);
}

ret = register_pernet_subsys(&iptable_filter_net_ops);
if (ret < 0)
goto err_free;

ret = xt_register_template(&packet_filter,
iptable_filter_table_init);
if (ret < 0) {
xt_unregister_template(&packet_filter);
kfree(filter_ops);
return ret;
unregister_pernet_subsys(&iptable_filter_net_ops);
goto err_free;
}

return 0;
err_free:
kfree(filter_ops);
return ret;
}

static void __exit iptable_filter_fini(void)
{
unregister_pernet_subsys(&iptable_filter_net_ops);
xt_unregister_template(&packet_filter);
unregister_pernet_subsys(&iptable_filter_net_ops);
kfree(filter_ops);
}

@@ -96,7 +96,7 @@ static int iptable_mangle_table_init(struct net *net)

static void __net_exit iptable_mangle_net_pre_exit(struct net *net)
{
ipt_unregister_table_pre_exit(net, "mangle");
xt_unregister_table_pre_exit(net, NFPROTO_IPV4, "mangle");
}

static void __net_exit iptable_mangle_net_exit(struct net *net)
@@ -111,32 +111,33 @@ static struct pernet_operations iptable_mangle_net_ops = {

static int __init iptable_mangle_init(void)
{
int ret = xt_register_template(&packet_mangler,
iptable_mangle_table_init);
if (ret < 0)
return ret;
int ret;

mangle_ops = xt_hook_ops_alloc(&packet_mangler, iptable_mangle_hook);
if (IS_ERR(mangle_ops)) {
xt_unregister_template(&packet_mangler);
ret = PTR_ERR(mangle_ops);
return ret;
}
if (IS_ERR(mangle_ops))
return PTR_ERR(mangle_ops);

ret = register_pernet_subsys(&iptable_mangle_net_ops);
if (ret < 0)
goto err_free;

ret = xt_register_template(&packet_mangler,
iptable_mangle_table_init);
if (ret < 0) {
xt_unregister_template(&packet_mangler);
kfree(mangle_ops);
return ret;
unregister_pernet_subsys(&iptable_mangle_net_ops);
goto err_free;
}

return 0;
err_free:
kfree(mangle_ops);
return ret;
}

static void __exit iptable_mangle_fini(void)
{
unregister_pernet_subsys(&iptable_mangle_net_ops);
xt_unregister_template(&packet_mangler);
unregister_pernet_subsys(&iptable_mangle_net_ops);
kfree(mangle_ops);
}

@@ -119,8 +119,11 @@ static int iptable_nat_table_init(struct net *net)
}

ret = ipt_nat_register_lookups(net);
if (ret < 0)
if (ret < 0) {
xt_unregister_table_pre_exit(net, NFPROTO_IPV4, "nat");
synchronize_rcu();
ipt_unregister_table_exit(net, "nat");
}

kfree(repl);
return ret;
@@ -129,6 +132,7 @@ static int iptable_nat_table_init(struct net *net)
static void __net_exit iptable_nat_net_pre_exit(struct net *net)
{
ipt_nat_unregister_lookups(net);
xt_unregister_table_pre_exit(net, NFPROTO_IPV4, "nat");
}

static void __net_exit iptable_nat_net_exit(struct net *net)

@@ -53,7 +53,7 @@ static int iptable_raw_table_init(struct net *net)

static void __net_exit iptable_raw_net_pre_exit(struct net *net)
{
ipt_unregister_table_pre_exit(net, "raw");
xt_unregister_table_pre_exit(net, NFPROTO_IPV4, "raw");
}

static void __net_exit iptable_raw_net_exit(struct net *net)
@@ -77,32 +77,32 @@ static int __init iptable_raw_init(void)
pr_info("Enabling raw table before defrag\n");
}

ret = xt_register_template(table,
iptable_raw_table_init);
if (ret < 0)
return ret;

rawtable_ops = xt_hook_ops_alloc(table, ipt_do_table);
if (IS_ERR(rawtable_ops)) {
xt_unregister_template(table);
if (IS_ERR(rawtable_ops))
return PTR_ERR(rawtable_ops);
}

ret = register_pernet_subsys(&iptable_raw_net_ops);
if (ret < 0)
goto err_free;

ret = xt_register_template(table,
iptable_raw_table_init);
if (ret < 0) {
xt_unregister_template(table);
kfree(rawtable_ops);
return ret;
unregister_pernet_subsys(&iptable_raw_net_ops);
goto err_free;
}

return 0;
err_free:
kfree(rawtable_ops);
return ret;
}

static void __exit iptable_raw_fini(void)
{
xt_unregister_template(&packet_raw);
unregister_pernet_subsys(&iptable_raw_net_ops);
kfree(rawtable_ops);
xt_unregister_template(&packet_raw);
}

module_init(iptable_raw_init);

@@ -50,7 +50,7 @@ static int iptable_security_table_init(struct net *net)

static void __net_exit iptable_security_net_pre_exit(struct net *net)
{
ipt_unregister_table_pre_exit(net, "security");
xt_unregister_table_pre_exit(net, NFPROTO_IPV4, "security");
}

static void __net_exit iptable_security_net_exit(struct net *net)
@@ -65,33 +65,34 @@ static struct pernet_operations iptable_security_net_ops = {

static int __init iptable_security_init(void)
{
int ret = xt_register_template(&security_table,
iptable_security_table_init);

if (ret < 0)
return ret;
int ret;

sectbl_ops = xt_hook_ops_alloc(&security_table, ipt_do_table);
if (IS_ERR(sectbl_ops)) {
xt_unregister_template(&security_table);
if (IS_ERR(sectbl_ops))
return PTR_ERR(sectbl_ops);
}

ret = register_pernet_subsys(&iptable_security_net_ops);
if (ret < 0)
goto err_free;

ret = xt_register_template(&security_table,
iptable_security_table_init);
if (ret < 0) {
xt_unregister_template(&security_table);
kfree(sectbl_ops);
return ret;
unregister_pernet_subsys(&iptable_security_net_ops);
goto err_free;
}

return 0;
err_free:
kfree(sectbl_ops);
return ret;
}

static void __exit iptable_security_fini(void)
{
xt_unregister_template(&security_table);
unregister_pernet_subsys(&iptable_security_net_ops);
kfree(sectbl_ops);
xt_unregister_template(&security_table);
}

module_init(iptable_security_init);

@@ -116,7 +116,8 @@ struct tcp_ao_key *tcp_ao_established_key(const struct sock *sk,
{
struct tcp_ao_key *key;

hlist_for_each_entry_rcu(key, &ao->head, node, lockdep_sock_is_held(sk)) {
hlist_for_each_entry_rcu(key, &ao->head, node,
sk_fullsock(sk) && lockdep_sock_is_held(sk)) {
if ((sndid >= 0 && key->sndid != sndid) ||
(rcvid >= 0 && key->rcvid != rcvid))
continue;

@@ -36,11 +36,11 @@
/* FL hash table */

#define FL_MAX_PER_SOCK 32
#define FL_MAX_SIZE 4096
#define FL_MAX_SIZE 8192
#define FL_HASH_MASK 255
#define FL_HASH(l) (ntohl(l)&FL_HASH_MASK)

static atomic_t fl_size = ATOMIC_INIT(0);
static int fl_size;
static struct ip6_flowlabel __rcu *fl_ht[FL_HASH_MASK+1];

static void ip6_fl_gc(struct timer_list *unused);
@@ -162,8 +162,9 @@ static void ip6_fl_gc(struct timer_list *unused)
ttd = fl->expires;
if (time_after_eq(now, ttd)) {
*flp = fl->next;
fl_size--;
fl->fl_net->ipv6.flowlabel_count--;
fl_free(fl);
atomic_dec(&fl_size);
continue;
}
if (!sched || time_before(ttd, sched))
@@ -172,7 +173,7 @@ static void ip6_fl_gc(struct timer_list *unused)
flp = &fl->next;
}
}
if (!sched && atomic_read(&fl_size))
if (!sched && fl_size)
sched = now + FL_MAX_LINGER;
if (sched) {
mod_timer(&ip6_fl_gc_timer, sched);
@@ -196,7 +197,8 @@ static void __net_exit ip6_fl_purge(struct net *net)
atomic_read(&fl->users) == 0) {
*flp = fl->next;
fl_free(fl);
atomic_dec(&fl_size);
fl_size--;
net->ipv6.flowlabel_count--;
continue;
}
flp = &fl->next;
@@ -210,10 +212,10 @@ static struct ip6_flowlabel *fl_intern(struct net *net,
{
struct ip6_flowlabel *lfl;

lockdep_assert_held(&ip6_fl_lock);

fl->label = label & IPV6_FLOWLABEL_MASK;

rcu_read_lock();
spin_lock_bh(&ip6_fl_lock);
if (label == 0) {
for (;;) {
fl->label = htonl(get_random_u32())&IPV6_FLOWLABEL_MASK;
@@ -235,8 +237,6 @@ static struct ip6_flowlabel *fl_intern(struct net *net,
lfl = __fl_lookup(net, fl->label);
if (lfl) {
atomic_inc(&lfl->users);
spin_unlock_bh(&ip6_fl_lock);
rcu_read_unlock();
return lfl;
}
}
@@ -244,9 +244,8 @@ static struct ip6_flowlabel *fl_intern(struct net *net,
fl->lastuse = jiffies;
fl->next = fl_ht[FL_HASH(fl->label)];
rcu_assign_pointer(fl_ht[FL_HASH(fl->label)], fl);
atomic_inc(&fl_size);
spin_unlock_bh(&ip6_fl_lock);
rcu_read_unlock();
fl_size++;
net->ipv6.flowlabel_count++;
return NULL;
}

@@ -464,10 +463,17 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,

static int mem_check(struct sock *sk)
{
int room = FL_MAX_SIZE - atomic_read(&fl_size);
const int unpriv_total_limit = FL_MAX_SIZE - (FL_MAX_SIZE / 4);
const int unpriv_user_limit = unpriv_total_limit / 2;
struct net *net = sock_net(sk);
int room;
struct ipv6_fl_socklist *sfl;
int count = 0;

lockdep_assert_held(&ip6_fl_lock);

room = FL_MAX_SIZE - fl_size;

if (room > FL_MAX_SIZE - FL_MAX_PER_SOCK)
return 0;

@@ -478,7 +484,9 @@ static int mem_check(struct sock *sk)

if (room <= 0 ||
((count >= FL_MAX_PER_SOCK ||
(count > 0 && room < FL_MAX_SIZE/2) || room < FL_MAX_SIZE/4) &&
(count > 0 && room < FL_MAX_SIZE / 2) ||
room < FL_MAX_SIZE / 4 ||
net->ipv6.flowlabel_count >= unpriv_user_limit) &&
!capable(CAP_NET_ADMIN)))
return -ENOBUFS;

@@ -692,11 +700,19 @@ static int ipv6_flowlabel_get(struct sock *sk, struct in6_flowlabel_req *freq,
if (!sfl1)
goto done;

rcu_read_lock();
spin_lock_bh(&ip6_fl_lock);
err = mem_check(sk);
if (err == 0)
fl1 = fl_intern(net, fl, freq->flr_label);
else
fl1 = NULL;
spin_unlock_bh(&ip6_fl_lock);
rcu_read_unlock();

if (err != 0)
goto done;

fl1 = fl_intern(net, fl, freq->flr_label);
if (fl1)
goto recheck;

@@ -1713,12 +1713,10 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
|
||||
|
||||
static void __ip6t_unregister_table(struct net *net, struct xt_table *table)
|
||||
{
|
||||
struct xt_table_info *private;
|
||||
void *loc_cpu_entry;
|
||||
struct xt_table_info *private = table->private;
|
||||
struct module *table_owner = table->me;
|
||||
struct ip6t_entry *iter;
|
||||
|
||||
private = xt_unregister_table(table);
|
||||
void *loc_cpu_entry;
|
||||
|
||||
/* Decrease module usage counts and free resources */
|
||||
loc_cpu_entry = private->entries;
|
||||
@@ -1727,19 +1725,18 @@ static void __ip6t_unregister_table(struct net *net, struct xt_table *table)
|
||||
if (private->number > private->initial_entries)
|
||||
module_put(table_owner);
|
||||
xt_free_table_info(private);
|
||||
kfree(table);
|
||||
}
|
||||
|
||||
int ip6t_register_table(struct net *net, const struct xt_table *table,
|
||||
const struct ip6t_replace *repl,
|
||||
const struct nf_hook_ops *template_ops)
|
||||
{
|
||||
struct nf_hook_ops *ops;
|
||||
unsigned int num_ops;
|
||||
int ret, i;
|
||||
struct xt_table_info *newinfo;
|
||||
struct xt_table_info bootstrap = {0};
|
||||
void *loc_cpu_entry;
|
||||
struct xt_table_info *newinfo;
|
||||
struct xt_table *new_table;
|
||||
void *loc_cpu_entry;
|
||||
int ret;
|
||||
|
||||
newinfo = xt_alloc_table_info(repl->size);
|
||||
if (!newinfo)
|
||||
@@ -1754,7 +1751,7 @@ int ip6t_register_table(struct net *net, const struct xt_table *table,
|
||||
return ret;
|
||||
}
|
||||
|
||||
new_table = xt_register_table(net, table, &bootstrap, newinfo);
|
||||
new_table = xt_register_table(net, table, template_ops, &bootstrap, newinfo);
|
||||
if (IS_ERR(new_table)) {
|
||||
struct ip6t_entry *iter;
|
||||
|
||||
@@ -1764,48 +1761,12 @@ int ip6t_register_table(struct net *net, const struct xt_table *table,
|
||||
return PTR_ERR(new_table);
|
||||
}
|
||||
|
||||
if (!template_ops)
|
||||
return 0;
|
||||
|
||||
num_ops = hweight32(table->valid_hooks);
|
||||
if (num_ops == 0) {
|
||||
ret = -EINVAL;
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
ops = kmemdup_array(template_ops, num_ops, sizeof(*ops), GFP_KERNEL);
|
||||
if (!ops) {
|
||||
ret = -ENOMEM;
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
for (i = 0; i < num_ops; i++)
|
||||
ops[i].priv = new_table;
|
||||
|
||||
new_table->ops = ops;
|
||||
|
||||
ret = nf_register_net_hooks(net, ops, num_ops);
|
||||
if (ret != 0)
|
||||
goto out_free;
|
||||
|
||||
return ret;
|
||||
|
||||
out_free:
|
||||
__ip6t_unregister_table(net, new_table);
|
||||
return ret;
|
||||
}
|
||||
|
||||
void ip6t_unregister_table_pre_exit(struct net *net, const char *name)
|
||||
{
|
||||
struct xt_table *table = xt_find_table(net, NFPROTO_IPV6, name);
|
||||
|
||||
if (table)
|
||||
nf_unregister_net_hooks(net, table->ops, hweight32(table->valid_hooks));
|
||||
}
|
||||
|
||||
void ip6t_unregister_table_exit(struct net *net, const char *name)
|
||||
{
|
||||
struct xt_table *table = xt_find_table(net, NFPROTO_IPV6, name);
|
||||
struct xt_table *table = xt_unregister_table_exit(net, NFPROTO_IPV6, name);
|
||||
|
||||
if (table)
|
||||
__ip6t_unregister_table(net, table);
|
||||
@@ -1894,7 +1855,6 @@ static void __exit ip6_tables_fini(void)
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(ip6t_register_table);
|
||||
EXPORT_SYMBOL(ip6t_unregister_table_pre_exit);
|
||||
EXPORT_SYMBOL(ip6t_unregister_table_exit);
|
||||
EXPORT_SYMBOL(ip6t_do_table);
|
||||
|
||||
|
||||
@@ -60,7 +60,7 @@ static int __net_init ip6table_filter_net_init(struct net *net)
|
||||
|
||||
static void __net_exit ip6table_filter_net_pre_exit(struct net *net)
|
||||
{
|
||||
ip6t_unregister_table_pre_exit(net, "filter");
|
||||
xt_unregister_table_pre_exit(net, NFPROTO_IPV6, "filter");
|
||||
}
|
||||
|
||||
static void __net_exit ip6table_filter_net_exit(struct net *net)
|
||||
@@ -76,32 +76,32 @@ static struct pernet_operations ip6table_filter_net_ops = {
|
||||
|
||||
static int __init ip6table_filter_init(void)
|
||||
{
|
||||
int ret = xt_register_template(&packet_filter,
|
||||
ip6table_filter_table_init);
|
||||
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
int ret;
|
||||
|
||||
filter_ops = xt_hook_ops_alloc(&packet_filter, ip6t_do_table);
|
||||
if (IS_ERR(filter_ops)) {
|
||||
xt_unregister_template(&packet_filter);
|
||||
if (IS_ERR(filter_ops))
|
||||
return PTR_ERR(filter_ops);
|
||||
}
|
||||
|
||||
ret = register_pernet_subsys(&ip6table_filter_net_ops);
|
||||
if (ret < 0)
|
||||
goto err_free;
|
||||
|
||||
ret = xt_register_template(&packet_filter, ip6table_filter_table_init);
|
||||
if (ret < 0) {
|
||||
xt_unregister_template(&packet_filter);
|
||||
kfree(filter_ops);
|
||||
return ret;
|
||||
unregister_pernet_subsys(&ip6table_filter_net_ops);
|
||||
goto err_free;
|
||||
}
|
||||
|
||||
return 0;
|
||||
err_free:
|
||||
kfree(filter_ops);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void __exit ip6table_filter_fini(void)
|
||||
{
|
||||
unregister_pernet_subsys(&ip6table_filter_net_ops);
|
||||
xt_unregister_template(&packet_filter);
|
||||
unregister_pernet_subsys(&ip6table_filter_net_ops);
|
||||
kfree(filter_ops);
|
||||
}
|
||||
|
||||
|
||||
@@ -89,7 +89,7 @@ static int ip6table_mangle_table_init(struct net *net)
|
||||
|
||||
static void __net_exit ip6table_mangle_net_pre_exit(struct net *net)
|
||||
{
|
||||
ip6t_unregister_table_pre_exit(net, "mangle");
|
||||
xt_unregister_table_pre_exit(net, NFPROTO_IPV6, "mangle");
|
||||
}
|
||||
|
||||
static void __net_exit ip6table_mangle_net_exit(struct net *net)
|
||||
@@ -104,32 +104,33 @@ static struct pernet_operations ip6table_mangle_net_ops = {
|
||||
|
||||
static int __init ip6table_mangle_init(void)
|
||||
{
|
||||
int ret = xt_register_template(&packet_mangler,
|
||||
ip6table_mangle_table_init);
|
||||
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
int ret;
|
||||
|
||||
mangle_ops = xt_hook_ops_alloc(&packet_mangler, ip6table_mangle_hook);
|
||||
if (IS_ERR(mangle_ops)) {
|
||||
xt_unregister_template(&packet_mangler);
|
||||
if (IS_ERR(mangle_ops))
|
||||
return PTR_ERR(mangle_ops);
|
||||
}
|
||||
|
||||
ret = register_pernet_subsys(&ip6table_mangle_net_ops);
|
||||
if (ret < 0)
|
||||
goto err_free;
|
||||
|
||||
ret = xt_register_template(&packet_mangler,
|
||||
ip6table_mangle_table_init);
|
||||
if (ret < 0) {
|
||||
xt_unregister_template(&packet_mangler);
|
||||
kfree(mangle_ops);
|
||||
return ret;
|
||||
unregister_pernet_subsys(&ip6table_mangle_net_ops);
|
||||
goto err_free;
|
||||
}
|
||||
|
||||
return 0;
|
||||
err_free:
|
||||
kfree(mangle_ops);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void __exit ip6table_mangle_fini(void)
|
||||
{
|
||||
unregister_pernet_subsys(&ip6table_mangle_net_ops);
|
||||
xt_unregister_template(&packet_mangler);
|
||||
unregister_pernet_subsys(&ip6table_mangle_net_ops);
|
||||
kfree(mangle_ops);
|
||||
}
|
||||
|
||||
|
||||
@@ -121,8 +121,11 @@ static int ip6table_nat_table_init(struct net *net)
|
||||
}
|
||||
|
||||
ret = ip6t_nat_register_lookups(net);
|
||||
if (ret < 0)
|
||||
if (ret < 0) {
|
||||
xt_unregister_table_pre_exit(net, NFPROTO_IPV6, "nat");
|
||||
synchronize_rcu();
|
||||
ip6t_unregister_table_exit(net, "nat");
|
||||
}
|
||||
|
||||
kfree(repl);
|
||||
return ret;
|
||||
@@ -131,6 +134,7 @@ static int ip6table_nat_table_init(struct net *net)
|
||||
static void __net_exit ip6table_nat_net_pre_exit(struct net *net)
|
||||
{
|
||||
ip6t_nat_unregister_lookups(net);
|
||||
xt_unregister_table_pre_exit(net, NFPROTO_IPV6, "nat");
|
||||
}
|
||||
|
||||
static void __net_exit ip6table_nat_net_exit(struct net *net)
|
||||
|
||||
@@ -52,7 +52,7 @@ static int ip6table_raw_table_init(struct net *net)
|
||||
|
||||
static void __net_exit ip6table_raw_net_pre_exit(struct net *net)
|
||||
{
|
||||
ip6t_unregister_table_pre_exit(net, "raw");
|
||||
xt_unregister_table_pre_exit(net, NFPROTO_IPV6, "raw");
|
||||
}
|
||||
|
||||
static void __net_exit ip6table_raw_net_exit(struct net *net)
|
||||
@@ -75,31 +75,31 @@ static int __init ip6table_raw_init(void)
|
||||
pr_info("Enabling raw table before defrag\n");
|
||||
}
|
||||
|
||||
ret = xt_register_template(table, ip6table_raw_table_init);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
/* Register hooks */
|
||||
rawtable_ops = xt_hook_ops_alloc(table, ip6t_do_table);
|
||||
if (IS_ERR(rawtable_ops)) {
|
||||
xt_unregister_template(table);
|
||||
if (IS_ERR(rawtable_ops))
|
||||
return PTR_ERR(rawtable_ops);
|
||||
}
|
||||
|
||||
ret = register_pernet_subsys(&ip6table_raw_net_ops);
|
||||
if (ret < 0)
|
||||
goto err_free;
|
||||
|
||||
ret = xt_register_template(table, ip6table_raw_table_init);
|
||||
if (ret < 0) {
|
||||
kfree(rawtable_ops);
|
||||
xt_unregister_template(table);
|
||||
return ret;
|
||||
unregister_pernet_subsys(&ip6table_raw_net_ops);
|
||||
goto err_free;
|
||||
}
|
||||
|
||||
return 0;
|
||||
err_free:
|
||||
kfree(rawtable_ops);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void __exit ip6table_raw_fini(void)
|
||||
{
|
||||
unregister_pernet_subsys(&ip6table_raw_net_ops);
|
||||
xt_unregister_template(&packet_raw);
|
||||
unregister_pernet_subsys(&ip6table_raw_net_ops);
|
||||
kfree(rawtable_ops);
|
||||
}
|
||||
|
||||
|
||||
@@ -49,7 +49,7 @@ static int ip6table_security_table_init(struct net *net)
|
||||
|
||||
static void __net_exit ip6table_security_net_pre_exit(struct net *net)
|
||||
{
|
||||
ip6t_unregister_table_pre_exit(net, "security");
|
||||
xt_unregister_table_pre_exit(net, NFPROTO_IPV6, "security");
|
||||
}
|
||||
|
||||
static void __net_exit ip6table_security_net_exit(struct net *net)
|
||||
@@ -64,32 +64,33 @@ static struct pernet_operations ip6table_security_net_ops = {
|
||||
|
||||
static int __init ip6table_security_init(void)
|
||||
{
|
||||
int ret = xt_register_template(&security_table,
|
||||
ip6table_security_table_init);
|
||||
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
int ret;
|
||||
|
||||
sectbl_ops = xt_hook_ops_alloc(&security_table, ip6t_do_table);
|
||||
if (IS_ERR(sectbl_ops)) {
|
||||
xt_unregister_template(&security_table);
|
||||
if (IS_ERR(sectbl_ops))
|
||||
return PTR_ERR(sectbl_ops);
|
||||
}
|
||||
|
||||
ret = register_pernet_subsys(&ip6table_security_net_ops);
|
||||
if (ret < 0)
|
||||
goto err_free;
|
||||
|
||||
ret = xt_register_template(&security_table,
|
||||
ip6table_security_table_init);
|
||||
if (ret < 0) {
|
||||
kfree(sectbl_ops);
|
||||
xt_unregister_template(&security_table);
|
||||
return ret;
|
||||
unregister_pernet_subsys(&ip6table_security_net_ops);
|
||||
goto err_free;
|
||||
}
|
||||
|
||||
return 0;
|
||||
err_free:
|
||||
kfree(sectbl_ops);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void __exit ip6table_security_fini(void)
|
||||
{
|
||||
unregister_pernet_subsys(&ip6table_security_net_ops);
|
||||
xt_unregister_template(&security_table);
|
||||
unregister_pernet_subsys(&ip6table_security_net_ops);
|
||||
kfree(sectbl_ops);
|
||||
}
|
||||
|
||||
|
||||
@@ -72,6 +72,7 @@ int nf_conntrack_broadcast_help(struct sk_buff *skb,
|
||||
exp->flags = NF_CT_EXPECT_PERMANENT;
|
||||
exp->class = NF_CT_EXPECT_CLASS_DEFAULT;
|
||||
rcu_assign_pointer(exp->helper, helper);
|
||||
rcu_assign_pointer(exp->assign_helper, NULL);
|
||||
write_pnet(&exp->net, net);
|
||||
#ifdef CONFIG_NF_CONNTRACK_ZONES
|
||||
exp->zone = ct->zone;
|
||||
|
||||
@@ -1811,14 +1811,17 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
|
||||
spin_lock_bh(&nf_conntrack_expect_lock);
|
||||
exp = nf_ct_find_expectation(net, zone, tuple, !tmpl || nf_ct_is_confirmed(tmpl));
|
||||
if (exp) {
|
||||
struct nf_conntrack_helper *assign_helper;
|
||||
|
||||
/* Welcome, Mr. Bond. We've been expecting you... */
|
||||
__set_bit(IPS_EXPECTED_BIT, &ct->status);
|
||||
/* exp->master safe, refcnt bumped in nf_ct_find_expectation */
|
||||
ct->master = exp->master;
|
||||
if (exp->helper) {
|
||||
assign_helper = rcu_dereference(exp->assign_helper);
|
||||
if (assign_helper) {
|
||||
help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
|
||||
if (help)
|
||||
rcu_assign_pointer(help->helper, exp->helper);
|
||||
rcu_assign_pointer(help->helper, assign_helper);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_NF_CONNTRACK_MARK
|
||||
|
||||
@@ -344,6 +344,7 @@ void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
|
||||
helper = rcu_dereference(help->helper);
|
||||
|
||||
rcu_assign_pointer(exp->helper, helper);
|
||||
rcu_assign_pointer(exp->assign_helper, NULL);
|
||||
write_pnet(&exp->net, net);
|
||||
#ifdef CONFIG_NF_CONNTRACK_ZONES
|
||||
exp->zone = ct->zone;
|
||||
|
||||
@@ -643,7 +643,7 @@ static int expect_h245(struct sk_buff *skb, struct nf_conn *ct,
|
||||
&ct->tuplehash[!dir].tuple.src.u3,
|
||||
&ct->tuplehash[!dir].tuple.dst.u3,
|
||||
IPPROTO_TCP, NULL, &port);
|
||||
rcu_assign_pointer(exp->helper, &nf_conntrack_helper_h245);
|
||||
rcu_assign_pointer(exp->assign_helper, &nf_conntrack_helper_h245);
|
||||
|
||||
nathook = rcu_dereference(nfct_h323_nat_hook);
|
||||
if (memcmp(&ct->tuplehash[dir].tuple.src.u3,
|
||||
@@ -767,7 +767,7 @@ static int expect_callforwarding(struct sk_buff *skb,
|
||||
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
|
||||
&ct->tuplehash[!dir].tuple.src.u3, &addr,
|
||||
IPPROTO_TCP, NULL, &port);
|
||||
rcu_assign_pointer(exp->helper, nf_conntrack_helper_q931);
|
||||
rcu_assign_pointer(exp->assign_helper, nf_conntrack_helper_q931);
|
||||
|
||||
nathook = rcu_dereference(nfct_h323_nat_hook);
|
||||
if (memcmp(&ct->tuplehash[dir].tuple.src.u3,
|
||||
@@ -1234,7 +1234,7 @@ static int expect_q931(struct sk_buff *skb, struct nf_conn *ct,
|
||||
&ct->tuplehash[!dir].tuple.src.u3 : NULL,
|
||||
&ct->tuplehash[!dir].tuple.dst.u3,
|
||||
IPPROTO_TCP, NULL, &port);
|
||||
rcu_assign_pointer(exp->helper, nf_conntrack_helper_q931);
|
||||
rcu_assign_pointer(exp->assign_helper, nf_conntrack_helper_q931);
|
||||
exp->flags = NF_CT_EXPECT_PERMANENT; /* Accept multiple calls */
|
||||
|
||||
nathook = rcu_dereference(nfct_h323_nat_hook);
|
||||
@@ -1306,7 +1306,7 @@ static int process_gcf(struct sk_buff *skb, struct nf_conn *ct,
|
||||
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
|
||||
&ct->tuplehash[!dir].tuple.src.u3, &addr,
|
||||
IPPROTO_UDP, NULL, &port);
|
||||
rcu_assign_pointer(exp->helper, nf_conntrack_helper_ras);
|
||||
rcu_assign_pointer(exp->assign_helper, nf_conntrack_helper_ras);
|
||||
|
||||
if (nf_ct_expect_related(exp, 0) == 0) {
|
||||
pr_debug("nf_ct_ras: expect RAS ");
|
||||
@@ -1523,7 +1523,7 @@ static int process_acf(struct sk_buff *skb, struct nf_conn *ct,
|
||||
&ct->tuplehash[!dir].tuple.src.u3, &addr,
|
||||
IPPROTO_TCP, NULL, &port);
|
||||
exp->flags = NF_CT_EXPECT_PERMANENT;
|
||||
rcu_assign_pointer(exp->helper, nf_conntrack_helper_q931);
|
||||
rcu_assign_pointer(exp->assign_helper, nf_conntrack_helper_q931);
|
||||
|
||||
if (nf_ct_expect_related(exp, 0) == 0) {
|
||||
pr_debug("nf_ct_ras: expect Q.931 ");
|
||||
@@ -1577,7 +1577,7 @@ static int process_lcf(struct sk_buff *skb, struct nf_conn *ct,
|
||||
&ct->tuplehash[!dir].tuple.src.u3, &addr,
|
||||
IPPROTO_TCP, NULL, &port);
|
||||
exp->flags = NF_CT_EXPECT_PERMANENT;
|
||||
rcu_assign_pointer(exp->helper, nf_conntrack_helper_q931);
|
||||
rcu_assign_pointer(exp->assign_helper, nf_conntrack_helper_q931);
|
||||
|
||||
if (nf_ct_expect_related(exp, 0) == 0) {
|
||||
pr_debug("nf_ct_ras: expect Q.931 ");
|
||||
|
||||
@@ -400,6 +400,11 @@ static bool expect_iter_me(struct nf_conntrack_expect *exp, void *data)
|
||||
|
||||
this = rcu_dereference_protected(exp->helper,
|
||||
lockdep_is_held(&nf_conntrack_expect_lock));
|
||||
if (this == me)
|
||||
return true;
|
||||
|
||||
this = rcu_dereference_protected(exp->assign_helper,
|
||||
lockdep_is_held(&nf_conntrack_expect_lock));
|
||||
return this == me;
|
||||
}
|
||||
|
||||
|
||||
@@ -2634,6 +2634,7 @@ static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
|
||||
|
||||
static struct nf_conntrack_expect *
|
||||
ctnetlink_alloc_expect(const struct nlattr *const cda[], struct nf_conn *ct,
|
||||
const struct nf_conntrack_helper *assign_helper,
|
||||
struct nf_conntrack_tuple *tuple,
|
||||
struct nf_conntrack_tuple *mask);
|
||||
|
||||
@@ -2860,6 +2861,7 @@ static int
|
||||
ctnetlink_glue_attach_expect(const struct nlattr *attr, struct nf_conn *ct,
|
||||
u32 portid, u32 report)
|
||||
{
|
||||
struct nf_conntrack_helper *assign_helper = NULL;
|
||||
struct nlattr *cda[CTA_EXPECT_MAX+1];
|
||||
struct nf_conntrack_tuple tuple, mask;
|
||||
struct nf_conntrack_expect *exp;
|
||||
@@ -2870,13 +2872,26 @@ ctnetlink_glue_attach_expect(const struct nlattr *attr, struct nf_conn *ct,
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
if (!cda[CTA_EXPECT_TUPLE] || !cda[CTA_EXPECT_MASK])
|
||||
return -EINVAL;
|
||||
|
||||
err = ctnetlink_glue_exp_parse((const struct nlattr * const *)cda,
|
||||
ct, &tuple, &mask);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
if (cda[CTA_EXPECT_HELP_NAME]) {
|
||||
const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
|
||||
|
||||
assign_helper = __nf_conntrack_helper_find(helpname,
|
||||
nf_ct_l3num(ct),
|
||||
tuple.dst.protonum);
|
||||
if (!assign_helper)
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
exp = ctnetlink_alloc_expect((const struct nlattr * const *)cda, ct,
|
||||
&tuple, &mask);
|
||||
assign_helper, &tuple, &mask);
|
||||
if (IS_ERR(exp))
|
||||
return PTR_ERR(exp);
|
||||
|
||||
@@ -3515,6 +3530,7 @@ ctnetlink_parse_expect_nat(const struct nlattr *attr,
|
||||
|
||||
static struct nf_conntrack_expect *
|
||||
ctnetlink_alloc_expect(const struct nlattr * const cda[], struct nf_conn *ct,
|
||||
const struct nf_conntrack_helper *assign_helper,
|
||||
struct nf_conntrack_tuple *tuple,
|
||||
struct nf_conntrack_tuple *mask)
|
||||
{
|
||||
@@ -3568,6 +3584,7 @@ ctnetlink_alloc_expect(const struct nlattr * const cda[], struct nf_conn *ct,
|
||||
exp->zone = ct->zone;
|
||||
#endif
|
||||
rcu_assign_pointer(exp->helper, helper);
|
||||
rcu_assign_pointer(exp->assign_helper, assign_helper);
|
||||
exp->tuple = *tuple;
|
||||
exp->mask.src.u3 = mask->src.u3;
|
||||
exp->mask.src.u.all = mask->src.u.all;
|
||||
@@ -3623,7 +3640,7 @@ ctnetlink_create_expect(struct net *net,
|
||||
ct = nf_ct_tuplehash_to_ctrack(h);
|
||||
|
||||
rcu_read_lock();
|
||||
exp = ctnetlink_alloc_expect(cda, ct, &tuple, &mask);
|
||||
exp = ctnetlink_alloc_expect(cda, ct, NULL, &tuple, &mask);
|
||||
if (IS_ERR(exp)) {
|
||||
err = PTR_ERR(exp);
|
||||
goto err_rcu;
|
||||
|
||||
@@ -1366,6 +1366,10 @@ static int process_register_request(struct sk_buff *skb, unsigned int protoff,
|
||||
goto store_cseq;
|
||||
}
|
||||
|
||||
helper = rcu_dereference(nfct_help(ct)->helper);
|
||||
if (!helper)
|
||||
return NF_DROP;
|
||||
|
||||
exp = nf_ct_expect_alloc(ct);
|
||||
if (!exp) {
|
||||
nf_ct_helper_log(skb, ct, "cannot alloc expectation");
|
||||
@@ -1376,14 +1380,10 @@ static int process_register_request(struct sk_buff *skb, unsigned int protoff,
|
||||
if (sip_direct_signalling)
|
||||
saddr = &ct->tuplehash[!dir].tuple.src.u3;
|
||||
|
||||
helper = rcu_dereference(nfct_help(ct)->helper);
|
||||
if (!helper)
|
||||
return NF_DROP;
|
||||
|
||||
nf_ct_expect_init(exp, SIP_EXPECT_SIGNALLING, nf_ct_l3num(ct),
|
||||
saddr, &daddr, proto, NULL, &port);
|
||||
exp->timeout.expires = sip_timeout * HZ;
|
||||
rcu_assign_pointer(exp->helper, helper);
|
||||
rcu_assign_pointer(exp->assign_helper, helper);
|
||||
exp->flags = NF_CT_EXPECT_PERMANENT | NF_CT_EXPECT_INACTIVE;
|
||||
|
||||
hooks = rcu_dereference(nf_nat_sip_hooks);
|
||||
|
||||
@@ -1334,6 +1334,8 @@ static void nft_ct_expect_obj_eval(struct nft_object *obj,
|
||||
|
||||
if (nf_ct_expect_related(exp, 0) != 0)
|
||||
regs->verdict.code = NF_DROP;
|
||||
|
||||
nf_ct_expect_put(exp);
|
||||
}
|
||||
|
||||
static const struct nla_policy nft_ct_expect_policy[NFTA_CT_EXPECT_MAX + 1] = {
|
||||
|
||||
@@ -55,6 +55,9 @@ static struct list_head xt_templates[NFPROTO_NUMPROTO];
|
||||
|
||||
struct xt_pernet {
|
||||
struct list_head tables[NFPROTO_NUMPROTO];
|
||||
|
||||
/* stash area used during netns exit */
|
||||
struct list_head dead_tables[NFPROTO_NUMPROTO];
|
||||
};
|
||||
|
||||
struct compat_delta {
|
||||
@@ -1472,11 +1475,9 @@ struct xt_counters *xt_counters_alloc(unsigned int counters)
|
||||
}
|
||||
EXPORT_SYMBOL(xt_counters_alloc);
|
||||
|
||||
struct xt_table_info *
|
||||
xt_replace_table(struct xt_table *table,
|
||||
unsigned int num_counters,
|
||||
struct xt_table_info *newinfo,
|
||||
int *error)
|
||||
static struct xt_table_info *
|
||||
do_replace_table(struct xt_table *table, unsigned int num_counters,
|
||||
struct xt_table_info *newinfo, int *error)
|
||||
{
|
||||
struct xt_table_info *private;
|
||||
unsigned int cpu;
|
||||
@@ -1531,30 +1532,54 @@ xt_replace_table(struct xt_table *table,
|
||||
}
|
||||
}
|
||||
|
||||
audit_log_nfcfg(table->name, table->af, private->number,
|
||||
!private->number ? AUDIT_XT_OP_REGISTER :
|
||||
AUDIT_XT_OP_REPLACE,
|
||||
GFP_KERNEL);
|
||||
return private;
|
||||
}
|
||||
|
||||
struct xt_table_info *
|
||||
xt_replace_table(struct xt_table *table, unsigned int num_counters,
|
||||
struct xt_table_info *newinfo,
|
||||
int *error)
|
||||
{
|
||||
struct xt_table_info *private;
|
||||
|
||||
private = do_replace_table(table, num_counters, newinfo, error);
|
||||
if (private)
|
||||
audit_log_nfcfg(table->name, table->af, private->number,
|
||||
AUDIT_XT_OP_REPLACE,
|
||||
GFP_KERNEL);
|
||||
|
||||
return private;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(xt_replace_table);
|
||||
|
||||
struct xt_table *xt_register_table(struct net *net,
|
||||
const struct xt_table *input_table,
|
||||
const struct nf_hook_ops *template_ops,
|
||||
struct xt_table_info *bootstrap,
|
||||
struct xt_table_info *newinfo)
|
||||
{
|
||||
struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
|
||||
struct xt_table *t, *table = NULL;
|
||||
struct nf_hook_ops *ops = NULL;
|
||||
struct xt_table_info *private;
|
||||
struct xt_table *t, *table;
|
||||
int ret;
|
||||
unsigned int num_ops;
|
||||
int ret = -EINVAL;
|
||||
|
||||
num_ops = hweight32(input_table->valid_hooks);
|
||||
if (num_ops == 0)
|
||||
goto out;
|
||||
|
||||
ret = -ENOMEM;
|
||||
if (template_ops) {
|
||||
ops = kmemdup_array(template_ops, num_ops, sizeof(*ops), GFP_KERNEL);
|
||||
if (!ops)
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Don't add one object to multiple lists. */
|
||||
table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
|
||||
if (!table) {
|
||||
ret = -ENOMEM;
|
||||
if (!table)
|
||||
goto out;
|
||||
}
|
||||
|
||||
mutex_lock(&xt[table->af].mutex);
|
||||
/* Don't autoload: we'd eat our tail... */
|
||||
@@ -1568,7 +1593,7 @@ struct xt_table *xt_register_table(struct net *net,
|
||||
/* Simplifies replace_table code. */
|
||||
table->private = bootstrap;
|
||||
|
||||
if (!xt_replace_table(table, 0, newinfo, &ret))
|
||||
if (!do_replace_table(table, 0, newinfo, &ret))
|
||||
goto unlock;
|
||||
|
||||
private = table->private;
|
||||
@@ -1577,34 +1602,122 @@ struct xt_table *xt_register_table(struct net *net,
|
||||
/* save number of initial entries */
|
||||
private->initial_entries = private->number;
|
||||
|
||||
if (ops) {
|
||||
int i;
|
||||
|
||||
for (i = 0; i < num_ops; i++)
|
||||
ops[i].priv = table;
|
||||
|
||||
ret = nf_register_net_hooks(net, ops, num_ops);
|
||||
if (ret != 0) {
|
||||
mutex_unlock(&xt[table->af].mutex);
|
||||
/* nf_register_net_hooks() might have published a
|
||||
* base chain before internal error unwind.
|
||||
*/
|
||||
synchronize_rcu();
|
||||
goto out;
|
||||
}
|
||||
|
||||
table->ops = ops;
|
||||
}
|
||||
|
||||
audit_log_nfcfg(table->name, table->af, private->number,
|
||||
AUDIT_XT_OP_REGISTER, GFP_KERNEL);
|
||||
|
||||
list_add(&table->list, &xt_net->tables[table->af]);
|
||||
mutex_unlock(&xt[table->af].mutex);
|
||||
return table;
|
||||
|
||||
unlock:
|
||||
mutex_unlock(&xt[table->af].mutex);
|
||||
kfree(table);
|
||||
out:
|
||||
kfree(table);
|
||||
kfree(ops);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(xt_register_table);
|
||||
|
||||
void *xt_unregister_table(struct xt_table *table)
|
||||
/**
|
||||
* xt_unregister_table_pre_exit - pre-shutdown unregister of a table
|
||||
* @net: network namespace
|
||||
* @af: address family (e.g., NFPROTO_IPV4, NFPROTO_IPV6)
|
||||
* @name: name of the table to unregister
|
||||
*
|
||||
* Unregisters the specified netfilter table from the given network namespace
|
||||
* and also unregisters the hooks from netfilter core: no new packets will be
|
||||
* processed.
|
||||
*
|
||||
* This must be called prior to xt_unregister_table_exit() from the pernet
|
||||
* .pre_exit callback. After this call, the table is no longer visible to
|
||||
* the get/setsockopt path. In case of rmmod, module exit path must have
|
||||
* called xt_unregister_template() prior to unregistering pernet ops to
|
||||
* prevent re-instantiation of the table.
|
||||
*
|
||||
* See also: xt_unregister_table_exit()
|
||||
*/
|
||||
void xt_unregister_table_pre_exit(struct net *net, u8 af, const char *name)
|
||||
{
|
||||
struct xt_table_info *private;
|
||||
struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
|
||||
struct xt_table *t;
|
||||
|
||||
mutex_lock(&xt[table->af].mutex);
|
||||
private = table->private;
|
||||
list_del(&table->list);
|
||||
mutex_unlock(&xt[table->af].mutex);
|
||||
audit_log_nfcfg(table->name, table->af, private->number,
|
||||
AUDIT_XT_OP_UNREGISTER, GFP_KERNEL);
|
||||
kfree(table->ops);
|
||||
kfree(table);
|
||||
mutex_lock(&xt[af].mutex);
|
||||
list_for_each_entry(t, &xt_net->tables[af], list) {
|
||||
if (strcmp(t->name, name) == 0) {
|
||||
list_move(&t->list, &xt_net->dead_tables[af]);
|
||||
mutex_unlock(&xt[af].mutex);
|
||||
|
||||
return private;
|
||||
if (t->ops) /* nat table registers with nat core, t->ops is NULL. */
|
||||
nf_unregister_net_hooks(net, t->ops, hweight32(t->valid_hooks));
|
||||
return;
|
||||
}
|
||||
}
|
||||
mutex_unlock(&xt[af].mutex);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(xt_unregister_table);
|
||||
EXPORT_SYMBOL(xt_unregister_table_pre_exit);
|
||||
|
||||

/**
 * xt_unregister_table_exit - remove a table during namespace teardown
 * @net: the network namespace from which to unregister the table
 * @af: address family (e.g., NFPROTO_IPV4, NFPROTO_IPV6)
 * @name: name of the table to unregister
 *
 * Completes the unregister process for a table, as the second stage after
 * xt_unregister_table_pre_exit(). This must be called from the pernet ops
 * .exit callback, pairing with the .pre_exit stage during namespace shutdown.
 *
 * Return: the unregistered table, or NULL if the table was never
 * instantiated. The caller needs to kfree() the table after it
 * has removed the family specific matches/targets.
 */
struct xt_table *xt_unregister_table_exit(struct net *net, u8 af, const char *name)
{
	struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
	struct xt_table *table;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(table, &xt_net->dead_tables[af], list) {
		struct nf_hook_ops *ops = NULL;

		if (strcmp(table->name, name) != 0)
			continue;

		list_del(&table->list);

		audit_log_nfcfg(table->name, table->af, table->private->number,
				AUDIT_XT_OP_UNREGISTER, GFP_KERNEL);
		swap(table->ops, ops);
		mutex_unlock(&xt[af].mutex);

		kfree(ops);
		return table;
	}
	mutex_unlock(&xt[af].mutex);

	return NULL;
}
EXPORT_SYMBOL_GPL(xt_unregister_table_exit);
#endif
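
For illustration only: a hypothetical "filter"-style table module could drive the
two teardown stages from its pernet ops roughly as follows (the table name,
family and function names are made up; real modules also release the
family-specific entries before freeing the table):

static void __net_exit example_net_pre_exit(struct net *net)
{
	/* Stage 1: unhook the table and hide it from lookups;
	 * no new packets traverse it after this returns.
	 */
	xt_unregister_table_pre_exit(net, NFPROTO_IPV4, "filter");
}

static void __net_exit example_net_exit(struct net *net)
{
	struct xt_table *table;

	/* Stage 2: reclaim the table parked on the dead list by stage 1. */
	table = xt_unregister_table_exit(net, NFPROTO_IPV4, "filter");
	if (table)
		kfree(table);
}

static struct pernet_operations example_net_ops = {
	.pre_exit	= example_net_pre_exit,
	.exit		= example_net_exit,
};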

#ifdef CONFIG_PROC_FS

@@ -2051,8 +2164,10 @@ static int __net_init xt_net_init(struct net *net)
	struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
	int i;

	for (i = 0; i < NFPROTO_NUMPROTO; i++)
	for (i = 0; i < NFPROTO_NUMPROTO; i++) {
		INIT_LIST_HEAD(&xt_net->tables[i]);
		INIT_LIST_HEAD(&xt_net->dead_tables[i]);
	}
	return 0;
}

@@ -2061,8 +2176,10 @@ static void __net_exit xt_net_exit(struct net *net)
	struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
	int i;

	for (i = 0; i < NFPROTO_NUMPROTO; i++)
	for (i = 0; i < NFPROTO_NUMPROTO; i++) {
		WARN_ON_ONCE(!list_empty(&xt_net->tables[i]));
		WARN_ON_ONCE(!list_empty(&xt_net->dead_tables[i]));
	}
}

static struct pernet_operations xt_net_ops = {

@@ -1972,8 +1972,10 @@ int genlmsg_multicast_allns(const struct genl_family *family,
			    struct sk_buff *skb, u32 portid,
			    unsigned int group)
{
	if (WARN_ON_ONCE(group >= family->n_mcgrps))
	if (WARN_ON_ONCE(group >= family->n_mcgrps)) {
		kfree_skb(skb);
		return -EINVAL;
	}

	group = family->mcgrp_offset + group;
	return genlmsg_mcast(skb, portid, group);

@@ -1986,8 +1988,10 @@ void genl_notify(const struct genl_family *family, struct sk_buff *skb,
	struct net *net = genl_info_net(info);
	struct sock *sk = net->genl_sock;

	if (WARN_ON_ONCE(group >= family->n_mcgrps))
	if (WARN_ON_ONCE(group >= family->n_mcgrps)) {
		kfree_skb(skb);
		return;
	}

	group = family->mcgrp_offset + group;
	nlmsg_notify(sk, skb, info->snd_portid, group,

@@ -448,6 +448,7 @@ static int rds_message_zcopy_from_user(struct rds_message *rm, struct iov_iter *

		for (i = 0; i < rm->data.op_nents; i++)
			put_page(sg_page(&rm->data.op_sg[i]));
		rm->data.op_nents = 0;
		mmp = &rm->data.op_mmp_znotifier->z_mmp;
		mm_unaccount_pinned_pages(mmp);
		ret = -EFAULT;

@@ -243,6 +243,20 @@ static struct sk_buff *cbs_dequeue(struct Qdisc *sch)
	return q->dequeue(sch);
}

static void cbs_reset(struct Qdisc *sch)
{
	struct cbs_sched_data *q = qdisc_priv(sch);

	/* Nothing to do if we couldn't create the underlying qdisc */
	if (!q->qdisc)
		return;

	qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
	q->credits = 0;
	q->last = 0;
}

static const struct nla_policy cbs_policy[TCA_CBS_MAX + 1] = {
	[TCA_CBS_PARMS] = { .len = sizeof(struct tc_cbs_qopt) },
};

@@ -540,7 +554,7 @@ static struct Qdisc_ops cbs_qdisc_ops __read_mostly = {
	.dequeue	= cbs_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= cbs_init,
	.reset		= qdisc_reset_queue,
	.reset		= cbs_reset,
	.destroy	= cbs_destroy,
	.change		= cbs_change,
	.dump		= cbs_dump,

@@ -938,6 +938,8 @@ static int dualpi2_init(struct Qdisc *sch, struct nlattr *opt,
	int err;

	sch->flags |= TCQ_F_DEQUEUE_DROPS;
	hrtimer_setup(&q->pi2_timer, dualpi2_timer, CLOCK_MONOTONIC,
		      HRTIMER_MODE_ABS_PINNED_SOFT);

	q->l_queue = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
				       TC_H_MAKE(sch->handle, 1), extack);

@@ -950,8 +952,6 @@ static int dualpi2_init(struct Qdisc *sch, struct nlattr *opt,

	q->sch = sch;
	dualpi2_reset_default(sch);
	hrtimer_setup(&q->pi2_timer, dualpi2_timer, CLOCK_MONOTONIC,
		      HRTIMER_MODE_ABS_PINNED_SOFT);

	if (opt && nla_len(opt)) {
		err = dualpi2_change(sch, opt, extack);

@@ -1986,6 +1986,15 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
			goto out_unlock;

		iov_iter_revert(&msg->msg_iter, err);

		/* sctp_sendmsg_to_asoc() may have released the socket
		 * lock (sctp_wait_for_sndbuf), during which other
		 * associations on ep->asocs could have been peeled
		 * off or freed. @asoc itself is revalidated by the
		 * base.dead and base.sk checks in sctp_wait_for_sndbuf,
		 * so re-derive the cached cursor from it.
		 */
		tmp = list_next_entry(asoc, asocs);
	}

	goto out_unlock;
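
The same rule applies to any list walk whose body can drop the lock protecting
the list: the prefetched "next" cursor may be stale by the time the lock is
re-taken. A generic sketch of the pattern (the helper name is illustrative, not
an SCTP function):

	list_for_each_entry_safe(pos, tmp, head, list) {
		err = do_work_that_may_drop_lock(pos);	/* may sleep, drop lock */
		if (err)
			break;
		/* 'pos' is known to still be linked; refresh the cursor from it. */
		tmp = list_next_entry(pos, list);
	}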

@@ -21,6 +21,8 @@

#define NET_SHAPER_ID_UNSPEC NET_SHAPER_ID_MASK

static_assert(NET_SHAPER_ID_UNSPEC == NET_SHAPER_MAX_HANDLE_ID + 1);

struct net_shaper_hierarchy {
	struct xarray shapers;
};

@@ -90,6 +92,12 @@ static int net_shaper_handle_size(void)
		       nla_total_size(sizeof(u32)));
}

static int net_shaper_group_reply_size(void)
{
	return nla_total_size(sizeof(u32)) +	/* NET_SHAPER_A_IFINDEX */
	       net_shaper_handle_size();	/* NET_SHAPER_A_HANDLE */
}

static int net_shaper_fill_binding(struct sk_buff *msg,
				   const struct net_shaper_binding *binding,
				   u32 type)

@@ -275,11 +283,13 @@ static void net_shaper_default_parent(const struct net_shaper_handle *handle,
	parent->id = 0;
}

/*
 * MARK_0 is already in use due to XA_FLAGS_ALLOC, can't reuse such flag as
 * it's cleared by xa_store().
/* MARK_0 is already in use due to XA_FLAGS_ALLOC. The VALID mark is set on
 * an entry only after the device-side configuration has completed
 * successfully (see net_shaper_commit()). Lookups and dumps must filter on
 * this mark to avoid exposing tentative entries inserted by
 * net_shaper_pre_insert() while the driver call is still in flight.
 */
#define NET_SHAPER_NOT_VALID XA_MARK_1
#define NET_SHAPER_VALID XA_MARK_1

static struct net_shaper *
net_shaper_lookup(struct net_shaper_binding *binding,

@@ -289,10 +299,14 @@ net_shaper_lookup(struct net_shaper_binding *binding,
	struct net_shaper_hierarchy *hierarchy;

	hierarchy = net_shaper_hierarchy_rcu(binding);
	if (!hierarchy || xa_get_mark(&hierarchy->shapers, index,
				      NET_SHAPER_NOT_VALID))
	if (!hierarchy || !xa_get_mark(&hierarchy->shapers, index,
				       NET_SHAPER_VALID))
		return NULL;

	/* Pairs with smp_wmb() in net_shaper_commit(): if the entry is
	 * valid, its contents must be visible too.
	 */
	smp_rmb();
	return xa_load(&hierarchy->shapers, index);
}
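
Condensed, the publish/observe ordering the two sides rely on in this change
looks like this (sketch only, stripped of the surrounding locking and error
handling):

	/* Publisher, net_shaper_commit(), under xa_lock: fill the entry,
	 * then make it observable.
	 */
	*cur = shapers[i];		/* write the payload */
	smp_wmb();			/* order payload writes before the mark */
	__xa_set_mark(&hierarchy->shapers, index, NET_SHAPER_VALID);

	/* Observer, net_shaper_lookup(): only accept marked entries, and keep
	 * the payload reads from being hoisted above the mark check.
	 */
	if (!xa_get_mark(&hierarchy->shapers, index, NET_SHAPER_VALID))
		return NULL;
	smp_rmb();
	return xa_load(&hierarchy->shapers, index);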

@@ -348,7 +362,7 @@ static int net_shaper_pre_insert(struct net_shaper_binding *binding,
	    handle->id == NET_SHAPER_ID_UNSPEC) {
		u32 min, max;

		handle->id = NET_SHAPER_ID_MASK - 1;
		handle->id = NET_SHAPER_MAX_HANDLE_ID;
		max = net_shaper_handle_to_index(handle);
		handle->id = 0;
		min = net_shaper_handle_to_index(handle);

@@ -370,13 +384,10 @@ static int net_shaper_pre_insert(struct net_shaper_binding *binding,
		goto free_id;
	}

	/* Mark 'tentative' shaper inside the hierarchy container.
	 * xa_set_mark is a no-op if the previous store fails.
	/* Insert as 'tentative' (no VALID mark). The mark will be set by
	 * net_shaper_commit() once the driver-side configuration succeeds.
	 */
	xa_lock(&hierarchy->shapers);
	prev = __xa_store(&hierarchy->shapers, index, cur, GFP_KERNEL);
	__xa_set_mark(&hierarchy->shapers, index, NET_SHAPER_NOT_VALID);
	xa_unlock(&hierarchy->shapers);
	prev = xa_store(&hierarchy->shapers, index, cur, GFP_KERNEL);
	if (xa_err(prev)) {
		NL_SET_ERR_MSG(extack, "Can't insert shaper into device store");
		kfree_rcu(cur, rcu);

@@ -413,9 +424,9 @@ static void net_shaper_commit(struct net_shaper_binding *binding,
		/* Successful update: drop the tentative mark
		 * and update the hierarchy container.
		 */
		__xa_clear_mark(&hierarchy->shapers, index,
				NET_SHAPER_NOT_VALID);
		*cur = shapers[i];
		smp_wmb();
		__xa_set_mark(&hierarchy->shapers, index, NET_SHAPER_VALID);
	}
	xa_unlock(&hierarchy->shapers);
}

@@ -431,8 +442,9 @@ static void net_shaper_rollback(struct net_shaper_binding *binding)
		return;

	xa_lock(&hierarchy->shapers);
	xa_for_each_marked(&hierarchy->shapers, index, cur,
			   NET_SHAPER_NOT_VALID) {
	xa_for_each(&hierarchy->shapers, index, cur) {
		if (xa_get_mark(&hierarchy->shapers, index, NET_SHAPER_VALID))
			continue;
		__xa_erase(&hierarchy->shapers, index);
		kfree(cur);
	}

@@ -465,10 +477,21 @@ static int net_shaper_parse_handle(const struct nlattr *attr,
	 * shaper (any other value).
	 */
	id_attr = tb[NET_SHAPER_A_HANDLE_ID];
	if (id_attr)
	if (id_attr) {
		id = nla_get_u32(id_attr);
	else if (handle->scope == NET_SHAPER_SCOPE_NODE)
	} else if (handle->scope == NET_SHAPER_SCOPE_NODE) {
		id = NET_SHAPER_ID_UNSPEC;
	} else if (handle->scope == NET_SHAPER_SCOPE_QUEUE) {
		NL_SET_ERR_ATTR_MISS(info->extack, attr,
				     NET_SHAPER_A_HANDLE_ID);
		return -EINVAL;
	}

	if (id && handle->scope == NET_SHAPER_SCOPE_NETDEV) {
		NL_SET_ERR_MSG_ATTR(info->extack, id_attr,
				    "Netdev scope is a singleton, must use ID 0");
		return -EINVAL;
	}

	handle->id = id;
	return 0;

@@ -836,7 +859,12 @@ int net_shaper_nl_get_dumpit(struct sk_buff *skb,
		goto out_unlock;

	for (; (shaper = xa_find(&hierarchy->shapers, &ctx->start_index,
				 U32_MAX, XA_PRESENT)); ctx->start_index++) {
				 U32_MAX, NET_SHAPER_VALID));
	     ctx->start_index++) {
		/* Pairs with smp_wmb() in net_shaper_commit(): the entry
		 * is marked VALID, so its contents must be visible too.
		 */
		smp_rmb();
		ret = net_shaper_fill_one(skb, binding, shaper, info);
		if (ret)
			break;

@@ -932,6 +960,46 @@ static int net_shaper_handle_cmp(const struct net_shaper_handle *a,
	return memcmp(a, b, sizeof(*a));
}

static int net_shaper_parse_leaves(struct net_shaper_binding *binding,
				   struct genl_info *info,
				   const struct net_shaper *node,
				   struct net_shaper *leaves,
				   int leaves_count)
{
	struct nlattr *attr;
	int i, j, ret, rem;

	i = 0;
	nla_for_each_attr_type(attr, NET_SHAPER_A_LEAVES,
			       genlmsg_data(info->genlhdr),
			       genlmsg_len(info->genlhdr), rem) {
		if (WARN_ON_ONCE(i >= leaves_count))
			return -EINVAL;

		ret = net_shaper_parse_leaf(binding, attr, info,
					    node, &leaves[i]);
		if (ret)
			return ret;

		/* Reject duplicates */
		for (j = 0; j < i; j++) {
			if (net_shaper_handle_cmp(&leaves[i].handle,
						  &leaves[j].handle))
				continue;

			NL_SET_ERR_MSG_ATTR_FMT(info->extack, attr,
						"Duplicate leaf shaper %d:%d",
						leaves[i].handle.scope,
						leaves[i].handle.id);
			return -EINVAL;
		}

		i++;
	}

	return 0;
}

static int net_shaper_parent_from_leaves(int leaves_count,
					 const struct net_shaper *leaves,
					 struct net_shaper *node,

@@ -964,15 +1032,22 @@ static int __net_shaper_group(struct net_shaper_binding *binding,
	int i, ret;

	if (node->handle.scope == NET_SHAPER_SCOPE_NODE) {
		struct net_shaper *cur = NULL;

		new_node = node->handle.id == NET_SHAPER_ID_UNSPEC;

		if (!new_node && !net_shaper_lookup(binding, &node->handle)) {
			/* The related attribute is not available when
			 * reaching here from the delete() op.
			 */
			NL_SET_ERR_MSG_FMT(extack, "Node shaper %d:%d does not exists",
					   node->handle.scope, node->handle.id);
			return -ENOENT;
		if (!new_node) {
			cur = net_shaper_lookup(binding, &node->handle);
			if (!cur) {
				/* The related attribute is not available
				 * when reaching here from the delete() op.
				 */
				NL_SET_ERR_MSG_FMT(extack,
						   "Node shaper %d:%d does not exist",
						   node->handle.scope,
						   node->handle.id);
				return -ENOENT;
			}
		}

		/* When unspecified, the node parent scope is inherited from

@@ -986,6 +1061,15 @@ static int __net_shaper_group(struct net_shaper_binding *binding,
			return ret;
		}

		if (cur && net_shaper_handle_cmp(&cur->parent,
						 &node->parent)) {
			NL_SET_ERR_MSG_FMT(extack,
					   "Cannot reparent node shaper %d:%d",
					   node->handle.scope,
					   node->handle.id);
			return -EOPNOTSUPP;
		}

	} else {
		net_shaper_default_parent(&node->handle, &node->parent);
	}

@@ -1162,7 +1246,7 @@ static int net_shaper_group_send_reply(struct net_shaper_binding *binding,
free_msg:
	/* Should never happen as msg is pre-allocated with enough space. */
	WARN_ONCE(true, "calculated message payload length (%d)",
		  net_shaper_handle_size());
		  net_shaper_group_reply_size());
	nlmsg_free(msg);
	return -EMSGSIZE;
}

@@ -1172,10 +1256,9 @@ int net_shaper_nl_group_doit(struct sk_buff *skb, struct genl_info *info)
	struct net_shaper **old_nodes, *leaves, node = {};
	struct net_shaper_hierarchy *hierarchy;
	struct net_shaper_binding *binding;
	int i, ret, rem, leaves_count;
	int i, ret, leaves_count;
	int old_nodes_count = 0;
	struct sk_buff *msg;
	struct nlattr *attr;

	if (GENL_REQ_ATTR_CHECK(info, NET_SHAPER_A_LEAVES))
		return -EINVAL;

@@ -1203,26 +1286,19 @@ int net_shaper_nl_group_doit(struct sk_buff *skb, struct genl_info *info)
	if (ret)
		goto free_leaves;

	i = 0;
	nla_for_each_attr_type(attr, NET_SHAPER_A_LEAVES,
			       genlmsg_data(info->genlhdr),
			       genlmsg_len(info->genlhdr), rem) {
		if (WARN_ON_ONCE(i >= leaves_count))
			goto free_leaves;

		ret = net_shaper_parse_leaf(binding, attr, info,
					    &node, &leaves[i]);
		if (ret)
			goto free_leaves;
		i++;
	}
	ret = net_shaper_parse_leaves(binding, info, &node,
				      leaves, leaves_count);
	if (ret)
		goto free_leaves;

	/* Prepare the msg reply in advance, to avoid device operation
	 * rollback on allocation failure.
	 */
	msg = genlmsg_new(net_shaper_handle_size(), GFP_KERNEL);
	if (!msg)
	msg = genlmsg_new(net_shaper_group_reply_size(), GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto free_leaves;
	}

	hierarchy = net_shaper_hierarchy_setup(binding);
	if (!hierarchy) {

@@ -11,10 +11,15 @@

#include <uapi/linux/net_shaper.h>

/* Integer value ranges */
static const struct netlink_range_validation net_shaper_a_handle_id_range = {
	.max = NET_SHAPER_MAX_HANDLE_ID,
};

/* Common nested types */
const struct nla_policy net_shaper_handle_nl_policy[NET_SHAPER_A_HANDLE_ID + 1] = {
	[NET_SHAPER_A_HANDLE_SCOPE] = NLA_POLICY_MAX(NLA_U32, 3),
	[NET_SHAPER_A_HANDLE_ID] = { .type = NLA_U32, },
	[NET_SHAPER_A_HANDLE_ID] = NLA_POLICY_FULL_RANGE(NLA_U32, &net_shaper_a_handle_id_range),
};

const struct nla_policy net_shaper_leaf_info_nl_policy[NET_SHAPER_A_WEIGHT + 1] = {

@@ -12,6 +12,8 @@

#include <uapi/linux/net_shaper.h>

#define NET_SHAPER_MAX_HANDLE_ID 67108862

/* Common nested types */
extern const struct nla_policy net_shaper_handle_nl_policy[NET_SHAPER_A_HANDLE_ID + 1];
extern const struct nla_policy net_shaper_leaf_info_nl_policy[NET_SHAPER_A_WEIGHT + 1];
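
For reference on the new limit: assuming the shaper id occupies the low 26 bits
of the handle index (which is what the mask arithmetic above implies), the top
value of that field is reserved as the "unspecified" id and the largest id
userspace may pass sits one below it. Illustrative macros only (names are
hypothetical; they just restate the relationship the static_assert() earlier
in the series checks):

	#define EXAMPLE_ID_BITS		26
	#define EXAMPLE_ID_MASK		((1u << EXAMPLE_ID_BITS) - 1)	/* 67108863, reserved as "unspec" */
	#define EXAMPLE_MAX_HANDLE_ID	(EXAMPLE_ID_MASK - 1)		/* 67108862 */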

@@ -1400,7 +1400,8 @@ smc_v2_determine_accepted_chid(struct smc_clc_msg_accept_confirm *aclc,
	int i;

	for (i = 0; i < ini->ism_offered_cnt + 1; i++) {
		if (ini->ism_chid[i] == ntohs(aclc->d1.chid)) {
		if (ini->ism_dev[i] &&
		    ini->ism_chid[i] == ntohs(aclc->d1.chid)) {
			ini->ism_selected = i;
			return 0;
		}

@@ -3054,18 +3055,17 @@ static int __smc_setsockopt(struct socket *sock, int level, int optname,

	smc = smc_sk(sk);

	/* pre-fetch user data outside the lock */
	if (optname == SMC_LIMIT_HS) {
		if (optlen < sizeof(int))
			return -EINVAL;
		if (copy_from_sockptr(&val, optval, sizeof(int)))
			return -EFAULT;
	}

	lock_sock(sk);
	switch (optname) {
	case SMC_LIMIT_HS:
		if (optlen < sizeof(int)) {
			rc = -EINVAL;
			break;
		}
		if (copy_from_sockptr(&val, optval, sizeof(int))) {
			rc = -EFAULT;
			break;
		}

		smc->limit_smc_hs = !!val;
		rc = 0;
		break;

@@ -51,7 +51,7 @@ DECLARE_EVENT_CLASS(smc_msg_event,
		__field(const void *, smc)
		__field(u64, net_cookie)
		__field(size_t, len)
		__string(name, smc->conn.lnk->ibname)
		__string(name, smc->conn.lnk ? smc->conn.lnk->ibname : "")
	),

	TP_fast_assign(

@@ -789,23 +789,33 @@ static int tls_push_record(struct sock *sk, int flags,
	i = msg_pl->sg.end;
	sk_msg_iter_var_prev(i);

	/* msg_pl->sg.data is a ring; data[MAX+1] is reserved for the wrap
	 * link (frags won't use it). 'i' is now the last filled entry:
	 *
	 *        i   end                start
	 *        v   v                  v        [ rsv ]
	 * [ d ][ d ][  ][  ] ... [  ][ d ][ d ][ d ][chain]
	 *    ^                               END       v
	 *    `-----------------------------------------'
	 *
	 * Note that SGL does not allow chain-after-chain, so for TLS 1.3,
	 * we must make sure we don't create the wrap entry and then chain
	 * link to content_type immediately at index 0.
	 */
	if (i < msg_pl->sg.start)
		sg_chain(msg_pl->sg.data, ARRAY_SIZE(msg_pl->sg.data),
			 msg_pl->sg.data);

	rec->content_type = record_type;
	if (prot->version == TLS_1_3_VERSION) {
		/* Add content type to end of message. No padding added */
		sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
		sg_mark_end(&rec->sg_content_type);
		sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
			 &rec->sg_content_type);
		sg_chain(msg_pl->sg.data, i + 2, &rec->sg_content_type);
	} else {
		sg_mark_end(sk_msg_elem(msg_pl, i));
	}

	if (msg_pl->sg.end < msg_pl->sg.start) {
		sg_chain(&msg_pl->sg.data[msg_pl->sg.start],
			 MAX_SKB_FRAGS - msg_pl->sg.start + 1,
			 msg_pl->sg.data);
	}

	i = msg_pl->sg.start;
	sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);
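
The corrected count follows from the sg_chain() calling convention:
sg_chain(sgl, nents, next) overwrites sgl[nents - 1] with a chain link to
'next'. Since 'i' is the index of the last entry actually carrying data, the
link to the content-type entry belongs at index i + 1, hence the count of
i + 2:

	/* chain link written at msg_pl->sg.data[i + 1] */
	sg_chain(msg_pl->sg.data, i + 2, &rec->sg_content_type);

Counting from sg.end instead (the old sg.end + 1) picks the same slot only
while the ring has not wrapped; once it wraps, sg.end no longer trails the
last filled entry and the link lands in the wrong slot.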

@@ -136,27 +136,6 @@ static void virtio_transport_init_hdr(struct sk_buff *skb,
	hdr->fwd_cnt = cpu_to_le32(0);
}

static void virtio_transport_copy_nonlinear_skb(const struct sk_buff *skb,
						void *dst,
						size_t len)
{
	struct iov_iter iov_iter = { 0 };
	struct kvec kvec;
	size_t to_copy;

	kvec.iov_base = dst;
	kvec.iov_len = len;

	iov_iter.iter_type = ITER_KVEC;
	iov_iter.kvec = &kvec;
	iov_iter.nr_segs = 1;

	to_copy = min_t(size_t, len, skb->len);

	skb_copy_datagram_iter(skb, VIRTIO_VSOCK_SKB_CB(skb)->offset,
			       &iov_iter, to_copy);
}

/* Packet capture */
static struct sk_buff *virtio_transport_build_skb(void *opaque)
{

@@ -166,12 +145,12 @@ static struct sk_buff *virtio_transport_build_skb(void *opaque)
	struct sk_buff *skb;
	size_t payload_len;

	/* A packet could be split to fit the RX buffer, so we can retrieve
	 * the payload length from the header and the buffer pointer taking
	 * care of the offset in the original packet.
	/* A packet could be split to fit the RX buffer, so we use
	 * the payload length from the header, which has been updated
	 * by the sender to reflect the fragment size.
	 */
	pkt_hdr = virtio_vsock_hdr(pkt);
	payload_len = pkt->len;
	payload_len = le32_to_cpu(pkt_hdr->len);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*pkt_hdr) + payload_len,
			GFP_ATOMIC);

@@ -214,12 +193,18 @@ static struct sk_buff *virtio_transport_build_skb(void *opaque)
	skb_put_data(skb, pkt_hdr, sizeof(*pkt_hdr));

	if (payload_len) {
		if (skb_is_nonlinear(pkt)) {
			void *data = skb_put(skb, payload_len);
		struct iov_iter iov_iter;
		struct kvec kvec;
		void *data = skb_put(skb, payload_len);

			virtio_transport_copy_nonlinear_skb(pkt, data, payload_len);
		} else {
			skb_put_data(skb, pkt->data, payload_len);
		kvec.iov_base = data;
		kvec.iov_len = payload_len;
		iov_iter_kvec(&iov_iter, ITER_DEST, &kvec, 1, payload_len);

		if (skb_copy_datagram_iter(pkt, VIRTIO_VSOCK_SKB_CB(pkt)->offset,
					   &iov_iter, payload_len)) {
			kfree_skb(skb);
			return NULL;
		}
	}

@@ -15,9 +15,11 @@ UAPI_PATH:=../../../../include/uapi/
get_hdr_inc=-D$(1) -include $(UAPI_PATH)/linux/$(2)
get_hdr_inc2=-D$(1) -D$(2) -include $(UAPI_PATH)/linux/$(3)

CFLAGS_dev-energymodel:=$(call get_hdr_inc,_LINUX_DEV_ENERGYMODEL_H,dev_energymodel.h)
CFLAGS_devlink:=$(call get_hdr_inc,_LINUX_DEVLINK_H_,devlink.h)
CFLAGS_dpll:=$(call get_hdr_inc,_LINUX_DPLL_H,dpll.h)
CFLAGS_ethtool:=$(call get_hdr_inc,_LINUX_ETHTOOL_H,ethtool.h) \
CFLAGS_ethtool:=$(call get_hdr_inc,_LINUX_TYPELIMITS_H,typelimits.h) \
	$(call get_hdr_inc,_LINUX_ETHTOOL_H,ethtool.h) \
	$(call get_hdr_inc,_LINUX_ETHTOOL_NETLINK_H_,ethtool_netlink.h) \
	$(call get_hdr_inc,_LINUX_ETHTOOL_NETLINK_GENERATED_H,ethtool_netlink_generated.h)
CFLAGS_handshake:=$(call get_hdr_inc,_LINUX_HANDSHAKE_H,handshake.h)

@@ -3212,6 +3212,8 @@ def render_uapi(family, cw):
    for const in family['definitions']:
        if const.get('header'):
            continue
        if const.get('scope', 'uapi') != 'uapi':
            continue

        if const['type'] != 'const':
            cw.writes_defines(defines)

@@ -3339,6 +3341,25 @@ def render_uapi(family, cw):
    cw.p(f'#endif /* {hdr_prot} */')


def render_scoped_consts(family, cw, scope):
    defines = []
    for const in family['definitions']:
        if const['type'] != 'const':
            continue
        if const.get('header'):
            continue
        if const.get('scope') != scope:
            continue
        name_pfx = const.get('name-prefix', f"{family.ident_name}-")
        defines.append([
            c_upper(family.get('c-define-name',
                               f"{name_pfx}{const['name']}")),
            const['value']])
    if defines:
        cw.writes_defines(defines)
        cw.nl()
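
For illustration: given a hypothetical family named "foo" with a definition
"max-slots" of type const, value 4 and "scope: kernel", render_uapi() above
would now skip it and this helper would emit roughly the following into the
kernel-side generated header (exact formatting comes from writes_defines()):

	#define FOO_MAX_SLOTS	4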

def _render_user_ntf_entry(ri, op):
    if not ri.family.is_classic():
        ri.cw.block_start(line=f"[{op.enum_name}] = ")

@@ -3504,8 +3525,12 @@ def main():
        cw.p('#include "ynl.h"')
    headers = []
    for definition in parsed['definitions'] + parsed['attribute-sets']:
        if 'header' in definition:
            headers.append(definition['header'])
        if 'header' not in definition:
            continue
        scope = definition.get('scope', 'uapi')
        if scope != 'uapi' and scope != args.mode:
            continue
        headers.append(definition['header'])
    if args.mode == 'user':
        headers.append(parsed.uapi_header)
    seen_header = []

@@ -3522,6 +3547,7 @@ def main():
            for one in args.user_header:
                cw.p(f'#include "{one}"')
        else:
            render_scoped_consts(parsed, cw, 'user')
            cw.p('struct ynl_sock;')
            cw.nl()
            render_user_family(parsed, cw, True)

@@ -3529,6 +3555,7 @@ def main():

    if args.mode == "kernel":
        if args.header:
            render_scoped_consts(parsed, cw, 'kernel')
            for _, struct in sorted(parsed.pure_nested_structs.items()):
                if struct.request:
                    cw.p('/* Common nested types */')

@@ -1,7 +1,10 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0

from lib.py import ksft_run, ksft_exit, ksft_eq, ksft_true, KsftSkipEx
import errno

from lib.py import ksft_run, ksft_exit
from lib.py import ksft_eq, ksft_raises, ksft_true, KsftSkipEx
from lib.py import EthtoolFamily, NetshaperFamily
from lib.py import NetDrvEnv
from lib.py import NlError

@@ -438,6 +441,21 @@ def queue_update(cfg, nl_shaper) -> None:
        nl_shaper.delete({'ifindex': cfg.ifindex,
                          'handle': {'scope': 'queue', 'id': i}})

def dup_leaves(cfg, nl_shaper) -> None:
    """ Ensure that the kernel rejects duplicate leaves. """
    if not cfg.groups:
        raise KsftSkipEx("device does not support node scope")

    with ksft_raises(NlError) as cm:
        nl_shaper.group({
            'ifindex': cfg.ifindex,
            'leaves':[{'handle': {'scope': 'queue', 'id': 0}},
                      {'handle': {'scope': 'queue', 'id': 0}}],
            'handle': {'scope':'node'},
            'metric': 'bps',
            'bw-max': 10000})
    ksft_eq(cm.exception.error, errno.EINVAL)

def main() -> None:
    with NetDrvEnv(__file__, queue_count=4) as cfg:
        cfg.queues = False

@@ -453,7 +471,9 @@ def main() -> None:
                  basic_groups,
                  qgroups,
                  delegation,
                  queue_update], args=(cfg, NetshaperFamily()))
                  dup_leaves,
                  queue_update],
                 args=(cfg, NetshaperFamily()))
    ksft_exit()

@@ -1284,5 +1284,46 @@
        "teardown": [
            "$TC qdisc del dev $DUMMY handle 1: root"
        ]
    },
    {
        "id": "3a62",
        "name": "Try to create a qlen underflow with QFQ/CBS",
        "category": [
            "qdisc",
            "qfq",
            "cbs"
        ],
        "plugins": {
            "requires": "nsPlugin"
        },
        "setup": [
            "$IP link set dev $DUMMY up || true",
            "$IP addr add 10.10.10.10/24 dev $DUMMY || true",
            "$TC qdisc add dev $DUMMY root handle 1: qfq",
            "$TC class add dev $DUMMY classid 1:1 parent 1: qfq",
            "$TC class add dev $DUMMY classid 1:2 parent 1: qfq",
            "$TC qdisc add dev $DUMMY handle 2: parent 1:1 cbs",
            "$TC qdisc add dev $DUMMY handle 3: parent 2: netem delay 5000000000",
            "$TC filter add dev $DUMMY parent 1: prio 1 u32 match ip dst 10.10.10.1 classid 1:1 action ok",
            "$TC filter add dev $DUMMY parent 1: prio 2 u32 match ip dst 10.10.10.2 classid 1:2 action ok",
            "ping -c 1 10.10.10.1 -W0.01 -I$DUMMY || true",
            "$IP l set $DUMMY down",
            "$IP l set $DUMMY up",
            "$TC qdisc replace dev $DUMMY handle 4: parent 2: pfifo"
        ],
        "cmdUnderTest": "ping -c 1 10.10.10.2 -W0.01 -I$DUMMY",
        "expExitCode": "1",
        "verifyCmd": "$TC -s -j qdisc ls dev $DUMMY parent 1:1",
        "matchJSON": [
            {
                "kind": "cbs",
                "handle": "2:",
                "bytes": 0,
                "packets": 0
            }
        ],
        "teardown": [
            "$TC qdisc del dev $DUMMY handle 1: root"
        ]
    }
]