mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-09 03:10:30 -04:00
Merge tag 'net-6.15-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Pull networking fixes from Paolo Abeni:
"This is somewhat larger than what I hoped for, with a few PRs from
subsystems and follow-ups for the recent netdev locking changes,
anyhow there are no known pending regressions.
Including fixes from bluetooth, ipsec and CAN.
Current release - regressions:
- eth: team: grab team lock during team_change_rx_flags
- eth: bnxt_en: fix netdev locking in ULP IRQ functions
Current release - new code bugs:
- xfrm: ipcomp: fix truesize computation on receive
- eth: airoha: fix page recycling in airoha_qdma_rx_process()
Previous releases - regressions:
- sched: hfsc: fix qlen accounting bug when using peek in
hfsc_enqueue()
- mr: consolidate the ipmr_can_free_table() checks.
- bridge: netfilter: fix forwarding of fragmented packets
- xsk: bring back busy polling support in XDP_COPY
- can:
- add missing rcu read protection for procfs content
- kvaser_pciefd: force IRQ edge in case of nested IRQ
Previous releases - always broken:
- xfrm: espintcp: remove encap socket caching to avoid reference leak
- bluetooth: use skb_pull to avoid unsafe access in QCA dump handling
- eth: idpf:
- fix null-ptr-deref in idpf_features_check
- fix idpf_vport_splitq_napi_poll()
- eth: hibmcge: fix wrong ndo.open() after reset fail issue"
* tag 'net-6.15-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (40 commits)
octeontx2-af: Fix APR entry mapping based on APR_LMT_CFG
octeontx2-af: Set LMT_ENA bit for APR table entries
net/tipc: fix slab-use-after-free Read in tipc_aead_encrypt_done
octeontx2-pf: Avoid adding dcbnl_ops for LBK and SDP vf
selftests/tc-testing: Add an HFSC qlen accounting test
sch_hfsc: Fix qlen accounting bug when using peek in hfsc_enqueue()
idpf: fix idpf_vport_splitq_napi_poll()
net: hibmcge: fix wrong ndo.open() after reset fail issue.
net: hibmcge: fix incorrect statistics update issue
xsk: Bring back busy polling support in XDP_COPY
can: slcan: allow reception of short error messages
net: lan743x: Restore SGMII CTRL register on resume
bnxt_en: Fix netdev locking in ULP IRQ functions
MAINTAINERS: Drop myself to reviewer for ravb driver
net: dwmac-sun8i: Use parsed internal PHY address instead of 1
net: ethernet: ti: am65-cpsw: Lower random mac address error print to info
can: kvaser_pciefd: Continue parsing DMA buf after dropped RX
can: kvaser_pciefd: Fix echo_skb race
can: kvaser_pciefd: Force IRQ edge in case of nested IRQ
idpf: fix null-ptr-deref in idpf_features_check
...
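Several of the fixes above (the bnxt_en ULP IRQ functions and the team rx_flags change) share one pattern: a pointer that used to be read under rcu_read_lock() is now read under the mutex its writers hold, because the operations called through it can sleep. The snippet below is only a rough userspace model of that idea, using pthreads and made-up names instead of the kernel's netdev lock:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct ulp_ops {
	void (*irq_stop)(void);		/* may sleep in the real driver */
};

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static struct ulp_ops *registered_ops;	/* written only with dev_lock held */

static void demo_irq_stop(void)
{
	usleep(1000);			/* stand-in for a sleeping operation */
	puts("irq_stop ran");
}

static void register_ops(struct ulp_ops *ops)
{
	pthread_mutex_lock(&dev_lock);
	registered_ops = ops;
	pthread_mutex_unlock(&dev_lock);
}

static void stop_irqs(void)
{
	/* Read the pointer under the same lock the writer takes, so the
	 * (possibly sleeping) callback runs with the registration pinned. */
	pthread_mutex_lock(&dev_lock);
	if (registered_ops && registered_ops->irq_stop)
		registered_ops->irq_stop();
	pthread_mutex_unlock(&dev_lock);
}

int main(void)
{
	static struct ulp_ops ops = { .irq_stop = demo_irq_stop };

	register_ops(&ops);
	stop_irqs();
	return 0;
}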
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/can/microchip,mcp2510.yaml#
$id: http://devicetree.org/schemas/net/can/microchip,mcp2510.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Microchip MCP251X stand-alone CAN controller

@@ -20613,8 +20613,8 @@ F: Documentation/devicetree/bindings/i2c/renesas,iic-emev2.yaml
F: drivers/i2c/busses/i2c-emev2.c

RENESAS ETHERNET AVB DRIVER
M: Paul Barker <paul.barker.ct@bp.renesas.com>
M: Niklas Söderlund <niklas.soderlund@ragnatech.se>
R: Paul Barker <paul@pbarker.dev>
L: netdev@vger.kernel.org
L: linux-renesas-soc@vger.kernel.org
S: Maintained

@@ -3014,9 +3014,8 @@ static void btusb_coredump_qca(struct hci_dev *hdev)
static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
int ret = 0;
unsigned int skip = 0;
u8 pkt_type;
u8 *sk_ptr;
unsigned int sk_len;
u16 seqno;
u32 dump_size;

@@ -3025,18 +3024,13 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
struct usb_device *udev = btdata->udev;

pkt_type = hci_skb_pkt_type(skb);
sk_ptr = skb->data;
sk_len = skb->len;
skip = sizeof(struct hci_event_hdr);
if (pkt_type == HCI_ACLDATA_PKT)
skip += sizeof(struct hci_acl_hdr);

if (pkt_type == HCI_ACLDATA_PKT) {
sk_ptr += HCI_ACL_HDR_SIZE;
sk_len -= HCI_ACL_HDR_SIZE;
}
skb_pull(skb, skip);
dump_hdr = (struct qca_dump_hdr *)skb->data;

sk_ptr += HCI_EVENT_HDR_SIZE;
sk_len -= HCI_EVENT_HDR_SIZE;

dump_hdr = (struct qca_dump_hdr *)sk_ptr;
seqno = le16_to_cpu(dump_hdr->seqno);
if (seqno == 0) {
set_bit(BTUSB_HW_SSR_ACTIVE, &btdata->flags);
@@ -3056,16 +3050,15 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)

btdata->qca_dump.ram_dump_size = dump_size;
btdata->qca_dump.ram_dump_seqno = 0;
sk_ptr += offsetof(struct qca_dump_hdr, data0);
sk_len -= offsetof(struct qca_dump_hdr, data0);

skb_pull(skb, offsetof(struct qca_dump_hdr, data0));

usb_disable_autosuspend(udev);
bt_dev_info(hdev, "%s memdump size(%u)\n",
(pkt_type == HCI_ACLDATA_PKT) ? "ACL" : "event",
dump_size);
} else {
sk_ptr += offsetof(struct qca_dump_hdr, data);
sk_len -= offsetof(struct qca_dump_hdr, data);
skb_pull(skb, offsetof(struct qca_dump_hdr, data));
}

if (!btdata->qca_dump.ram_dump_size) {
@@ -3085,7 +3078,6 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
return ret;
}

skb_pull(skb, skb->len - sk_len);
hci_devcd_append(hdev, skb);
btdata->qca_dump.ram_dump_seqno++;
if (seqno == QCA_LAST_SEQUENCE_NUM) {
@@ -3113,68 +3105,58 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
/* Return: true if the ACL packet is a dump packet, false otherwise. */
static bool acl_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
u8 *sk_ptr;
unsigned int sk_len;

struct hci_event_hdr *event_hdr;
struct hci_acl_hdr *acl_hdr;
struct qca_dump_hdr *dump_hdr;
struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
bool is_dump = false;

sk_ptr = skb->data;
sk_len = skb->len;

acl_hdr = hci_acl_hdr(skb);
if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE)
if (!clone)
return false;

sk_ptr += HCI_ACL_HDR_SIZE;
sk_len -= HCI_ACL_HDR_SIZE;
event_hdr = (struct hci_event_hdr *)sk_ptr;
acl_hdr = skb_pull_data(clone, sizeof(*acl_hdr));
if (!acl_hdr || (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE))
goto out;

if ((event_hdr->evt != HCI_VENDOR_PKT) ||
(event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE)))
return false;
event_hdr = skb_pull_data(clone, sizeof(*event_hdr));
if (!event_hdr || (event_hdr->evt != HCI_VENDOR_PKT))
goto out;

sk_ptr += HCI_EVENT_HDR_SIZE;
sk_len -= HCI_EVENT_HDR_SIZE;
dump_hdr = skb_pull_data(clone, sizeof(*dump_hdr));
if (!dump_hdr || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
(dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
goto out;

dump_hdr = (struct qca_dump_hdr *)sk_ptr;
if ((sk_len < offsetof(struct qca_dump_hdr, data)) ||
(dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
(dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
return false;

return true;
is_dump = true;
out:
consume_skb(clone);
return is_dump;
}

/* Return: true if the event packet is a dump packet, false otherwise. */
static bool evt_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
u8 *sk_ptr;
unsigned int sk_len;

struct hci_event_hdr *event_hdr;
struct qca_dump_hdr *dump_hdr;
struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
bool is_dump = false;

sk_ptr = skb->data;
sk_len = skb->len;

event_hdr = hci_event_hdr(skb);

if ((event_hdr->evt != HCI_VENDOR_PKT)
|| (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE)))
if (!clone)
return false;

sk_ptr += HCI_EVENT_HDR_SIZE;
sk_len -= HCI_EVENT_HDR_SIZE;
event_hdr = skb_pull_data(clone, sizeof(*event_hdr));
if (!event_hdr || (event_hdr->evt != HCI_VENDOR_PKT))
goto out;

dump_hdr = (struct qca_dump_hdr *)sk_ptr;
if ((sk_len < offsetof(struct qca_dump_hdr, data)) ||
(dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
(dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
return false;
dump_hdr = skb_pull_data(clone, sizeof(*dump_hdr));
if (!dump_hdr || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
(dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
goto out;

return true;
is_dump = true;
out:
consume_skb(clone);
return is_dump;
}

static int btusb_recv_acl_qca(struct hci_dev *hdev, struct sk_buff *skb)
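The helpers above stop indexing into skb->data with hand-maintained sk_ptr/sk_len arithmetic and instead pull each header off a clone of the skb, so a short packet fails the pull instead of being read past its end. A standalone sketch of that pull-or-fail parsing style on a plain byte buffer (the header layouts and constant values here are illustrative, not the real QCA dump format):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct cursor {
	const uint8_t *data;
	size_t len;
};

/* Return a pointer to the next n bytes and advance, or NULL if too short. */
static const void *pull(struct cursor *c, size_t n)
{
	const void *p;

	if (c->len < n)
		return NULL;
	p = c->data;
	c->data += n;
	c->len -= n;
	return p;
}

struct evt_hdr { uint8_t evt; uint8_t plen; };
struct dump_hdr { uint8_t vse_class; uint8_t msg_type; };

#define VENDOR_PKT	0xff	/* illustrative values, not the QCA ones */
#define DUMP_CLASS	0x01
#define DUMP_MSGTYPE	0x08

static int pkt_is_dump(const uint8_t *buf, size_t len)
{
	struct cursor c = { buf, len };
	const struct evt_hdr *evt;
	const struct dump_hdr *dump;

	evt = pull(&c, sizeof(*evt));
	if (!evt || evt->evt != VENDOR_PKT)
		return 0;

	dump = pull(&c, sizeof(*dump));
	if (!dump || dump->vse_class != DUMP_CLASS ||
	    dump->msg_type != DUMP_MSGTYPE)
		return 0;

	return 1;
}

int main(void)
{
	const uint8_t good[] = { VENDOR_PKT, 2, DUMP_CLASS, DUMP_MSGTYPE };
	const uint8_t truncated[] = { VENDOR_PKT };

	printf("good=%d truncated=%d\n",
	       pkt_is_dump(good, sizeof(good)),
	       pkt_is_dump(truncated, sizeof(truncated)));
	return 0;
}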

@@ -16,6 +16,7 @@
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/timer.h>
#include <net/netdev_queues.h>

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Kvaser AB <support@kvaser.com>");
@@ -410,10 +411,13 @@ struct kvaser_pciefd_can {
void __iomem *reg_base;
struct can_berr_counter bec;
u8 cmd_seq;
u8 tx_max_count;
u8 tx_idx;
u8 ack_idx;
int err_rep_cnt;
int echo_idx;
unsigned int completed_tx_pkts;
unsigned int completed_tx_bytes;
spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */
spinlock_t echo_lock; /* Locks the message echo buffer */
struct timer_list bec_poll_timer;
struct completion start_comp, flush_comp;
};
@@ -714,6 +718,9 @@ static int kvaser_pciefd_open(struct net_device *netdev)
int ret;
struct kvaser_pciefd_can *can = netdev_priv(netdev);

can->tx_idx = 0;
can->ack_idx = 0;

ret = open_candev(netdev);
if (ret)
return ret;
@@ -745,21 +752,26 @@ static int kvaser_pciefd_stop(struct net_device *netdev)
timer_delete(&can->bec_poll_timer);
}
can->can.state = CAN_STATE_STOPPED;
netdev_reset_queue(netdev);
close_candev(netdev);

return ret;
}

static unsigned int kvaser_pciefd_tx_avail(const struct kvaser_pciefd_can *can)
{
return can->tx_max_count - (READ_ONCE(can->tx_idx) - READ_ONCE(can->ack_idx));
}

static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
struct kvaser_pciefd_can *can,
struct can_priv *can, u8 seq,
struct sk_buff *skb)
{
struct canfd_frame *cf = (struct canfd_frame *)skb->data;
int packet_size;
int seq = can->echo_idx;

memset(p, 0, sizeof(*p));
if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
if (can->ctrlmode & CAN_CTRLMODE_ONE_SHOT)
p->header[1] |= KVASER_PCIEFD_TPACKET_SMS;

if (cf->can_id & CAN_RTR_FLAG)
@@ -782,7 +794,7 @@ static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
} else {
p->header[1] |=
FIELD_PREP(KVASER_PCIEFD_RPACKET_DLC_MASK,
can_get_cc_dlc((struct can_frame *)cf, can->can.ctrlmode));
can_get_cc_dlc((struct can_frame *)cf, can->ctrlmode));
}

p->header[1] |= FIELD_PREP(KVASER_PCIEFD_PACKET_SEQ_MASK, seq);
@@ -797,22 +809,24 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct kvaser_pciefd_can *can = netdev_priv(netdev);
unsigned long irq_flags;
struct kvaser_pciefd_tx_packet packet;
unsigned int seq = can->tx_idx & (can->can.echo_skb_max - 1);
unsigned int frame_len;
int nr_words;
u8 count;

if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
if (!netif_subqueue_maybe_stop(netdev, 0, kvaser_pciefd_tx_avail(can), 1, 1))
return NETDEV_TX_BUSY;

nr_words = kvaser_pciefd_prepare_tx_packet(&packet, can, skb);
nr_words = kvaser_pciefd_prepare_tx_packet(&packet, &can->can, seq, skb);

spin_lock_irqsave(&can->echo_lock, irq_flags);
/* Prepare and save echo skb in internal slot */
can_put_echo_skb(skb, netdev, can->echo_idx, 0);

/* Move echo index to the next slot */
can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max;
WRITE_ONCE(can->can.echo_skb[seq], NULL);
frame_len = can_skb_get_frame_len(skb);
can_put_echo_skb(skb, netdev, seq, frame_len);
netdev_sent_queue(netdev, frame_len);
WRITE_ONCE(can->tx_idx, can->tx_idx + 1);

/* Write header to fifo */
iowrite32(packet.header[0],
@@ -836,14 +850,7 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
}

count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
/* No room for a new message, stop the queue until at least one
* successful transmit
*/
if (count >= can->can.echo_skb_max || can->can.echo_skb[can->echo_idx])
netif_stop_queue(netdev);
spin_unlock_irqrestore(&can->echo_lock, irq_flags);
netif_subqueue_maybe_stop(netdev, 0, kvaser_pciefd_tx_avail(can), 1, 1);

return NETDEV_TX_OK;
}
@@ -970,6 +977,8 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
can->kv_pcie = pcie;
can->cmd_seq = 0;
can->err_rep_cnt = 0;
can->completed_tx_pkts = 0;
can->completed_tx_bytes = 0;
can->bec.txerr = 0;
can->bec.rxerr = 0;

@@ -983,11 +992,10 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
tx_nr_packets_max =
FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_MAX_MASK,
ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
can->tx_max_count = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1);

can->can.clock.freq = pcie->freq;
can->can.echo_skb_max = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1);
can->echo_idx = 0;
spin_lock_init(&can->echo_lock);
can->can.echo_skb_max = roundup_pow_of_two(can->tx_max_count);
spin_lock_init(&can->lock);

can->can.bittiming_const = &kvaser_pciefd_bittiming_const;
@@ -1201,7 +1209,7 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
skb = alloc_canfd_skb(priv->dev, &cf);
if (!skb) {
priv->dev->stats.rx_dropped++;
return -ENOMEM;
return 0;
}

cf->len = can_fd_dlc2len(dlc);
@@ -1213,7 +1221,7 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf);
if (!skb) {
priv->dev->stats.rx_dropped++;
return -ENOMEM;
return 0;
}
can_frame_set_cc_len((struct can_frame *)cf, dlc, priv->ctrlmode);
}
@@ -1231,7 +1239,9 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
priv->dev->stats.rx_packets++;
kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);

return netif_rx(skb);
netif_rx(skb);

return 0;
}

static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
@@ -1510,19 +1520,21 @@ static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie,
netdev_dbg(can->can.dev, "Packet was flushed\n");
} else {
int echo_idx = FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[0]);
int len;
u8 count;
unsigned int len, frame_len = 0;
struct sk_buff *skb;

if (echo_idx != (can->ack_idx & (can->can.echo_skb_max - 1)))
return 0;
skb = can->can.echo_skb[echo_idx];
if (skb)
kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
len = can_get_echo_skb(can->can.dev, echo_idx, NULL);
count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
if (!skb)
return 0;
kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
len = can_get_echo_skb(can->can.dev, echo_idx, &frame_len);

if (count < can->can.echo_skb_max && netif_queue_stopped(can->can.dev))
netif_wake_queue(can->can.dev);
/* Pairs with barrier in kvaser_pciefd_start_xmit() */
smp_store_release(&can->ack_idx, can->ack_idx + 1);
can->completed_tx_pkts++;
can->completed_tx_bytes += frame_len;

if (!one_shot_fail) {
can->can.dev->stats.tx_bytes += len;
@@ -1638,32 +1650,51 @@ static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
{
int pos = 0;
int res = 0;
unsigned int i;

do {
res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf);
} while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE);

/* Report ACKs in this buffer to BQL en masse for correct periods */
for (i = 0; i < pcie->nr_channels; ++i) {
struct kvaser_pciefd_can *can = pcie->can[i];

if (!can->completed_tx_pkts)
continue;
netif_subqueue_completed_wake(can->can.dev, 0,
can->completed_tx_pkts,
can->completed_tx_bytes,
kvaser_pciefd_tx_avail(can), 1);
can->completed_tx_pkts = 0;
can->completed_tx_bytes = 0;
}

return res;
}

static u32 kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
{
void __iomem *srb_cmd_reg = KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG;
u32 irq = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);

if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0)
kvaser_pciefd_read_buffer(pcie, 0);
iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);

if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1)
if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
kvaser_pciefd_read_buffer(pcie, 0);
iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, srb_cmd_reg); /* Rearm buffer */
}

if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
kvaser_pciefd_read_buffer(pcie, 1);
iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1, srb_cmd_reg); /* Rearm buffer */
}

if (unlikely(irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
irq & KVASER_PCIEFD_SRB_IRQ_DUF0 ||
irq & KVASER_PCIEFD_SRB_IRQ_DUF1))
dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);

iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
return irq;
}

static void kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
@@ -1691,29 +1722,22 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask;
u32 pci_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie));
u32 srb_irq = 0;
u32 srb_release = 0;
int i;

if (!(pci_irq & irq_mask->all))
return IRQ_NONE;

iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));

if (pci_irq & irq_mask->kcan_rx0)
srb_irq = kvaser_pciefd_receive_irq(pcie);
kvaser_pciefd_receive_irq(pcie);

for (i = 0; i < pcie->nr_channels; i++) {
if (pci_irq & irq_mask->kcan_tx[i])
kvaser_pciefd_transmit_irq(pcie->can[i]);
}

if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD0)
srb_release |= KVASER_PCIEFD_SRB_CMD_RDB0;

if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD1)
srb_release |= KVASER_PCIEFD_SRB_CMD_RDB1;

if (srb_release)
iowrite32(srb_release, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
iowrite32(irq_mask->all, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));

return IRQ_HANDLED;
}
@@ -1733,13 +1757,22 @@ static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie)
}
}

static void kvaser_pciefd_disable_irq_srcs(struct kvaser_pciefd *pcie)
{
unsigned int i;

/* Masking PCI_IRQ is insufficient as running ISR will unmask it */
iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG);
for (i = 0; i < pcie->nr_channels; ++i)
iowrite32(0, pcie->can[i]->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
}

static int kvaser_pciefd_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
int ret;
struct kvaser_pciefd *pcie;
const struct kvaser_pciefd_irq_mask *irq_mask;
void __iomem *irq_en_base;

pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
@@ -1805,8 +1838,7 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG);

/* Enable PCI interrupts */
irq_en_base = KVASER_PCIEFD_PCI_IEN_ADDR(pcie);
iowrite32(irq_mask->all, irq_en_base);
iowrite32(irq_mask->all, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
/* Ready the DMA buffers */
iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
@@ -1820,8 +1852,7 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
return 0;

err_free_irq:
/* Disable PCI interrupts */
iowrite32(0, irq_en_base);
kvaser_pciefd_disable_irq_srcs(pcie);
free_irq(pcie->pci->irq, pcie);

err_pci_free_irq_vectors:
@@ -1844,35 +1875,26 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
return ret;
}

static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie)
{
int i;

for (i = 0; i < pcie->nr_channels; i++) {
struct kvaser_pciefd_can *can = pcie->can[i];

if (can) {
iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
unregister_candev(can->can.dev);
timer_delete(&can->bec_poll_timer);
kvaser_pciefd_pwm_stop(can);
free_candev(can->can.dev);
}
}
}

static void kvaser_pciefd_remove(struct pci_dev *pdev)
{
struct kvaser_pciefd *pcie = pci_get_drvdata(pdev);
unsigned int i;

kvaser_pciefd_remove_all_ctrls(pcie);
for (i = 0; i < pcie->nr_channels; ++i) {
struct kvaser_pciefd_can *can = pcie->can[i];

/* Disable interrupts */
iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);
iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
unregister_candev(can->can.dev);
timer_delete(&can->bec_poll_timer);
kvaser_pciefd_pwm_stop(can);
}

kvaser_pciefd_disable_irq_srcs(pcie);
free_irq(pcie->pci->irq, pcie);
pci_free_irq_vectors(pcie->pci);

for (i = 0; i < pcie->nr_channels; ++i)
free_candev(pcie->can[i]->can.dev);

pci_iounmap(pdev, pcie->reg_base);
pci_release_regions(pdev);
pci_disable_device(pdev);
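The kvaser_pciefd changes above drop the echo_idx/netif_stop_queue bookkeeping in favour of free-running tx_idx/ack_idx counters plus BQL-style netif_subqueue_maybe_stop()/netif_subqueue_completed_wake() calls. Availability is the unsigned difference of the two counters, which stays correct across wraparound; echo slots are picked by masking tx_idx with echo_skb_max - 1, which is why echo_skb_max is rounded up to a power of two. A minimal standalone model of that arithmetic (the capacity and index values are made up):

#include <assert.h>
#include <stdio.h>

#define TX_MAX_COUNT 17u	/* hypothetical ring capacity */

/* tx_idx counts submitted frames, ack_idx counts completed ones; both are
 * free-running and may wrap, yet their difference is always in-flight count. */
static unsigned int tx_avail(unsigned int tx_idx, unsigned int ack_idx)
{
	return TX_MAX_COUNT - (tx_idx - ack_idx);
}

int main(void)
{
	unsigned int tx_idx = 0xfffffffcu;	/* about to wrap */
	unsigned int ack_idx = 0xfffffff0u;	/* 12 frames in flight */

	printf("in flight: %u, available: %u\n",
	       tx_idx - ack_idx, tx_avail(tx_idx, ack_idx));
	assert(tx_avail(tx_idx, ack_idx) == TX_MAX_COUNT - 12);

	tx_idx += 5;		/* wraps past zero */
	ack_idx += 10;
	assert(tx_idx - ack_idx == 7);
	assert(tx_avail(tx_idx, ack_idx) == TX_MAX_COUNT - 7);
	return 0;
}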

@@ -71,12 +71,21 @@ MODULE_AUTHOR("Dario Binacchi <dario.binacchi@amarulasolutions.com>");
#define SLCAN_CMD_LEN 1
#define SLCAN_SFF_ID_LEN 3
#define SLCAN_EFF_ID_LEN 8
#define SLCAN_DATA_LENGTH_LEN 1
#define SLCAN_ERROR_LEN 1
#define SLCAN_STATE_LEN 1
#define SLCAN_STATE_BE_RXCNT_LEN 3
#define SLCAN_STATE_BE_TXCNT_LEN 3
#define SLCAN_STATE_FRAME_LEN (1 + SLCAN_CMD_LEN + \
SLCAN_STATE_BE_RXCNT_LEN + \
SLCAN_STATE_BE_TXCNT_LEN)
#define SLCAN_STATE_MSG_LEN (SLCAN_CMD_LEN + \
SLCAN_STATE_LEN + \
SLCAN_STATE_BE_RXCNT_LEN + \
SLCAN_STATE_BE_TXCNT_LEN)
#define SLCAN_ERROR_MSG_LEN_MIN (SLCAN_CMD_LEN + \
SLCAN_ERROR_LEN + \
SLCAN_DATA_LENGTH_LEN)
#define SLCAN_FRAME_MSG_LEN_MIN (SLCAN_CMD_LEN + \
SLCAN_SFF_ID_LEN + \
SLCAN_DATA_LENGTH_LEN)
struct slcan {
struct can_priv can;

@@ -176,6 +185,9 @@ static void slcan_bump_frame(struct slcan *sl)
u32 tmpid;
char *cmd = sl->rbuff;

if (sl->rcount < SLCAN_FRAME_MSG_LEN_MIN)
return;

skb = alloc_can_skb(sl->dev, &cf);
if (unlikely(!skb)) {
sl->dev->stats.rx_dropped++;
@@ -281,7 +293,7 @@ static void slcan_bump_state(struct slcan *sl)
return;
}

if (state == sl->can.state || sl->rcount < SLCAN_STATE_FRAME_LEN)
if (state == sl->can.state || sl->rcount != SLCAN_STATE_MSG_LEN)
return;

cmd += SLCAN_STATE_BE_RXCNT_LEN + SLCAN_CMD_LEN + 1;
@@ -328,6 +340,9 @@ static void slcan_bump_err(struct slcan *sl)
bool rx_errors = false, tx_errors = false, rx_over_errors = false;
int i, len;

if (sl->rcount < SLCAN_ERROR_MSG_LEN_MIN)
return;

/* get len from sanitized ASCII value */
len = cmd[1];
if (len >= '0' && len < '9')
@@ -456,8 +471,7 @@ static void slcan_bump(struct slcan *sl)
static void slcan_unesc(struct slcan *sl, unsigned char s)
{
if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */
if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
sl->rcount > 4)
if (!test_and_clear_bit(SLF_ERROR, &sl->flags))
slcan_bump(sl);

sl->rcount = 0;

@@ -614,7 +614,6 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
struct airoha_queue_entry *e = &q->entry[q->tail];
struct airoha_qdma_desc *desc = &q->desc[q->tail];
u32 hash, reason, msg1 = le32_to_cpu(desc->msg1);
dma_addr_t dma_addr = le32_to_cpu(desc->addr);
struct page *page = virt_to_head_page(e->buf);
u32 desc_ctrl = le32_to_cpu(desc->ctrl);
struct airoha_gdm_port *port;
@@ -623,22 +622,16 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
if (!(desc_ctrl & QDMA_DESC_DONE_MASK))
break;

if (!dma_addr)
break;

len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl);
if (!len)
break;

q->tail = (q->tail + 1) % q->ndesc;
q->queued--;

dma_sync_single_for_cpu(eth->dev, dma_addr,
dma_sync_single_for_cpu(eth->dev, e->dma_addr,
SKB_WITH_OVERHEAD(q->buf_size), dir);

len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl);
data_len = q->skb ? q->buf_size
: SKB_WITH_OVERHEAD(q->buf_size);
if (data_len < len)
if (!len || data_len < len)
goto free_frag;

p = airoha_qdma_get_gdm_port(eth, desc);
@@ -701,9 +694,12 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
q->skb = NULL;
continue;
free_frag:
page_pool_put_full_page(q->page_pool, page, true);
dev_kfree_skb(q->skb);
q->skb = NULL;
if (q->skb) {
dev_kfree_skb(q->skb);
q->skb = NULL;
} else {
page_pool_put_full_page(q->page_pool, page, true);
}
}
airoha_qdma_fill_rx_queue(q);


@@ -20,6 +20,7 @@
#include <asm/byteorder.h>
#include <linux/bitmap.h>
#include <linux/auxiliary_bus.h>
#include <net/netdev_lock.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
@@ -309,14 +310,12 @@ void bnxt_ulp_irq_stop(struct bnxt *bp)
if (!ulp->msix_requested)
return;

netdev_lock(bp->dev);
ops = rcu_dereference(ulp->ulp_ops);
ops = netdev_lock_dereference(ulp->ulp_ops, bp->dev);
if (!ops || !ops->ulp_irq_stop)
return;
if (test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
reset = true;
ops->ulp_irq_stop(ulp->handle, reset);
netdev_unlock(bp->dev);
}
}

@@ -335,8 +334,7 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
if (!ulp->msix_requested)
return;

netdev_lock(bp->dev);
ops = rcu_dereference(ulp->ulp_ops);
ops = netdev_lock_dereference(ulp->ulp_ops, bp->dev);
if (!ops || !ops->ulp_irq_restart)
return;

@@ -348,7 +346,6 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
bnxt_fill_msix_vecs(bp, ent);
}
ops->ulp_irq_restart(ulp->handle, ent);
netdev_unlock(bp->dev);
kfree(ent);
}
}

@@ -61,6 +61,8 @@ static int hbg_reset_prepare(struct hbg_priv *priv, enum hbg_reset_type type)
return -EBUSY;
}

netif_device_detach(priv->netdev);

priv->reset_type = type;
set_bit(HBG_NIC_STATE_RESETTING, &priv->state);
clear_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state);
@@ -91,6 +93,8 @@ static int hbg_reset_done(struct hbg_priv *priv, enum hbg_reset_type type)
return ret;
}

netif_device_attach(priv->netdev);

dev_info(&priv->pdev->dev, "reset done\n");
return ret;
}
@@ -117,16 +121,13 @@ void hbg_err_reset(struct hbg_priv *priv)
if (running)
dev_close(priv->netdev);

hbg_reset(priv);

/* in hbg_pci_err_detected(), we will detach first,
 * so we need to attach before open
 */
if (!netif_device_present(priv->netdev))
netif_device_attach(priv->netdev);
if (hbg_reset(priv))
goto err_unlock;

if (running)
dev_open(priv->netdev, NULL);

err_unlock:
rtnl_unlock();
}

@@ -160,7 +161,6 @@ static pci_ers_result_t hbg_pci_err_slot_reset(struct pci_dev *pdev)
pci_save_state(pdev);

hbg_err_reset(priv);
netif_device_attach(netdev);
return PCI_ERS_RESULT_RECOVERED;
}


@@ -317,6 +317,9 @@ static void hbg_update_stats_by_info(struct hbg_priv *priv,
const struct hbg_ethtool_stats *stats;
u32 i;

if (test_bit(HBG_NIC_STATE_RESETTING, &priv->state))
return;

for (i = 0; i < info_len; i++) {
stats = &info[i];
if (!stats->reg)

@@ -1321,12 +1321,18 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
*/
if (!primary_lag) {
lag->primary = true;
if (!ice_is_switchdev_running(lag->pf))
return;

/* Configure primary's SWID to be shared */
ice_lag_primary_swid(lag, true);
primary_lag = lag;
} else {
u16 swid;

if (!ice_is_switchdev_running(primary_lag->pf))
return;

swid = primary_lag->pf->hw.port_info->sw_id;
ice_lag_set_swid(swid, lag, true);
ice_lag_add_prune_list(primary_lag, lag->pf);

@@ -4275,7 +4275,6 @@ static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg)
}

ice_vfhw_mac_add(vf, &al->list[i]);
vf->num_mac++;
break;
}


@@ -143,6 +143,7 @@ enum idpf_vport_state {
* @vport_id: Vport identifier
* @link_speed_mbps: Link speed in mbps
* @vport_idx: Relative vport index
* @max_tx_hdr_size: Max header length hardware can support
* @state: See enum idpf_vport_state
* @netstats: Packet and byte stats
* @stats_lock: Lock to protect stats update
@@ -153,6 +154,7 @@ struct idpf_netdev_priv {
u32 vport_id;
u32 link_speed_mbps;
u16 vport_idx;
u16 max_tx_hdr_size;
enum idpf_vport_state state;
struct rtnl_link_stats64 netstats;
spinlock_t stats_lock;

@@ -723,6 +723,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
np->vport = vport;
np->vport_idx = vport->idx;
np->vport_id = vport->vport_id;
np->max_tx_hdr_size = idpf_get_max_tx_hdr_size(adapter);
vport->netdev = netdev;

return idpf_init_mac_addr(vport, netdev);
@@ -740,6 +741,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
np->adapter = adapter;
np->vport_idx = vport->idx;
np->vport_id = vport->vport_id;
np->max_tx_hdr_size = idpf_get_max_tx_hdr_size(adapter);

spin_lock_init(&np->stats_lock);

@@ -2203,8 +2205,8 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb,
struct net_device *netdev,
netdev_features_t features)
{
struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
struct idpf_adapter *adapter = vport->adapter;
struct idpf_netdev_priv *np = netdev_priv(netdev);
u16 max_tx_hdr_size = np->max_tx_hdr_size;
size_t len;

/* No point in doing any of this if neither checksum nor GSO are
@@ -2227,7 +2229,7 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb,
goto unsupported;

len = skb_network_header_len(skb);
if (unlikely(len > idpf_get_max_tx_hdr_size(adapter)))
if (unlikely(len > max_tx_hdr_size))
goto unsupported;

if (!skb->encapsulation)
@@ -2240,7 +2242,7 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb,

/* IPLEN can support at most 127 dwords */
len = skb_inner_network_header_len(skb);
if (unlikely(len > idpf_get_max_tx_hdr_size(adapter)))
if (unlikely(len > max_tx_hdr_size))
goto unsupported;

/* No need to validate L4LEN as TCP is the only protocol with a

@@ -4025,6 +4025,14 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
return budget;
}

/* Switch to poll mode in the tear-down path after sending disable
 * queues virtchnl message, as the interrupts will be disabled after
 * that.
 */
if (unlikely(q_vector->num_txq && idpf_queue_has(POLL_MODE,
q_vector->tx[0])))
return budget;

work_done = min_t(int, work_done, budget - 1);

/* Exit the polling mode, but don't re-enable interrupts if stack might
@@ -4035,15 +4043,7 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
else
idpf_vport_intr_set_wb_on_itr(q_vector);

/* Switch to poll mode in the tear-down path after sending disable
 * queues virtchnl message, as the interrupts will be disabled after
 * that
 */
if (unlikely(q_vector->num_txq && idpf_queue_has(POLL_MODE,
q_vector->tx[0])))
return budget;
else
return work_done;
return work_done;
}

/**

@@ -13,19 +13,26 @@
/* RVU LMTST */
#define LMT_TBL_OP_READ 0
#define LMT_TBL_OP_WRITE 1
#define LMT_MAP_TABLE_SIZE (128 * 1024)
#define LMT_MAPTBL_ENTRY_SIZE 16
#define LMT_MAX_VFS 256

#define LMT_MAP_ENTRY_ENA BIT_ULL(20)
#define LMT_MAP_ENTRY_LINES GENMASK_ULL(18, 16)

/* Function to perform operations (read/write) on lmtst map table */
static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
int lmt_tbl_op)
{
void __iomem *lmt_map_base;
u64 tbl_base;
u64 tbl_base, cfg;
int pfs, vfs;

tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
cfg = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG);
vfs = 1 << (cfg & 0xF);
pfs = 1 << ((cfg >> 4) & 0x7);

lmt_map_base = ioremap_wc(tbl_base, LMT_MAP_TABLE_SIZE);
lmt_map_base = ioremap_wc(tbl_base, pfs * vfs * LMT_MAPTBL_ENTRY_SIZE);
if (!lmt_map_base) {
dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
return -ENOMEM;
@@ -35,6 +42,13 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
*val = readq(lmt_map_base + index);
} else {
writeq((*val), (lmt_map_base + index));

cfg = FIELD_PREP(LMT_MAP_ENTRY_ENA, 0x1);
/* 2048 LMTLINES */
cfg |= FIELD_PREP(LMT_MAP_ENTRY_LINES, 0x6);

writeq(cfg, (lmt_map_base + (index + 8)));

/* Flushing the AP interceptor cache to make APR_LMT_MAP_ENTRY_S
 * changes effective. Write 1 for flush and read is being used as a
 * barrier and sets up a data dependency. Write to 0 after a write
@@ -52,7 +66,7 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
#define LMT_MAP_TBL_W1_OFF 8
static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc)
{
return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) +
return ((rvu_get_pf(pcifunc) * LMT_MAX_VFS) +
(pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE;
}
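The octeontx2 hunks above stop hard-coding a 128 KiB mapping and a per-PF stride of rvu->hw->total_vfs: the window size is now derived from APR_AF_LMT_CFG (PF and VF counts decoded from bit fields) and entries are indexed with a fixed 256-VF stride. A small standalone model of that size/index arithmetic, reusing the constants visible in the hunk (the example cfg value is hypothetical):

#include <stdio.h>

#define LMT_MAPTBL_ENTRY_SIZE 16
#define LMT_MAX_VFS 256

/* Decode PF/VF counts the way the hunk reads APR_AF_LMT_CFG. */
static unsigned int cfg_vfs(unsigned long long cfg) { return 1u << (cfg & 0xF); }
static unsigned int cfg_pfs(unsigned long long cfg) { return 1u << ((cfg >> 4) & 0x7); }

/* Byte offset of the entry for a given PF and function number. */
static unsigned int tbl_index(unsigned int pf, unsigned int func)
{
	return (pf * LMT_MAX_VFS + func) * LMT_MAPTBL_ENTRY_SIZE;
}

int main(void)
{
	unsigned long long cfg = 0x48;	/* hypothetical: 16 PFs, 256 VFs */
	unsigned int map_size = cfg_pfs(cfg) * cfg_vfs(cfg) * LMT_MAPTBL_ENTRY_SIZE;

	printf("map size: %u bytes\n", map_size);
	/* VF3 of PF2; function number 0 is the PF itself. */
	printf("entry for PF2/VF3: offset %u\n", tbl_index(2, 4));
	return 0;
}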
|
||||
|
||||
@@ -69,7 +83,7 @@ static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
|
||||
|
||||
mutex_lock(&rvu->rsrc_lock);
|
||||
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova);
|
||||
pf = rvu_get_pf(pcifunc) & 0x1F;
|
||||
pf = rvu_get_pf(pcifunc) & RVU_PFVF_PF_MASK;
|
||||
val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 |
|
||||
((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF);
|
||||
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val);
|
||||
|
||||
@@ -553,6 +553,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
|
||||
u64 lmt_addr, val, tbl_base;
|
||||
int pf, vf, num_vfs, hw_vfs;
|
||||
void __iomem *lmt_map_base;
|
||||
int apr_pfs, apr_vfs;
|
||||
int buf_size = 10240;
|
||||
size_t off = 0;
|
||||
int index = 0;
|
||||
@@ -568,8 +569,12 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
|
||||
return -ENOMEM;
|
||||
|
||||
tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
|
||||
val = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG);
|
||||
apr_vfs = 1 << (val & 0xF);
|
||||
apr_pfs = 1 << ((val >> 4) & 0x7);
|
||||
|
||||
lmt_map_base = ioremap_wc(tbl_base, 128 * 1024);
|
||||
lmt_map_base = ioremap_wc(tbl_base, apr_pfs * apr_vfs *
|
||||
LMT_MAPTBL_ENTRY_SIZE);
|
||||
if (!lmt_map_base) {
|
||||
dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
|
||||
kfree(buf);
|
||||
@@ -591,7 +596,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
|
||||
off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d \t\t\t",
|
||||
pf);
|
||||
|
||||
index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE;
|
||||
index = pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE;
|
||||
off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t",
|
||||
(tbl_base + index));
|
||||
lmt_addr = readq(lmt_map_base + index);
|
||||
@@ -604,7 +609,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
|
||||
/* Reading num of VFs per PF */
|
||||
rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs);
|
||||
for (vf = 0; vf < num_vfs; vf++) {
|
||||
index = (pf * rvu->hw->total_vfs * 16) +
|
||||
index = (pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE) +
|
||||
((vf + 1) * LMT_MAPTBL_ENTRY_SIZE);
|
||||
off += scnprintf(&buf[off], buf_size - 1 - off,
|
||||
"PF%d:VF%d \t\t", pf, vf);
|
||||
|
||||
@@ -729,9 +729,12 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
}
|
||||
|
||||
#ifdef CONFIG_DCB
|
||||
err = otx2_dcbnl_set_ops(netdev);
|
||||
if (err)
|
||||
goto err_free_zc_bmap;
|
||||
/* Priority flow control is not supported for LBK and SDP vf(s) */
|
||||
if (!(is_otx2_lbkvf(vf->pdev) || is_otx2_sdp_rep(vf->pdev))) {
|
||||
err = otx2_dcbnl_set_ops(netdev);
|
||||
if (err)
|
||||
goto err_free_zc_bmap;
|
||||
}
|
||||
#endif
|
||||
otx2_qos_init(vf, qos_txqs);
|
||||
|
||||
|
||||
@@ -3495,6 +3495,7 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
|
||||
struct pci_dev *pdev)
|
||||
{
|
||||
struct lan743x_tx *tx;
|
||||
u32 sgmii_ctl;
|
||||
int index;
|
||||
int ret;
|
||||
|
||||
@@ -3507,6 +3508,15 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
|
||||
spin_lock_init(&adapter->eth_syslock_spinlock);
|
||||
mutex_init(&adapter->sgmii_rw_lock);
|
||||
pci11x1x_set_rfe_rd_fifo_threshold(adapter);
|
||||
sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
|
||||
if (adapter->is_sgmii_en) {
|
||||
sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_;
|
||||
sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_;
|
||||
} else {
|
||||
sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_;
|
||||
sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_;
|
||||
}
|
||||
lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
|
||||
} else {
|
||||
adapter->max_tx_channels = LAN743X_MAX_TX_CHANNELS;
|
||||
adapter->used_tx_channels = LAN743X_USED_TX_CHANNELS;
|
||||
@@ -3558,7 +3568,6 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
|
||||
|
||||
static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
|
||||
{
|
||||
u32 sgmii_ctl;
|
||||
int ret;
|
||||
|
||||
adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
|
||||
@@ -3570,10 +3579,6 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
|
||||
adapter->mdiobus->priv = (void *)adapter;
|
||||
if (adapter->is_pci11x1x) {
|
||||
if (adapter->is_sgmii_en) {
|
||||
sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
|
||||
sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_;
|
||||
sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_;
|
||||
lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
|
||||
netif_dbg(adapter, drv, adapter->netdev,
|
||||
"SGMII operation\n");
|
||||
adapter->mdiobus->read = lan743x_mdiobus_read_c22;
|
||||
@@ -3584,10 +3589,6 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
|
||||
netif_dbg(adapter, drv, adapter->netdev,
|
||||
"lan743x-mdiobus-c45\n");
|
||||
} else {
|
||||
sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
|
||||
sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_;
|
||||
sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_;
|
||||
lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
|
||||
netif_dbg(adapter, drv, adapter->netdev,
|
||||
"RGMII operation\n");
|
||||
// Only C22 support when RGMII I/F
|
||||
|
||||
@@ -964,7 +964,7 @@ static int sun8i_dwmac_set_syscon(struct device *dev,
|
||||
/* of_mdio_parse_addr returns a valid (0 ~ 31) PHY
|
||||
* address. No need to mask it again.
|
||||
*/
|
||||
reg |= 1 << H3_EPHY_ADDR_SHIFT;
|
||||
reg |= ret << H3_EPHY_ADDR_SHIFT;
|
||||
} else {
|
||||
/* For SoCs without internal PHY the PHY selection bit should be
|
||||
* set to 0 (external PHY).
|
||||
|
||||
@@ -2685,7 +2685,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
|
||||
port->slave.mac_addr);
|
||||
if (!is_valid_ether_addr(port->slave.mac_addr)) {
|
||||
eth_random_addr(port->slave.mac_addr);
|
||||
dev_err(dev, "Use random MAC address\n");
|
||||
dev_info(dev, "Use random MAC address\n");
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1778,8 +1778,8 @@ static void team_change_rx_flags(struct net_device *dev, int change)
|
||||
struct team_port *port;
|
||||
int inc;
|
||||
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(port, &team->port_list, list) {
|
||||
mutex_lock(&team->lock);
|
||||
list_for_each_entry(port, &team->port_list, list) {
|
||||
if (change & IFF_PROMISC) {
|
||||
inc = dev->flags & IFF_PROMISC ? 1 : -1;
|
||||
dev_set_promiscuity(port->dev, inc);
|
||||
@@ -1789,7 +1789,7 @@ static void team_change_rx_flags(struct net_device *dev, int change)
|
||||
dev_set_allmulti(port->dev, inc);
|
||||
}
|
||||
}
|
||||
rcu_read_unlock();
|
||||
mutex_unlock(&team->lock);
|
||||
}
|
||||
|
||||
static void team_set_rx_mode(struct net_device *dev)
|
||||
|
||||
@@ -3607,8 +3607,6 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
|
||||
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
|
||||
int err = 0;
|
||||
|
||||
WRITE_ONCE(netdev->mtu, new_mtu);
|
||||
|
||||
/*
|
||||
* Reset_work may be in the middle of resetting the device, wait for its
|
||||
* completion.
|
||||
@@ -3622,6 +3620,7 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
|
||||
|
||||
/* we need to re-create the rx queue based on the new mtu */
|
||||
vmxnet3_rq_destroy_all(adapter);
|
||||
WRITE_ONCE(netdev->mtu, new_mtu);
|
||||
vmxnet3_adjust_rx_ring_size(adapter);
|
||||
err = vmxnet3_rq_create_all(adapter);
|
||||
if (err) {
|
||||
@@ -3638,6 +3637,8 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
|
||||
"Closing it\n", err);
|
||||
goto out;
|
||||
}
|
||||
} else {
|
||||
WRITE_ONCE(netdev->mtu, new_mtu);
|
||||
}
|
||||
|
||||
out:
|
||||
|
||||
@@ -315,6 +315,8 @@ struct ptp_ocp_serial_port {
|
||||
#define OCP_BOARD_ID_LEN 13
|
||||
#define OCP_SERIAL_LEN 6
|
||||
#define OCP_SMA_NUM 4
|
||||
#define OCP_SIGNAL_NUM 4
|
||||
#define OCP_FREQ_NUM 4
|
||||
|
||||
enum {
|
||||
PORT_GNSS,
|
||||
@@ -342,8 +344,8 @@ struct ptp_ocp {
|
||||
struct dcf_master_reg __iomem *dcf_out;
|
||||
struct dcf_slave_reg __iomem *dcf_in;
|
||||
struct tod_reg __iomem *nmea_out;
|
||||
struct frequency_reg __iomem *freq_in[4];
|
||||
struct ptp_ocp_ext_src *signal_out[4];
|
||||
struct frequency_reg __iomem *freq_in[OCP_FREQ_NUM];
|
||||
struct ptp_ocp_ext_src *signal_out[OCP_SIGNAL_NUM];
|
||||
struct ptp_ocp_ext_src *pps;
|
||||
struct ptp_ocp_ext_src *ts0;
|
||||
struct ptp_ocp_ext_src *ts1;
|
||||
@@ -378,10 +380,12 @@ struct ptp_ocp {
|
||||
u32 utc_tai_offset;
|
||||
u32 ts_window_adjust;
|
||||
u64 fw_cap;
|
||||
struct ptp_ocp_signal signal[4];
|
||||
struct ptp_ocp_signal signal[OCP_SIGNAL_NUM];
|
||||
struct ptp_ocp_sma_connector sma[OCP_SMA_NUM];
|
||||
const struct ocp_sma_op *sma_op;
|
||||
struct dpll_device *dpll;
|
||||
int signals_nr;
|
||||
int freq_in_nr;
|
||||
};
|
||||
|
||||
#define OCP_REQ_TIMESTAMP BIT(0)
|
||||
@@ -2697,6 +2701,8 @@ ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
|
||||
bp->eeprom_map = fb_eeprom_map;
|
||||
bp->fw_version = ioread32(&bp->image->version);
|
||||
bp->sma_op = &ocp_fb_sma_op;
|
||||
bp->signals_nr = 4;
|
||||
bp->freq_in_nr = 4;
|
||||
|
||||
ptp_ocp_fb_set_version(bp);
|
||||
|
||||
@@ -2862,6 +2868,8 @@ ptp_ocp_art_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
|
||||
bp->fw_version = ioread32(&bp->reg->version);
|
||||
bp->fw_tag = 2;
|
||||
bp->sma_op = &ocp_art_sma_op;
|
||||
bp->signals_nr = 4;
|
||||
bp->freq_in_nr = 4;
|
||||
|
||||
/* Enable MAC serial port during initialisation */
|
||||
iowrite32(1, &bp->board_config->mro50_serial_activate);
|
||||
@@ -2888,6 +2896,8 @@ ptp_ocp_adva_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
|
||||
bp->flash_start = 0xA00000;
|
||||
bp->eeprom_map = fb_eeprom_map;
|
||||
bp->sma_op = &ocp_adva_sma_op;
|
||||
bp->signals_nr = 2;
|
||||
bp->freq_in_nr = 2;
|
||||
|
||||
version = ioread32(&bp->image->version);
|
||||
/* if lower 16 bits are empty, this is the fw loader. */
|
||||
@@ -4008,7 +4018,7 @@ _signal_summary_show(struct seq_file *s, struct ptp_ocp *bp, int nr)
|
||||
{
|
||||
struct signal_reg __iomem *reg = bp->signal_out[nr]->mem;
|
||||
struct ptp_ocp_signal *signal = &bp->signal[nr];
|
||||
char label[8];
|
||||
char label[16];
|
||||
bool on;
|
||||
u32 val;
|
||||
|
||||
@@ -4031,7 +4041,7 @@ static void
|
||||
_frequency_summary_show(struct seq_file *s, int nr,
|
||||
struct frequency_reg __iomem *reg)
|
||||
{
|
||||
char label[8];
|
||||
char label[16];
|
||||
bool on;
|
||||
u32 val;
|
||||
|
||||
@@ -4175,11 +4185,11 @@ ptp_ocp_summary_show(struct seq_file *s, void *data)
|
||||
}
|
||||
|
||||
if (bp->fw_cap & OCP_CAP_SIGNAL)
|
||||
for (i = 0; i < 4; i++)
|
||||
for (i = 0; i < bp->signals_nr; i++)
|
||||
_signal_summary_show(s, bp, i);
|
||||
|
||||
if (bp->fw_cap & OCP_CAP_FREQ)
|
||||
for (i = 0; i < 4; i++)
|
||||
for (i = 0; i < bp->freq_in_nr; i++)
|
||||
_frequency_summary_show(s, i, bp->freq_in[i]);
|
||||
|
||||
if (bp->irig_out) {
|
||||
|
||||
@@ -262,6 +262,11 @@ struct mr_table {
|
||||
int mroute_reg_vif_num;
|
||||
};
|
||||
|
||||
static inline bool mr_can_free_table(struct net *net)
|
||||
{
|
||||
return !check_net(net) || !net_initialized(net);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_IP_MROUTE_COMMON
|
||||
void vif_device_init(struct vif_device *v,
|
||||
struct net_device *dev,
|
||||
|
||||
@@ -98,6 +98,9 @@ static inline int netdev_lock_cmp_fn(const struct lockdep_map *a,
|
||||
&qdisc_xmit_lock_key); \
|
||||
}
|
||||
|
||||
#define netdev_lock_dereference(p, dev) \
|
||||
rcu_dereference_protected(p, lockdep_is_held(&(dev)->lock))
|
||||
|
||||
int netdev_debug_event(struct notifier_block *nb, unsigned long event,
|
||||
void *ptr);
|
||||
|
||||
|
||||
@@ -236,7 +236,6 @@ struct xfrm_state {
|
||||
|
||||
/* Data for encapsulator */
|
||||
struct xfrm_encap_tmpl *encap;
|
||||
struct sock __rcu *encap_sk;
|
||||
|
||||
/* NAT keepalive */
|
||||
u32 nat_keepalive_interval; /* seconds */
|
||||
|
||||
@@ -1411,7 +1411,8 @@ static void l2cap_request_info(struct l2cap_conn *conn)
|
||||
sizeof(req), &req);
|
||||
}
|
||||
|
||||
static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
|
||||
static bool l2cap_check_enc_key_size(struct hci_conn *hcon,
|
||||
struct l2cap_chan *chan)
|
||||
{
|
||||
/* The minimum encryption key size needs to be enforced by the
|
||||
* host stack before establishing any L2CAP connections. The
|
||||
@@ -1425,7 +1426,7 @@ static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
|
||||
int min_key_size = hcon->hdev->min_enc_key_size;
|
||||
|
||||
/* On FIPS security level, key size must be 16 bytes */
|
||||
if (hcon->sec_level == BT_SECURITY_FIPS)
|
||||
if (chan->sec_level == BT_SECURITY_FIPS)
|
||||
min_key_size = 16;
|
||||
|
||||
return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
|
||||
@@ -1453,7 +1454,7 @@ static void l2cap_do_start(struct l2cap_chan *chan)
|
||||
!__l2cap_no_conn_pending(chan))
|
||||
return;
|
||||
|
||||
if (l2cap_check_enc_key_size(conn->hcon))
|
||||
if (l2cap_check_enc_key_size(conn->hcon, chan))
|
||||
l2cap_start_connection(chan);
|
||||
else
|
||||
__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
|
||||
@@ -1528,7 +1529,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
|
||||
continue;
|
||||
}
|
||||
|
||||
if (l2cap_check_enc_key_size(conn->hcon))
|
||||
if (l2cap_check_enc_key_size(conn->hcon, chan))
|
||||
l2cap_start_connection(chan);
|
||||
else
|
||||
l2cap_chan_close(chan, ECONNREFUSED);
|
||||
@@ -3992,7 +3993,7 @@ static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
|
||||
/* Check if the ACL is secure enough (if not SDP) */
|
||||
if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
|
||||
(!hci_conn_check_link_mode(conn->hcon) ||
|
||||
!l2cap_check_enc_key_size(conn->hcon))) {
|
||||
!l2cap_check_enc_key_size(conn->hcon, pchan))) {
|
||||
conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
|
||||
result = L2CAP_CR_SEC_BLOCK;
|
||||
goto response;
|
||||
@@ -7352,7 +7353,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
|
||||
}
|
||||
|
||||
if (chan->state == BT_CONNECT) {
|
||||
if (!status && l2cap_check_enc_key_size(hcon))
|
||||
if (!status && l2cap_check_enc_key_size(hcon, chan))
|
||||
l2cap_start_connection(chan);
|
||||
else
|
||||
__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
|
||||
@@ -7362,7 +7363,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
|
||||
struct l2cap_conn_rsp rsp;
|
||||
__u16 res, stat;
|
||||
|
||||
if (!status && l2cap_check_enc_key_size(hcon)) {
|
||||
if (!status && l2cap_check_enc_key_size(hcon, chan)) {
|
||||
if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
|
||||
res = L2CAP_CR_PEND;
|
||||
stat = L2CAP_CS_AUTHOR_PEND;
|
||||
|
||||
@@ -65,17 +65,14 @@ static struct dst_ops fake_dst_ops = {
|
||||
* ipt_REJECT needs it. Future netfilter modules might
|
||||
* require us to fill additional fields.
|
||||
*/
|
||||
static const u32 br_dst_default_metrics[RTAX_MAX] = {
|
||||
[RTAX_MTU - 1] = 1500,
|
||||
};
|
||||
|
||||
void br_netfilter_rtable_init(struct net_bridge *br)
|
||||
{
|
||||
struct rtable *rt = &br->fake_rtable;
|
||||
|
||||
rcuref_init(&rt->dst.__rcuref, 1);
|
||||
rt->dst.dev = br->dev;
|
||||
dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
|
||||
dst_init_metrics(&rt->dst, br->metrics, false);
|
||||
dst_metric_set(&rt->dst, RTAX_MTU, br->dev->mtu);
|
||||
rt->dst.flags = DST_NOXFRM | DST_FAKE_RTABLE;
|
||||
rt->dst.ops = &fake_dst_ops;
|
||||
}
|
||||
|
||||
@@ -505,6 +505,7 @@ struct net_bridge {
|
||||
struct rtable fake_rtable;
|
||||
struct rt6_info fake_rt6_info;
|
||||
};
|
||||
u32 metrics[RTAX_MAX];
|
||||
#endif
|
||||
u16 group_fwd_mask;
|
||||
u16 group_fwd_mask_required;
|
||||
|
||||
@@ -58,6 +58,7 @@
#include <linux/can/skb.h>
#include <linux/can/bcm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/sock.h>
#include <net/net_namespace.h>

@@ -122,6 +123,7 @@ struct bcm_op {
struct canfd_frame last_sframe;
struct sock *sk;
struct net_device *rx_reg_dev;
spinlock_t bcm_tx_lock; /* protect currframe/count in runtime updates */
};

struct bcm_sock {
@@ -217,7 +219,9 @@ static int bcm_proc_show(struct seq_file *m, void *v)
seq_printf(m, " / bound %s", bcm_proc_getifname(net, ifname, bo->ifindex));
seq_printf(m, " <<<\n");

list_for_each_entry(op, &bo->rx_ops, list) {
rcu_read_lock();

list_for_each_entry_rcu(op, &bo->rx_ops, list) {

unsigned long reduction;

@@ -273,6 +277,9 @@ static int bcm_proc_show(struct seq_file *m, void *v)
seq_printf(m, "# sent %ld\n", op->frames_abs);
}
seq_putc(m, '\n');

rcu_read_unlock();

return 0;
}
#endif /* CONFIG_PROC_FS */
@@ -285,13 +292,18 @@ static void bcm_can_tx(struct bcm_op *op)
{
struct sk_buff *skb;
struct net_device *dev;
struct canfd_frame *cf = op->frames + op->cfsiz * op->currframe;
struct canfd_frame *cf;
int err;

/* no target device? => exit */
if (!op->ifindex)
return;

/* read currframe under lock protection */
spin_lock_bh(&op->bcm_tx_lock);
cf = op->frames + op->cfsiz * op->currframe;
spin_unlock_bh(&op->bcm_tx_lock);

dev = dev_get_by_index(sock_net(op->sk), op->ifindex);
if (!dev) {
/* RFC: should this bcm_op remove itself here? */
@@ -312,6 +324,10 @@ static void bcm_can_tx(struct bcm_op *op)
skb->dev = dev;
can_skb_set_owner(skb, op->sk);
err = can_send(skb, 1);

/* update currframe and count under lock protection */
spin_lock_bh(&op->bcm_tx_lock);

if (!err)
op->frames_abs++;

@@ -320,6 +336,11 @@ static void bcm_can_tx(struct bcm_op *op)
/* reached last frame? */
if (op->currframe >= op->nframes)
op->currframe = 0;

if (op->count > 0)
op->count--;

spin_unlock_bh(&op->bcm_tx_lock);
out:
dev_put(dev);
}
@@ -430,7 +451,7 @@ static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
struct bcm_msg_head msg_head;

if (op->kt_ival1 && (op->count > 0)) {
op->count--;
bcm_can_tx(op);
if (!op->count && (op->flags & TX_COUNTEVT)) {

/* create notification to user */
@@ -445,7 +466,6 @@ static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)

bcm_send_to_user(op, &msg_head, NULL, 0);
}
bcm_can_tx(op);

} else if (op->kt_ival2) {
bcm_can_tx(op);
@@ -843,7 +863,7 @@ static int bcm_delete_rx_op(struct list_head *ops, struct bcm_msg_head *mh,
REGMASK(op->can_id),
bcm_rx_handler, op);

list_del(&op->list);
list_del_rcu(&op->list);
bcm_remove_op(op);
return 1; /* done */
}
@@ -863,7 +883,7 @@ static int bcm_delete_tx_op(struct list_head *ops, struct bcm_msg_head *mh,
list_for_each_entry_safe(op, n, ops, list) {
if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
(op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) {
list_del(&op->list);
list_del_rcu(&op->list);
bcm_remove_op(op);
return 1; /* done */
}
@@ -956,6 +976,27 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
}
op->flags = msg_head->flags;

/* only lock for unlikely count/nframes/currframe changes */
if (op->nframes != msg_head->nframes ||
op->flags & TX_RESET_MULTI_IDX ||
op->flags & SETTIMER) {

spin_lock_bh(&op->bcm_tx_lock);

if (op->nframes != msg_head->nframes ||
op->flags & TX_RESET_MULTI_IDX) {
/* potentially update changed nframes */
op->nframes = msg_head->nframes;
/* restart multiple frame transmission */
op->currframe = 0;
}

if (op->flags & SETTIMER)
op->count = msg_head->count;

spin_unlock_bh(&op->bcm_tx_lock);
}

} else {
/* insert new BCM operation for the given can_id */

@@ -963,9 +1004,14 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
if (!op)
return -ENOMEM;

spin_lock_init(&op->bcm_tx_lock);
op->can_id = msg_head->can_id;
op->cfsiz = CFSIZ(msg_head->flags);
op->flags = msg_head->flags;
op->nframes = msg_head->nframes;

if (op->flags & SETTIMER)
op->count = msg_head->count;

/* create array for CAN frames and copy the data */
if (msg_head->nframes > 1) {
@@ -1023,22 +1069,8 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,

} /* if ((op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex))) */

if (op->nframes != msg_head->nframes) {
op->nframes = msg_head->nframes;
/* start multiple frame transmission with index 0 */
op->currframe = 0;
}

/* check flags */

if (op->flags & TX_RESET_MULTI_IDX) {
/* start multiple frame transmission with index 0 */
op->currframe = 0;
}

if (op->flags & SETTIMER) {
/* set timer values */
op->count = msg_head->count;
op->ival1 = msg_head->ival1;
op->ival2 = msg_head->ival2;
op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1);
@@ -1055,11 +1087,8 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
op->flags |= TX_ANNOUNCE;
}

if (op->flags & TX_ANNOUNCE) {
if (op->flags & TX_ANNOUNCE)
bcm_can_tx(op);
if (op->count)
op->count--;
}

if (op->flags & STARTTIMER)
bcm_tx_start_timer(op);
@@ -1272,7 +1301,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
bcm_rx_handler, op, "bcm", sk);
if (err) {
/* this bcm rx op is broken -> remove it */
list_del(&op->list);
list_del_rcu(&op->list);
bcm_remove_op(op);
return err;
}
@@ -140,7 +140,12 @@ static struct sk_buff *ksz8795_xmit(struct sk_buff *skb, struct net_device *dev)

static struct sk_buff *ksz8795_rcv(struct sk_buff *skb, struct net_device *dev)
{
u8 *tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN;
u8 *tag;

if (skb_linearize(skb))
return NULL;

tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN;

return ksz_common_rcv(skb, dev, tag[0] & KSZ8795_TAIL_TAG_EG_PORT_M,
KSZ_EGRESS_TAG_LEN);
@@ -311,10 +316,16 @@ static struct sk_buff *ksz9477_xmit(struct sk_buff *skb,

static struct sk_buff *ksz9477_rcv(struct sk_buff *skb, struct net_device *dev)
{
/* Tag decoding */
u8 *tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN;
unsigned int port = tag[0] & KSZ9477_TAIL_TAG_EG_PORT_M;
unsigned int len = KSZ_EGRESS_TAG_LEN;
unsigned int port;
u8 *tag;

if (skb_linearize(skb))
return NULL;

/* Tag decoding */
tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN;
port = tag[0] & KSZ9477_TAIL_TAG_EG_PORT_M;

/* Extra 4-bytes PTP timestamp */
if (tag[0] & KSZ9477_PTP_TAG_INDICATION) {
@@ -120,47 +120,16 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb)
}

#ifdef CONFIG_INET_ESPINTCP
struct esp_tcp_sk {
struct sock *sk;
struct rcu_head rcu;
};

static void esp_free_tcp_sk(struct rcu_head *head)
{
struct esp_tcp_sk *esk = container_of(head, struct esp_tcp_sk, rcu);

sock_put(esk->sk);
kfree(esk);
}

static struct sock *esp_find_tcp_sk(struct xfrm_state *x)
{
struct xfrm_encap_tmpl *encap = x->encap;
struct net *net = xs_net(x);
struct esp_tcp_sk *esk;
__be16 sport, dport;
struct sock *nsk;
struct sock *sk;

sk = rcu_dereference(x->encap_sk);
if (sk && sk->sk_state == TCP_ESTABLISHED)
return sk;

spin_lock_bh(&x->lock);
sport = encap->encap_sport;
dport = encap->encap_dport;
nsk = rcu_dereference_protected(x->encap_sk,
lockdep_is_held(&x->lock));
if (sk && sk == nsk) {
esk = kmalloc(sizeof(*esk), GFP_ATOMIC);
if (!esk) {
spin_unlock_bh(&x->lock);
return ERR_PTR(-ENOMEM);
}
RCU_INIT_POINTER(x->encap_sk, NULL);
esk->sk = sk;
call_rcu(&esk->rcu, esp_free_tcp_sk);
}
spin_unlock_bh(&x->lock);

sk = inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, x->id.daddr.a4,
@@ -173,20 +142,6 @@ static struct sock *esp_find_tcp_sk(struct xfrm_state *x)
return ERR_PTR(-EINVAL);
}

spin_lock_bh(&x->lock);
nsk = rcu_dereference_protected(x->encap_sk,
lockdep_is_held(&x->lock));
if (encap->encap_sport != sport ||
encap->encap_dport != dport) {
sock_put(sk);
sk = nsk ?: ERR_PTR(-EREMCHG);
} else if (sk == nsk) {
sock_put(sk);
} else {
rcu_assign_pointer(x->encap_sk, sk);
}
spin_unlock_bh(&x->lock);

return sk;
}

@@ -199,8 +154,10 @@ static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb)

sk = esp_find_tcp_sk(x);
err = PTR_ERR_OR_ZERO(sk);
if (err)
if (err) {
kfree_skb(skb);
goto out;
}

bh_lock_sock(sk);
if (sock_owned_by_user(sk))
@@ -209,6 +166,8 @@ static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb)
err = espintcp_push_skb(sk, skb);
bh_unlock_sock(sk);

sock_put(sk);

out:
rcu_read_unlock();
return err;
@@ -392,6 +351,8 @@ static struct ip_esp_hdr *esp_output_tcp_encap(struct xfrm_state *x,
if (IS_ERR(sk))
return ERR_CAST(sk);

sock_put(sk);

*lenp = htons(len);
esph = (struct ip_esp_hdr *)(lenp + 1);
@@ -120,11 +120,6 @@ static void ipmr_expire_process(struct timer_list *t);
lockdep_rtnl_is_held() || \
list_empty(&net->ipv4.mr_tables))

static bool ipmr_can_free_table(struct net *net)
{
return !check_net(net) || !net_initialized(net);
}

static struct mr_table *ipmr_mr_table_iter(struct net *net,
struct mr_table *mrt)
{
@@ -317,11 +312,6 @@ EXPORT_SYMBOL(ipmr_rule_default);
#define ipmr_for_each_table(mrt, net) \
for (mrt = net->ipv4.mrt; mrt; mrt = NULL)

static bool ipmr_can_free_table(struct net *net)
{
return !check_net(net);
}

static struct mr_table *ipmr_mr_table_iter(struct net *net,
struct mr_table *mrt)
{
@@ -437,7 +427,7 @@ static void ipmr_free_table(struct mr_table *mrt)
{
struct net *net = read_pnet(&mrt->net);

WARN_ON_ONCE(!ipmr_can_free_table(net));
WARN_ON_ONCE(!mr_can_free_table(net));

timer_shutdown_sync(&mrt->ipmr_expire_timer);
mroute_clean_tables(mrt, MRT_FLUSH_VIFS | MRT_FLUSH_VIFS_STATIC |
@@ -182,11 +182,15 @@ struct sk_buff *xfrm4_gro_udp_encap_rcv(struct sock *sk, struct list_head *head,
int offset = skb_gro_offset(skb);
const struct net_offload *ops;
struct sk_buff *pp = NULL;
int ret;
int len, dlen;
__u8 *udpdata;
__be32 *udpdata32;

offset = offset - sizeof(struct udphdr);

if (!pskb_pull(skb, offset))
len = skb->len - offset;
dlen = offset + min(len, 8);
udpdata = skb_gro_header(skb, dlen, offset);
udpdata32 = (__be32 *)udpdata;
if (unlikely(!udpdata))
return NULL;

rcu_read_lock();
@@ -194,11 +198,10 @@ struct sk_buff *xfrm4_gro_udp_encap_rcv(struct sock *sk, struct list_head *head,
if (!ops || !ops->callbacks.gro_receive)
goto out;

ret = __xfrm4_udp_encap_rcv(sk, skb, false);
if (ret)
/* check if it is a keepalive or IKE packet */
if (len <= sizeof(struct ip_esp_hdr) || udpdata32[0] == 0)
goto out;

skb_push(skb, offset);
NAPI_GRO_CB(skb)->proto = IPPROTO_UDP;

pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
@@ -208,7 +211,6 @@ struct sk_buff *xfrm4_gro_udp_encap_rcv(struct sock *sk, struct list_head *head,

out:
rcu_read_unlock();
skb_push(skb, offset);
NAPI_GRO_CB(skb)->same_flow = 0;
NAPI_GRO_CB(skb)->flush = 1;
@@ -137,47 +137,16 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb)
}

#ifdef CONFIG_INET6_ESPINTCP
struct esp_tcp_sk {
struct sock *sk;
struct rcu_head rcu;
};

static void esp_free_tcp_sk(struct rcu_head *head)
{
struct esp_tcp_sk *esk = container_of(head, struct esp_tcp_sk, rcu);

sock_put(esk->sk);
kfree(esk);
}

static struct sock *esp6_find_tcp_sk(struct xfrm_state *x)
{
struct xfrm_encap_tmpl *encap = x->encap;
struct net *net = xs_net(x);
struct esp_tcp_sk *esk;
__be16 sport, dport;
struct sock *nsk;
struct sock *sk;

sk = rcu_dereference(x->encap_sk);
if (sk && sk->sk_state == TCP_ESTABLISHED)
return sk;

spin_lock_bh(&x->lock);
sport = encap->encap_sport;
dport = encap->encap_dport;
nsk = rcu_dereference_protected(x->encap_sk,
lockdep_is_held(&x->lock));
if (sk && sk == nsk) {
esk = kmalloc(sizeof(*esk), GFP_ATOMIC);
if (!esk) {
spin_unlock_bh(&x->lock);
return ERR_PTR(-ENOMEM);
}
RCU_INIT_POINTER(x->encap_sk, NULL);
esk->sk = sk;
call_rcu(&esk->rcu, esp_free_tcp_sk);
}
spin_unlock_bh(&x->lock);

sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, &x->id.daddr.in6,
@@ -190,20 +159,6 @@ static struct sock *esp6_find_tcp_sk(struct xfrm_state *x)
return ERR_PTR(-EINVAL);
}

spin_lock_bh(&x->lock);
nsk = rcu_dereference_protected(x->encap_sk,
lockdep_is_held(&x->lock));
if (encap->encap_sport != sport ||
encap->encap_dport != dport) {
sock_put(sk);
sk = nsk ?: ERR_PTR(-EREMCHG);
} else if (sk == nsk) {
sock_put(sk);
} else {
rcu_assign_pointer(x->encap_sk, sk);
}
spin_unlock_bh(&x->lock);

return sk;
}

@@ -216,8 +171,10 @@ static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb)

sk = esp6_find_tcp_sk(x);
err = PTR_ERR_OR_ZERO(sk);
if (err)
if (err) {
kfree_skb(skb);
goto out;
}

bh_lock_sock(sk);
if (sock_owned_by_user(sk))
@@ -226,6 +183,8 @@ static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb)
err = espintcp_push_skb(sk, skb);
bh_unlock_sock(sk);

sock_put(sk);

out:
rcu_read_unlock();
return err;
@@ -422,6 +381,8 @@ static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x,
if (IS_ERR(sk))
return ERR_CAST(sk);

sock_put(sk);

*lenp = htons(len);
esph = (struct ip_esp_hdr *)(lenp + 1);
@@ -108,11 +108,6 @@ static void ipmr_expire_process(struct timer_list *t);
lockdep_rtnl_is_held() || \
list_empty(&net->ipv6.mr6_tables))

static bool ip6mr_can_free_table(struct net *net)
{
return !check_net(net) || !net_initialized(net);
}

static struct mr_table *ip6mr_mr_table_iter(struct net *net,
struct mr_table *mrt)
{
@@ -306,11 +301,6 @@ EXPORT_SYMBOL(ip6mr_rule_default);
#define ip6mr_for_each_table(mrt, net) \
for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)

static bool ip6mr_can_free_table(struct net *net)
{
return !check_net(net);
}

static struct mr_table *ip6mr_mr_table_iter(struct net *net,
struct mr_table *mrt)
{
@@ -416,7 +406,7 @@ static void ip6mr_free_table(struct mr_table *mrt)
{
struct net *net = read_pnet(&mrt->net);

WARN_ON_ONCE(!ip6mr_can_free_table(net));
WARN_ON_ONCE(!mr_can_free_table(net));

timer_shutdown_sync(&mrt->ipmr_expire_timer);
mroute_clean_tables(mrt, MRT6_FLUSH_MIFS | MRT6_FLUSH_MIFS_STATIC |
@@ -179,14 +179,18 @@ struct sk_buff *xfrm6_gro_udp_encap_rcv(struct sock *sk, struct list_head *head,
int offset = skb_gro_offset(skb);
const struct net_offload *ops;
struct sk_buff *pp = NULL;
int ret;
int len, dlen;
__u8 *udpdata;
__be32 *udpdata32;

if (skb->protocol == htons(ETH_P_IP))
return xfrm4_gro_udp_encap_rcv(sk, head, skb);

offset = offset - sizeof(struct udphdr);

if (!pskb_pull(skb, offset))
len = skb->len - offset;
dlen = offset + min(len, 8);
udpdata = skb_gro_header(skb, dlen, offset);
udpdata32 = (__be32 *)udpdata;
if (unlikely(!udpdata))
return NULL;

rcu_read_lock();
@@ -194,11 +198,10 @@ struct sk_buff *xfrm6_gro_udp_encap_rcv(struct sock *sk, struct list_head *head,
if (!ops || !ops->callbacks.gro_receive)
goto out;

ret = __xfrm6_udp_encap_rcv(sk, skb, false);
if (ret)
/* check if it is a keepalive or IKE packet */
if (len <= sizeof(struct ip_esp_hdr) || udpdata32[0] == 0)
goto out;

skb_push(skb, offset);
NAPI_GRO_CB(skb)->proto = IPPROTO_UDP;

pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
@@ -208,7 +211,6 @@ struct sk_buff *xfrm6_gro_udp_encap_rcv(struct sock *sk, struct list_head *head,

out:
rcu_read_unlock();
skb_push(skb, offset);
NAPI_GRO_CB(skb)->same_flow = 0;
NAPI_GRO_CB(skb)->flush = 1;
@@ -887,15 +887,15 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
if (sk->sk_type != SOCK_STREAM)
goto copy_uaddr;

/* Partial read */
if (used + offset < skb_len)
continue;

if (!(flags & MSG_PEEK)) {
skb_unlink(skb, &sk->sk_receive_queue);
kfree_skb(skb);
*seq = 0;
}

/* Partial read */
if (used + offset < skb_len)
continue;
} while (len > 0);

out:
@@ -1569,6 +1569,9 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
return err;
}

sch->qstats.backlog += len;
sch->q.qlen++;

if (first && !cl->cl_nactive) {
if (cl->cl_flags & HFSC_RSC)
init_ed(cl, len);
@@ -1584,9 +1587,6 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)

}

sch->qstats.backlog += len;
sch->q.qlen++;

return NET_XMIT_SUCCESS;
}
@@ -817,12 +817,16 @@ static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb,
goto exit;
}

/* Get net to avoid freed tipc_crypto when delete namespace */
get_net(aead->crypto->net);

/* Now, do encrypt */
rc = crypto_aead_encrypt(req);
if (rc == -EINPROGRESS || rc == -EBUSY)
return rc;

tipc_bearer_put(b);
put_net(aead->crypto->net);

exit:
kfree(ctx);
@@ -860,6 +864,7 @@ static void tipc_aead_encrypt_done(void *data, int err)
kfree(tx_ctx);
tipc_bearer_put(b);
tipc_aead_put(aead);
put_net(net);
}

/**
@@ -1304,7 +1304,7 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
xs->queue_id = qid;
xp_add_xsk(xs->pool, xs);

if (xs->zc && qid < dev->real_num_rx_queues) {
if (qid < dev->real_num_rx_queues) {
struct netdev_rx_queue *rxq;

rxq = __netif_get_rx_queue(dev, qid);
@@ -171,8 +171,10 @@ int espintcp_queue_out(struct sock *sk, struct sk_buff *skb)
struct espintcp_ctx *ctx = espintcp_getctx(sk);

if (skb_queue_len(&ctx->out_queue) >=
READ_ONCE(net_hotdata.max_backlog))
READ_ONCE(net_hotdata.max_backlog)) {
kfree_skb(skb);
return -ENOBUFS;
}

__skb_queue_tail(&ctx->out_queue, skb);
@@ -48,7 +48,6 @@ static int ipcomp_post_acomp(struct sk_buff *skb, int err, int hlen)
{
struct acomp_req *req = ipcomp_cb(skb)->req;
struct ipcomp_req_extra *extra;
const int plen = skb->data_len;
struct scatterlist *dsg;
int len, dlen;

@@ -64,7 +63,7 @@ static int ipcomp_post_acomp(struct sk_buff *skb, int err, int hlen)

/* Only update truesize on input. */
if (!hlen)
skb->truesize += dlen - plen;
skb->truesize += dlen;
skb->data_len = dlen;
skb->len += dlen;
@@ -1581,6 +1581,9 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
struct xfrm_policy *delpol;
struct hlist_head *chain;

/* Sanitize mark before store */
policy->mark.v &= policy->mark.m;

spin_lock_bh(&net->xfrm.xfrm_policy_lock);
chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
if (chain)
@@ -838,9 +838,6 @@ int __xfrm_state_delete(struct xfrm_state *x)
xfrm_nat_keepalive_state_updated(x);
spin_unlock(&net->xfrm.xfrm_state_lock);

if (x->encap_sk)
sock_put(rcu_dereference_raw(x->encap_sk));

xfrm_dev_state_delete(x);

/* All xfrm_state objects are created by xfrm_state_alloc.
@@ -1721,6 +1718,9 @@ static void __xfrm_state_insert(struct xfrm_state *x)

list_add(&x->km.all, &net->xfrm.state_all);

/* Sanitize mark before store */
x->mark.v &= x->mark.m;

h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr,
x->props.reqid, x->props.family);
XFRM_STATE_INSERT(bydst, &x->bydst, net->xfrm.state_bydst + h,
@@ -573,5 +573,32 @@
"teardown": [
"$TC qdisc del dev $DEV1 handle 1: root"
]
},
{
"id": "831d",
"name": "Test HFSC qlen accounting with DRR/NETEM/BLACKHOLE chain",
"category": ["qdisc", "hfsc", "drr", "netem", "blackhole"],
"plugins": { "requires": ["nsPlugin", "scapyPlugin"] },
"setup": [
"$IP link set dev $DEV1 up || true",
"$TC qdisc add dev $DEV1 root handle 1: drr",
"$TC filter add dev $DEV1 parent 1: basic classid 1:1",
"$TC class add dev $DEV1 parent 1: classid 1:1 drr",
"$TC qdisc add dev $DEV1 parent 1:1 handle 2: hfsc def 1",
"$TC class add dev $DEV1 parent 2: classid 2:1 hfsc rt m1 8 d 1 m2 0",
"$TC qdisc add dev $DEV1 parent 2:1 handle 3: netem",
"$TC qdisc add dev $DEV1 parent 3:1 handle 4: blackhole"
],
"scapy": {
"iface": "$DEV0",
"count": 5,
"packet": "Ether()/IP(dst='10.10.10.1', src='10.10.10.10')/ICMP()"
},
"cmdUnderTest": "$TC -s qdisc show dev $DEV1",
"expExitCode": "0",
"verifyCmd": "$TC -s qdisc show dev $DEV1",
"matchPattern": "qdisc hfsc",
"matchCount": "1",
"teardown": ["$TC qdisc del dev $DEV1 root handle 1: drr"]
}
]