mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-08 14:02:37 -04:00
Merge tag 'net-next-6.4' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next
Pull networking updates from Paolo Abeni:
"Core:
- Introduce a config option to tweak MAX_SKB_FRAGS. Increasing the
default value allows for better BIG TCP performances
- Reduce compound page head access for zero-copy data transfers
- RPS/RFS improvements, avoiding unneeded NET_RX_SOFTIRQ when
possible
- Threaded NAPI improvements, adding defer skb free support and
unneeded softirq avoidance
- Address dst_entry reference count scalability issues, via false
sharing avoidance and optimize refcount tracking
- Add lockless accesses annotation to sk_err[_soft]
- Optimize again the skb struct layout
- Extends the skb drop reasons to make it usable by multiple
subsystems
- Better const qualifier awareness for socket casts
BPF:
- Add skb and XDP typed dynptrs which allow BPF programs for more
ergonomic and less brittle iteration through data and
variable-sized accesses
- Add a new BPF netfilter program type and minimal support to hook
BPF programs to netfilter hooks such as prerouting or forward
- Add more precise memory usage reporting for all BPF map types
- Adds support for using {FOU,GUE} encap with an ipip device
operating in collect_md mode and add a set of BPF kfuncs for
controlling encap params
- Allow BPF programs to detect at load time whether a particular
kfunc exists or not, and also add support for this in light
skeleton
- Bigger batch of BPF verifier improvements to prepare for upcoming
BPF open-coded iterators allowing for less restrictive looping
capabilities
- Rework RCU enforcement in the verifier, add kptr_rcu and enforce
BPF programs to NULL-check before passing such pointers into kfunc
- Add support for kptrs in percpu hashmaps, percpu LRU hashmaps and
in local storage maps
- Enable RCU semantics for task BPF kptrs and allow referenced kptr
tasks to be stored in BPF maps
- Add support for refcounted local kptrs to the verifier for allowing
shared ownership, useful for adding a node to both the BPF list and
rbtree
- Add BPF verifier support for ST instructions in
convert_ctx_access() which will help new -mcpu=v4 clang flag to
start emitting them
- Add ARM32 USDT support to libbpf
- Improve bpftool's visual program dump which produces the control
flow graph in a DOT format by adding C source inline annotations
Protocols:
- IPv4: Allow adding to IPv4 address a 'protocol' tag. Such value
indicates the provenance of the IP address
- IPv6: optimize route lookup, dropping unneeded R/W lock acquisition
- Add the handshake upcall mechanism, allowing the user-space to
implement generic TLS handshake on kernel's behalf
- Bridge: support per-{Port, VLAN} neighbor suppression, increasing
resilience to nodes failures
- SCTP: add support for Fair Capacity and Weighted Fair Queueing
schedulers
- MPTCP: delay first subflow allocation up to its first usage. This
will allow for later better LSM interaction
- xfrm: Remove inner/outer modes from input/output path. These are
not needed anymore
- WiFi:
- reduced neighbor report (RNR) handling for AP mode
- HW timestamping support
- support for randomized auth/deauth TA for PASN privacy
- per-link debugfs for multi-link
- TC offload support for mac80211 drivers
- mac80211 mesh fast-xmit and fast-rx support
- enable Wi-Fi 7 (EHT) mesh support
Netfilter:
- Add nf_tables 'brouting' support, to force a packet to be routed
instead of being bridged
- Update bridge netfilter and ovs conntrack helpers to handle IPv6
Jumbo packets properly, i.e. fetch the packet length from
hop-by-hop extension header. This is needed for BIT TCP support
- The iptables 32bit compat interface isn't compiled in by default
anymore
- Move ip(6)tables builtin icmp matches to the udptcp one. This has
the advantage that icmp/icmpv6 match doesn't load the
iptables/ip6tables modules anymore when iptables-nft is used
- Extended netlink error report for netdevice in flowtables and
netdev/chains. Allow for incrementally add/delete devices to netdev
basechain. Allow to create netdev chain without device
Driver API:
- Remove redundant Device Control Error Reporting Enable, as PCI core
has already error reporting enabled at enumeration time
- Move Multicast DB netlink handlers to core, allowing devices other
then bridge to use them
- Allow the page_pool to directly recycle the pages from safely
localized NAPI
- Implement lockless TX queue stop/wake combo macros, allowing for
further code de-duplication and sanitization
- Add YNL support for user headers and struct attrs
- Add partial YNL specification for devlink
- Add partial YNL specification for ethtool
- Add tc-mqprio and tc-taprio support for preemptible traffic classes
- Add tx push buf len param to ethtool, specifies the maximum number
of bytes of a transmitted packet a driver can push directly to the
underlying device
- Add basic LED support for switch/phy
- Add NAPI documentation, stop relaying on external links
- Convert dsa_master_ioctl() to netdev notifier. This is a
preparatory work to make the hardware timestamping layer selectable
by user space
- Add transceiver support and improve the error messages for CAN-FD
controllers
New hardware / drivers:
- Ethernet:
- AMD/Pensando core device support
- MediaTek MT7981 SoC
- MediaTek MT7988 SoC
- Broadcom BCM53134 embedded switch
- Texas Instruments CPSW9G ethernet switch
- Qualcomm EMAC3 DWMAC ethernet
- StarFive JH7110 SoC
- NXP CBTX ethernet PHY
- WiFi:
- Apple M1 Pro/Max devices
- RealTek rtl8710bu/rtl8188gu
- RealTek rtl8822bs, rtl8822cs and rtl8821cs SDIO chipset
- Bluetooth:
- Realtek RTL8821CS, RTL8851B, RTL8852BS
- Mediatek MT7663, MT7922
- NXP w8997
- Actions Semi ATS2851
- QTI WCN6855
- Marvell 88W8997
- Can:
- STMicroelectronics bxcan stm32f429
Drivers:
- Ethernet NICs:
- Intel (1G, icg):
- add tracking and reporting of QBV config errors
- add support for configuring max SDU for each Tx queue
- Intel (100G, ice):
- refactor mailbox overflow detection to support Scalable IOV
- GNSS interface optimization
- Intel (i40e):
- support XDP multi-buffer
- nVidia/Mellanox:
- add the support for linux bridge multicast offload
- enable TC offload for egress and engress MACVLAN over bond
- add support for VxLAN GBP encap/decap flows offload
- extend packet offload to fully support libreswan
- support tunnel mode in mlx5 IPsec packet offload
- extend XDP multi-buffer support
- support MACsec VLAN offload
- add support for dynamic msix vectors allocation
- drop RX page_cache and fully use page_pool
- implement thermal zone to report NIC temperature
- Netronome/Corigine:
- add support for multi-zone conntrack offload
- Solarflare/Xilinx:
- support offloading TC VLAN push/pop actions to the MAE
- support TC decap rules
- support unicast PTP
- Other NICs:
- Broadcom (bnxt): enforce software based freq adjustments only on
shared PHC NIC
- RealTek (r8169): refactor to addess ASPM issues during NAPI poll
- Micrel (lan8841): add support for PTP_PF_PEROUT
- Cadence (macb): enable PTP unicast
- Engleder (tsnep): add XDP socket zero-copy support
- virtio-net: implement exact header length guest feature
- veth: add page_pool support for page recycling
- vxlan: add MDB data path support
- gve: add XDP support for GQI-QPL format
- geneve: accept every ethertype
- macvlan: allow some packets to bypass broadcast queue
- mana: add support for jumbo frame
- Ethernet high-speed switches:
- Microchip (sparx5): Add support for TC flower templates
- Ethernet embedded switches:
- Broadcom (b54):
- configure 6318 and 63268 RGMII ports
- Marvell (mv88e6xxx):
- faster C45 bus scan
- Microchip:
- lan966x:
- add support for IS1 VCAP
- better TX/RX from/to CPU performances
- ksz9477: add ETS Qdisc support
- ksz8: enhance static MAC table operations and error handling
- sama7g5: add PTP capability
- NXP (ocelot):
- add support for external ports
- add support for preemptible traffic classes
- Texas Instruments:
- add CPSWxG SGMII support for J7200 and J721E
- Intel WiFi (iwlwifi):
- preparation for Wi-Fi 7 EHT and multi-link support
- EHT (Wi-Fi 7) sniffer support
- hardware timestamping support for some devices/firwmares
- TX beacon protection on newer hardware
- Qualcomm 802.11ax WiFi (ath11k):
- MU-MIMO parameters support
- ack signal support for management packets
- RealTek WiFi (rtw88):
- SDIO bus support
- better support for some SDIO devices (e.g. MAC address from
efuse)
- RealTek WiFi (rtw89):
- HW scan support for 8852b
- better support for 6 GHz scanning
- support for various newer firmware APIs
- framework firmware backwards compatibility
- MediaTek WiFi (mt76):
- P2P support
- mesh A-MSDU support
- EHT (Wi-Fi 7) support
- coredump support"
* tag 'net-next-6.4' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (2078 commits)
net: phy: hide the PHYLIB_LEDS knob
net: phy: marvell-88x2222: remove unnecessary (void*) conversions
tcp/udp: Fix memleaks of sk and zerocopy skbs with TX timestamp.
net: amd: Fix link leak when verifying config failed
net: phy: marvell: Fix inconsistent indenting in led_blink_set
lan966x: Don't use xdp_frame when action is XDP_TX
tsnep: Add XDP socket zero-copy TX support
tsnep: Add XDP socket zero-copy RX support
tsnep: Move skb receive action to separate function
tsnep: Add functions for queue enable/disable
tsnep: Rework TX/RX queue initialization
tsnep: Replace modulo operation with mask
net: phy: dp83867: Add led_brightness_set support
net: phy: Fix reading LED reg property
drivers: nfc: nfcsim: remove return value check of `dev_dir`
net: phy: dp83867: Remove unnecessary (void*) conversions
net: ethtool: coalesce: try to make user settings stick twice
net: mana: Check if netdev/napi_alloc_frag returns single page
net: mana: Rename mana_refill_rxoob and remove some empty lines
net: veth: add page_pool stats
...
This commit is contained in:
@@ -1,9 +0,0 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
|
||||
#define _UAPI__ASM_BPF_PERF_EVENT_H__
|
||||
|
||||
#include <asm/ptrace.h>
|
||||
|
||||
typedef struct user_pt_regs bpf_user_pt_regs_t;
|
||||
|
||||
#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */
|
||||
@@ -1,9 +0,0 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
|
||||
#define _UAPI__ASM_BPF_PERF_EVENT_H__
|
||||
|
||||
#include "ptrace.h"
|
||||
|
||||
typedef user_pt_regs bpf_user_pt_regs_t;
|
||||
|
||||
#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */
|
||||
@@ -1,458 +0,0 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
|
||||
/*
|
||||
* S390 version
|
||||
* Copyright IBM Corp. 1999, 2000
|
||||
* Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
|
||||
*/
|
||||
|
||||
#ifndef _UAPI_S390_PTRACE_H
|
||||
#define _UAPI_S390_PTRACE_H
|
||||
|
||||
/*
|
||||
* Offsets in the user_regs_struct. They are used for the ptrace
|
||||
* system call and in entry.S
|
||||
*/
|
||||
#ifndef __s390x__
|
||||
|
||||
#define PT_PSWMASK 0x00
|
||||
#define PT_PSWADDR 0x04
|
||||
#define PT_GPR0 0x08
|
||||
#define PT_GPR1 0x0C
|
||||
#define PT_GPR2 0x10
|
||||
#define PT_GPR3 0x14
|
||||
#define PT_GPR4 0x18
|
||||
#define PT_GPR5 0x1C
|
||||
#define PT_GPR6 0x20
|
||||
#define PT_GPR7 0x24
|
||||
#define PT_GPR8 0x28
|
||||
#define PT_GPR9 0x2C
|
||||
#define PT_GPR10 0x30
|
||||
#define PT_GPR11 0x34
|
||||
#define PT_GPR12 0x38
|
||||
#define PT_GPR13 0x3C
|
||||
#define PT_GPR14 0x40
|
||||
#define PT_GPR15 0x44
|
||||
#define PT_ACR0 0x48
|
||||
#define PT_ACR1 0x4C
|
||||
#define PT_ACR2 0x50
|
||||
#define PT_ACR3 0x54
|
||||
#define PT_ACR4 0x58
|
||||
#define PT_ACR5 0x5C
|
||||
#define PT_ACR6 0x60
|
||||
#define PT_ACR7 0x64
|
||||
#define PT_ACR8 0x68
|
||||
#define PT_ACR9 0x6C
|
||||
#define PT_ACR10 0x70
|
||||
#define PT_ACR11 0x74
|
||||
#define PT_ACR12 0x78
|
||||
#define PT_ACR13 0x7C
|
||||
#define PT_ACR14 0x80
|
||||
#define PT_ACR15 0x84
|
||||
#define PT_ORIGGPR2 0x88
|
||||
#define PT_FPC 0x90
|
||||
/*
|
||||
* A nasty fact of life that the ptrace api
|
||||
* only supports passing of longs.
|
||||
*/
|
||||
#define PT_FPR0_HI 0x98
|
||||
#define PT_FPR0_LO 0x9C
|
||||
#define PT_FPR1_HI 0xA0
|
||||
#define PT_FPR1_LO 0xA4
|
||||
#define PT_FPR2_HI 0xA8
|
||||
#define PT_FPR2_LO 0xAC
|
||||
#define PT_FPR3_HI 0xB0
|
||||
#define PT_FPR3_LO 0xB4
|
||||
#define PT_FPR4_HI 0xB8
|
||||
#define PT_FPR4_LO 0xBC
|
||||
#define PT_FPR5_HI 0xC0
|
||||
#define PT_FPR5_LO 0xC4
|
||||
#define PT_FPR6_HI 0xC8
|
||||
#define PT_FPR6_LO 0xCC
|
||||
#define PT_FPR7_HI 0xD0
|
||||
#define PT_FPR7_LO 0xD4
|
||||
#define PT_FPR8_HI 0xD8
|
||||
#define PT_FPR8_LO 0XDC
|
||||
#define PT_FPR9_HI 0xE0
|
||||
#define PT_FPR9_LO 0xE4
|
||||
#define PT_FPR10_HI 0xE8
|
||||
#define PT_FPR10_LO 0xEC
|
||||
#define PT_FPR11_HI 0xF0
|
||||
#define PT_FPR11_LO 0xF4
|
||||
#define PT_FPR12_HI 0xF8
|
||||
#define PT_FPR12_LO 0xFC
|
||||
#define PT_FPR13_HI 0x100
|
||||
#define PT_FPR13_LO 0x104
|
||||
#define PT_FPR14_HI 0x108
|
||||
#define PT_FPR14_LO 0x10C
|
||||
#define PT_FPR15_HI 0x110
|
||||
#define PT_FPR15_LO 0x114
|
||||
#define PT_CR_9 0x118
|
||||
#define PT_CR_10 0x11C
|
||||
#define PT_CR_11 0x120
|
||||
#define PT_IEEE_IP 0x13C
|
||||
#define PT_LASTOFF PT_IEEE_IP
|
||||
#define PT_ENDREGS 0x140-1
|
||||
|
||||
#define GPR_SIZE 4
|
||||
#define CR_SIZE 4
|
||||
|
||||
#define STACK_FRAME_OVERHEAD 96 /* size of minimum stack frame */
|
||||
|
||||
#else /* __s390x__ */
|
||||
|
||||
#define PT_PSWMASK 0x00
|
||||
#define PT_PSWADDR 0x08
|
||||
#define PT_GPR0 0x10
|
||||
#define PT_GPR1 0x18
|
||||
#define PT_GPR2 0x20
|
||||
#define PT_GPR3 0x28
|
||||
#define PT_GPR4 0x30
|
||||
#define PT_GPR5 0x38
|
||||
#define PT_GPR6 0x40
|
||||
#define PT_GPR7 0x48
|
||||
#define PT_GPR8 0x50
|
||||
#define PT_GPR9 0x58
|
||||
#define PT_GPR10 0x60
|
||||
#define PT_GPR11 0x68
|
||||
#define PT_GPR12 0x70
|
||||
#define PT_GPR13 0x78
|
||||
#define PT_GPR14 0x80
|
||||
#define PT_GPR15 0x88
|
||||
#define PT_ACR0 0x90
|
||||
#define PT_ACR1 0x94
|
||||
#define PT_ACR2 0x98
|
||||
#define PT_ACR3 0x9C
|
||||
#define PT_ACR4 0xA0
|
||||
#define PT_ACR5 0xA4
|
||||
#define PT_ACR6 0xA8
|
||||
#define PT_ACR7 0xAC
|
||||
#define PT_ACR8 0xB0
|
||||
#define PT_ACR9 0xB4
|
||||
#define PT_ACR10 0xB8
|
||||
#define PT_ACR11 0xBC
|
||||
#define PT_ACR12 0xC0
|
||||
#define PT_ACR13 0xC4
|
||||
#define PT_ACR14 0xC8
|
||||
#define PT_ACR15 0xCC
|
||||
#define PT_ORIGGPR2 0xD0
|
||||
#define PT_FPC 0xD8
|
||||
#define PT_FPR0 0xE0
|
||||
#define PT_FPR1 0xE8
|
||||
#define PT_FPR2 0xF0
|
||||
#define PT_FPR3 0xF8
|
||||
#define PT_FPR4 0x100
|
||||
#define PT_FPR5 0x108
|
||||
#define PT_FPR6 0x110
|
||||
#define PT_FPR7 0x118
|
||||
#define PT_FPR8 0x120
|
||||
#define PT_FPR9 0x128
|
||||
#define PT_FPR10 0x130
|
||||
#define PT_FPR11 0x138
|
||||
#define PT_FPR12 0x140
|
||||
#define PT_FPR13 0x148
|
||||
#define PT_FPR14 0x150
|
||||
#define PT_FPR15 0x158
|
||||
#define PT_CR_9 0x160
|
||||
#define PT_CR_10 0x168
|
||||
#define PT_CR_11 0x170
|
||||
#define PT_IEEE_IP 0x1A8
|
||||
#define PT_LASTOFF PT_IEEE_IP
|
||||
#define PT_ENDREGS 0x1B0-1
|
||||
|
||||
#define GPR_SIZE 8
|
||||
#define CR_SIZE 8
|
||||
|
||||
#define STACK_FRAME_OVERHEAD 160 /* size of minimum stack frame */
|
||||
|
||||
#endif /* __s390x__ */
|
||||
|
||||
#define NUM_GPRS 16
|
||||
#define NUM_FPRS 16
|
||||
#define NUM_CRS 16
|
||||
#define NUM_ACRS 16
|
||||
|
||||
#define NUM_CR_WORDS 3
|
||||
|
||||
#define FPR_SIZE 8
|
||||
#define FPC_SIZE 4
|
||||
#define FPC_PAD_SIZE 4 /* gcc insists on aligning the fpregs */
|
||||
#define ACR_SIZE 4
|
||||
|
||||
|
||||
#define PTRACE_OLDSETOPTIONS 21
|
||||
#define PTRACE_SYSEMU 31
|
||||
#define PTRACE_SYSEMU_SINGLESTEP 32
|
||||
#ifndef __ASSEMBLY__
|
||||
#include <linux/stddef.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
typedef union {
|
||||
float f;
|
||||
double d;
|
||||
__u64 ui;
|
||||
struct
|
||||
{
|
||||
__u32 hi;
|
||||
__u32 lo;
|
||||
} fp;
|
||||
} freg_t;
|
||||
|
||||
typedef struct {
|
||||
__u32 fpc;
|
||||
__u32 pad;
|
||||
freg_t fprs[NUM_FPRS];
|
||||
} s390_fp_regs;
|
||||
|
||||
#define FPC_EXCEPTION_MASK 0xF8000000
|
||||
#define FPC_FLAGS_MASK 0x00F80000
|
||||
#define FPC_DXC_MASK 0x0000FF00
|
||||
#define FPC_RM_MASK 0x00000003
|
||||
|
||||
/* this typedef defines how a Program Status Word looks like */
|
||||
typedef struct {
|
||||
unsigned long mask;
|
||||
unsigned long addr;
|
||||
} __attribute__ ((aligned(8))) psw_t;
|
||||
|
||||
#ifndef __s390x__
|
||||
|
||||
#define PSW_MASK_PER 0x40000000UL
|
||||
#define PSW_MASK_DAT 0x04000000UL
|
||||
#define PSW_MASK_IO 0x02000000UL
|
||||
#define PSW_MASK_EXT 0x01000000UL
|
||||
#define PSW_MASK_KEY 0x00F00000UL
|
||||
#define PSW_MASK_BASE 0x00080000UL /* always one */
|
||||
#define PSW_MASK_MCHECK 0x00040000UL
|
||||
#define PSW_MASK_WAIT 0x00020000UL
|
||||
#define PSW_MASK_PSTATE 0x00010000UL
|
||||
#define PSW_MASK_ASC 0x0000C000UL
|
||||
#define PSW_MASK_CC 0x00003000UL
|
||||
#define PSW_MASK_PM 0x00000F00UL
|
||||
#define PSW_MASK_RI 0x00000000UL
|
||||
#define PSW_MASK_EA 0x00000000UL
|
||||
#define PSW_MASK_BA 0x00000000UL
|
||||
|
||||
#define PSW_MASK_USER 0x0000FF00UL
|
||||
|
||||
#define PSW_ADDR_AMODE 0x80000000UL
|
||||
#define PSW_ADDR_INSN 0x7FFFFFFFUL
|
||||
|
||||
#define PSW_DEFAULT_KEY (((unsigned long) PAGE_DEFAULT_ACC) << 20)
|
||||
|
||||
#define PSW_ASC_PRIMARY 0x00000000UL
|
||||
#define PSW_ASC_ACCREG 0x00004000UL
|
||||
#define PSW_ASC_SECONDARY 0x00008000UL
|
||||
#define PSW_ASC_HOME 0x0000C000UL
|
||||
|
||||
#else /* __s390x__ */
|
||||
|
||||
#define PSW_MASK_PER 0x4000000000000000UL
|
||||
#define PSW_MASK_DAT 0x0400000000000000UL
|
||||
#define PSW_MASK_IO 0x0200000000000000UL
|
||||
#define PSW_MASK_EXT 0x0100000000000000UL
|
||||
#define PSW_MASK_BASE 0x0000000000000000UL
|
||||
#define PSW_MASK_KEY 0x00F0000000000000UL
|
||||
#define PSW_MASK_MCHECK 0x0004000000000000UL
|
||||
#define PSW_MASK_WAIT 0x0002000000000000UL
|
||||
#define PSW_MASK_PSTATE 0x0001000000000000UL
|
||||
#define PSW_MASK_ASC 0x0000C00000000000UL
|
||||
#define PSW_MASK_CC 0x0000300000000000UL
|
||||
#define PSW_MASK_PM 0x00000F0000000000UL
|
||||
#define PSW_MASK_RI 0x0000008000000000UL
|
||||
#define PSW_MASK_EA 0x0000000100000000UL
|
||||
#define PSW_MASK_BA 0x0000000080000000UL
|
||||
|
||||
#define PSW_MASK_USER 0x0000FF0180000000UL
|
||||
|
||||
#define PSW_ADDR_AMODE 0x0000000000000000UL
|
||||
#define PSW_ADDR_INSN 0xFFFFFFFFFFFFFFFFUL
|
||||
|
||||
#define PSW_DEFAULT_KEY (((unsigned long) PAGE_DEFAULT_ACC) << 52)
|
||||
|
||||
#define PSW_ASC_PRIMARY 0x0000000000000000UL
|
||||
#define PSW_ASC_ACCREG 0x0000400000000000UL
|
||||
#define PSW_ASC_SECONDARY 0x0000800000000000UL
|
||||
#define PSW_ASC_HOME 0x0000C00000000000UL
|
||||
|
||||
#endif /* __s390x__ */
|
||||
|
||||
|
||||
/*
|
||||
* The s390_regs structure is used to define the elf_gregset_t.
|
||||
*/
|
||||
typedef struct {
|
||||
psw_t psw;
|
||||
unsigned long gprs[NUM_GPRS];
|
||||
unsigned int acrs[NUM_ACRS];
|
||||
unsigned long orig_gpr2;
|
||||
} s390_regs;
|
||||
|
||||
/*
|
||||
* The user_pt_regs structure exports the beginning of
|
||||
* the in-kernel pt_regs structure to user space.
|
||||
*/
|
||||
typedef struct {
|
||||
unsigned long args[1];
|
||||
psw_t psw;
|
||||
unsigned long gprs[NUM_GPRS];
|
||||
} user_pt_regs;
|
||||
|
||||
/*
|
||||
* Now for the user space program event recording (trace) definitions.
|
||||
* The following structures are used only for the ptrace interface, don't
|
||||
* touch or even look at it if you don't want to modify the user-space
|
||||
* ptrace interface. In particular stay away from it for in-kernel PER.
|
||||
*/
|
||||
typedef struct {
|
||||
unsigned long cr[NUM_CR_WORDS];
|
||||
} per_cr_words;
|
||||
|
||||
#define PER_EM_MASK 0xE8000000UL
|
||||
|
||||
typedef struct {
|
||||
#ifdef __s390x__
|
||||
unsigned : 32;
|
||||
#endif /* __s390x__ */
|
||||
unsigned em_branching : 1;
|
||||
unsigned em_instruction_fetch : 1;
|
||||
/*
|
||||
* Switching on storage alteration automatically fixes
|
||||
* the storage alteration event bit in the users std.
|
||||
*/
|
||||
unsigned em_storage_alteration : 1;
|
||||
unsigned em_gpr_alt_unused : 1;
|
||||
unsigned em_store_real_address : 1;
|
||||
unsigned : 3;
|
||||
unsigned branch_addr_ctl : 1;
|
||||
unsigned : 1;
|
||||
unsigned storage_alt_space_ctl : 1;
|
||||
unsigned : 21;
|
||||
unsigned long starting_addr;
|
||||
unsigned long ending_addr;
|
||||
} per_cr_bits;
|
||||
|
||||
typedef struct {
|
||||
unsigned short perc_atmid;
|
||||
unsigned long address;
|
||||
unsigned char access_id;
|
||||
} per_lowcore_words;
|
||||
|
||||
typedef struct {
|
||||
unsigned perc_branching : 1;
|
||||
unsigned perc_instruction_fetch : 1;
|
||||
unsigned perc_storage_alteration : 1;
|
||||
unsigned perc_gpr_alt_unused : 1;
|
||||
unsigned perc_store_real_address : 1;
|
||||
unsigned : 3;
|
||||
unsigned atmid_psw_bit_31 : 1;
|
||||
unsigned atmid_validity_bit : 1;
|
||||
unsigned atmid_psw_bit_32 : 1;
|
||||
unsigned atmid_psw_bit_5 : 1;
|
||||
unsigned atmid_psw_bit_16 : 1;
|
||||
unsigned atmid_psw_bit_17 : 1;
|
||||
unsigned si : 2;
|
||||
unsigned long address;
|
||||
unsigned : 4;
|
||||
unsigned access_id : 4;
|
||||
} per_lowcore_bits;
|
||||
|
||||
typedef struct {
|
||||
union {
|
||||
per_cr_words words;
|
||||
per_cr_bits bits;
|
||||
} control_regs;
|
||||
/*
|
||||
* The single_step and instruction_fetch bits are obsolete,
|
||||
* the kernel always sets them to zero. To enable single
|
||||
* stepping use ptrace(PTRACE_SINGLESTEP) instead.
|
||||
*/
|
||||
unsigned single_step : 1;
|
||||
unsigned instruction_fetch : 1;
|
||||
unsigned : 30;
|
||||
/*
|
||||
* These addresses are copied into cr10 & cr11 if single
|
||||
* stepping is switched off
|
||||
*/
|
||||
unsigned long starting_addr;
|
||||
unsigned long ending_addr;
|
||||
union {
|
||||
per_lowcore_words words;
|
||||
per_lowcore_bits bits;
|
||||
} lowcore;
|
||||
} per_struct;
|
||||
|
||||
typedef struct {
|
||||
unsigned int len;
|
||||
unsigned long kernel_addr;
|
||||
unsigned long process_addr;
|
||||
} ptrace_area;
|
||||
|
||||
/*
|
||||
* S/390 specific non posix ptrace requests. I chose unusual values so
|
||||
* they are unlikely to clash with future ptrace definitions.
|
||||
*/
|
||||
#define PTRACE_PEEKUSR_AREA 0x5000
|
||||
#define PTRACE_POKEUSR_AREA 0x5001
|
||||
#define PTRACE_PEEKTEXT_AREA 0x5002
|
||||
#define PTRACE_PEEKDATA_AREA 0x5003
|
||||
#define PTRACE_POKETEXT_AREA 0x5004
|
||||
#define PTRACE_POKEDATA_AREA 0x5005
|
||||
#define PTRACE_GET_LAST_BREAK 0x5006
|
||||
#define PTRACE_PEEK_SYSTEM_CALL 0x5007
|
||||
#define PTRACE_POKE_SYSTEM_CALL 0x5008
|
||||
#define PTRACE_ENABLE_TE 0x5009
|
||||
#define PTRACE_DISABLE_TE 0x5010
|
||||
#define PTRACE_TE_ABORT_RAND 0x5011
|
||||
|
||||
/*
|
||||
* The numbers chosen here are somewhat arbitrary but absolutely MUST
|
||||
* not overlap with any of the number assigned in <linux/ptrace.h>.
|
||||
*/
|
||||
#define PTRACE_SINGLEBLOCK 12 /* resume execution until next branch */
|
||||
|
||||
/*
|
||||
* PT_PROT definition is loosely based on hppa bsd definition in
|
||||
* gdb/hppab-nat.c
|
||||
*/
|
||||
#define PTRACE_PROT 21
|
||||
|
||||
typedef enum {
|
||||
ptprot_set_access_watchpoint,
|
||||
ptprot_set_write_watchpoint,
|
||||
ptprot_disable_watchpoint
|
||||
} ptprot_flags;
|
||||
|
||||
typedef struct {
|
||||
unsigned long lowaddr;
|
||||
unsigned long hiaddr;
|
||||
ptprot_flags prot;
|
||||
} ptprot_area;
|
||||
|
||||
/* Sequence of bytes for breakpoint illegal instruction. */
|
||||
#define S390_BREAKPOINT {0x0,0x1}
|
||||
#define S390_BREAKPOINT_U16 ((__u16)0x0001)
|
||||
#define S390_SYSCALL_OPCODE ((__u16)0x0a00)
|
||||
#define S390_SYSCALL_SIZE 2
|
||||
|
||||
/*
|
||||
* The user_regs_struct defines the way the user registers are
|
||||
* store on the stack for signal handling.
|
||||
*/
|
||||
struct user_regs_struct {
|
||||
psw_t psw;
|
||||
unsigned long gprs[NUM_GPRS];
|
||||
unsigned int acrs[NUM_ACRS];
|
||||
unsigned long orig_gpr2;
|
||||
s390_fp_regs fp_regs;
|
||||
/*
|
||||
* These per registers are in here so that gdb can modify them
|
||||
* itself as there is no "official" ptrace interface for hardware
|
||||
* watchpoints. This is the way intel does it.
|
||||
*/
|
||||
per_struct per_info;
|
||||
unsigned long ieee_instruction_pointer; /* obsolete, always 0 */
|
||||
};
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
#endif /* _UAPI_S390_PTRACE_H */
|
||||
@@ -28,8 +28,8 @@ PROG COMMANDS
|
||||
=============
|
||||
|
||||
| **bpftool** **prog** { **show** | **list** } [*PROG*]
|
||||
| **bpftool** **prog dump xlated** *PROG* [{**file** *FILE* | **opcodes** | **visual** | **linum**}]
|
||||
| **bpftool** **prog dump jited** *PROG* [{**file** *FILE* | **opcodes** | **linum**}]
|
||||
| **bpftool** **prog dump xlated** *PROG* [{ **file** *FILE* | [**opcodes**] [**linum**] [**visual**] }]
|
||||
| **bpftool** **prog dump jited** *PROG* [{ **file** *FILE* | [**opcodes**] [**linum**] }]
|
||||
| **bpftool** **prog pin** *PROG* *FILE*
|
||||
| **bpftool** **prog** { **load** | **loadall** } *OBJ* *PATH* [**type** *TYPE*] [**map** {**idx** *IDX* | **name** *NAME*} *MAP*] [**dev** *NAME*] [**pinmaps** *MAP_DIR*] [**autoattach**]
|
||||
| **bpftool** **prog attach** *PROG* *ATTACH_TYPE* [*MAP*]
|
||||
@@ -88,7 +88,7 @@ DESCRIPTION
|
||||
programs. On such kernels bpftool will automatically emit this
|
||||
information as well.
|
||||
|
||||
**bpftool prog dump xlated** *PROG* [{ **file** *FILE* | **opcodes** | **visual** | **linum** }]
|
||||
**bpftool prog dump xlated** *PROG* [{ **file** *FILE* | [**opcodes**] [**linum**] [**visual**] }]
|
||||
Dump eBPF instructions of the programs from the kernel. By
|
||||
default, eBPF will be disassembled and printed to standard
|
||||
output in human-readable format. In this case, **opcodes**
|
||||
@@ -106,11 +106,10 @@ DESCRIPTION
|
||||
CFG in DOT format, on standard output.
|
||||
|
||||
If the programs have line_info available, the source line will
|
||||
be displayed by default. If **linum** is specified,
|
||||
the filename, line number and line column will also be
|
||||
displayed on top of the source line.
|
||||
be displayed. If **linum** is specified, the filename, line
|
||||
number and line column will also be displayed.
|
||||
|
||||
**bpftool prog dump jited** *PROG* [{ **file** *FILE* | **opcodes** | **linum** }]
|
||||
**bpftool prog dump jited** *PROG* [{ **file** *FILE* | [**opcodes**] [**linum**] }]
|
||||
Dump jited image (host machine code) of the program.
|
||||
|
||||
If *FILE* is specified image will be written to a file,
|
||||
@@ -120,9 +119,8 @@ DESCRIPTION
|
||||
**opcodes** controls if raw opcodes will be printed.
|
||||
|
||||
If the prog has line_info available, the source line will
|
||||
be displayed by default. If **linum** is specified,
|
||||
the filename, line number and line column will also be
|
||||
displayed on top of the source line.
|
||||
be displayed. If **linum** is specified, the filename, line
|
||||
number and line column will also be displayed.
|
||||
|
||||
**bpftool prog pin** *PROG* *FILE*
|
||||
Pin program *PROG* as *FILE*.
|
||||
|
||||
@@ -26,7 +26,7 @@ STRUCT_OPS COMMANDS
|
||||
|
||||
| **bpftool** **struct_ops { show | list }** [*STRUCT_OPS_MAP*]
|
||||
| **bpftool** **struct_ops dump** [*STRUCT_OPS_MAP*]
|
||||
| **bpftool** **struct_ops register** *OBJ*
|
||||
| **bpftool** **struct_ops register** *OBJ* [*LINK_DIR*]
|
||||
| **bpftool** **struct_ops unregister** *STRUCT_OPS_MAP*
|
||||
| **bpftool** **struct_ops help**
|
||||
|
|
||||
@@ -51,10 +51,14 @@ DESCRIPTION
|
||||
for the given struct_ops. Otherwise, it dumps all struct_ops
|
||||
currently existing in the system.
|
||||
|
||||
**bpftool struct_ops register** *OBJ*
|
||||
**bpftool struct_ops register** *OBJ* [*LINK_DIR*]
|
||||
Register bpf struct_ops from *OBJ*. All struct_ops under
|
||||
the ELF section ".struct_ops" will be registered to
|
||||
its kernel subsystem.
|
||||
the ELF section ".struct_ops" and ".struct_ops.link" will
|
||||
be registered to its kernel subsystem. For each
|
||||
struct_ops in the ".struct_ops.link" section, a link
|
||||
will be created. You can give *LINK_DIR* to provide a
|
||||
directory path where these links will be pinned with the
|
||||
same name as their corresponding map name.
|
||||
|
||||
**bpftool struct_ops unregister** *STRUCT_OPS_MAP*
|
||||
Unregister the *STRUCT_OPS_MAP* from the kernel subsystem.
|
||||
|
||||
@@ -255,20 +255,23 @@ _bpftool_map_update_get_name()
|
||||
|
||||
_bpftool()
|
||||
{
|
||||
local cur prev words objword
|
||||
local cur prev words objword json=0
|
||||
_init_completion || return
|
||||
|
||||
# Deal with options
|
||||
if [[ ${words[cword]} == -* ]]; then
|
||||
local c='--version --json --pretty --bpffs --mapcompat --debug \
|
||||
--use-loader --base-btf'
|
||||
--use-loader --base-btf'
|
||||
COMPREPLY=( $( compgen -W "$c" -- "$cur" ) )
|
||||
return 0
|
||||
fi
|
||||
if _bpftool_search_list -j --json -p --pretty; then
|
||||
json=1
|
||||
fi
|
||||
|
||||
# Deal with simplest keywords
|
||||
case $prev in
|
||||
help|hex|opcodes|visual|linum)
|
||||
help|hex)
|
||||
return 0
|
||||
;;
|
||||
tag)
|
||||
@@ -366,13 +369,16 @@ _bpftool()
|
||||
return 0
|
||||
;;
|
||||
*)
|
||||
_bpftool_once_attr 'file'
|
||||
if _bpftool_search_list 'xlated'; then
|
||||
COMPREPLY+=( $( compgen -W 'opcodes visual linum' -- \
|
||||
"$cur" ) )
|
||||
else
|
||||
COMPREPLY+=( $( compgen -W 'opcodes linum' -- \
|
||||
"$cur" ) )
|
||||
# "file" is not compatible with other keywords here
|
||||
if _bpftool_search_list 'file'; then
|
||||
return 0
|
||||
fi
|
||||
if ! _bpftool_search_list 'linum opcodes visual'; then
|
||||
_bpftool_once_attr 'file'
|
||||
fi
|
||||
_bpftool_once_attr 'linum opcodes'
|
||||
if _bpftool_search_list 'xlated' && [[ "$json" == 0 ]]; then
|
||||
_bpftool_once_attr 'visual'
|
||||
fi
|
||||
return 0
|
||||
;;
|
||||
@@ -502,10 +508,7 @@ _bpftool()
|
||||
;;
|
||||
*)
|
||||
COMPREPLY=( $( compgen -W "map" -- "$cur" ) )
|
||||
_bpftool_once_attr 'type'
|
||||
_bpftool_once_attr 'dev'
|
||||
_bpftool_once_attr 'pinmaps'
|
||||
_bpftool_once_attr 'autoattach'
|
||||
_bpftool_once_attr 'type dev pinmaps autoattach'
|
||||
return 0
|
||||
;;
|
||||
esac
|
||||
@@ -730,16 +733,10 @@ _bpftool()
|
||||
esac
|
||||
;;
|
||||
*)
|
||||
_bpftool_once_attr 'type'
|
||||
_bpftool_once_attr 'key'
|
||||
_bpftool_once_attr 'value'
|
||||
_bpftool_once_attr 'entries'
|
||||
_bpftool_once_attr 'name'
|
||||
_bpftool_once_attr 'flags'
|
||||
_bpftool_once_attr 'type key value entries name flags dev'
|
||||
if _bpftool_search_list 'array_of_maps' 'hash_of_maps'; then
|
||||
_bpftool_once_attr 'inner_map'
|
||||
fi
|
||||
_bpftool_once_attr 'dev'
|
||||
return 0
|
||||
;;
|
||||
esac
|
||||
@@ -880,8 +877,7 @@ _bpftool()
|
||||
return 0
|
||||
;;
|
||||
*)
|
||||
_bpftool_once_attr 'cpu'
|
||||
_bpftool_once_attr 'index'
|
||||
_bpftool_once_attr 'cpu index'
|
||||
return 0
|
||||
;;
|
||||
esac
|
||||
|
||||
@@ -821,3 +821,86 @@ void btf_dump_linfo_json(const struct btf *btf,
|
||||
BPF_LINE_INFO_LINE_COL(linfo->line_col));
|
||||
}
|
||||
}
|
||||
|
||||
static void dotlabel_puts(const char *s)
|
||||
{
|
||||
for (; *s; ++s) {
|
||||
switch (*s) {
|
||||
case '\\':
|
||||
case '"':
|
||||
case '{':
|
||||
case '}':
|
||||
case '<':
|
||||
case '>':
|
||||
case '|':
|
||||
case ' ':
|
||||
putchar('\\');
|
||||
/* fallthrough */
|
||||
default:
|
||||
putchar(*s);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static const char *shorten_path(const char *path)
|
||||
{
|
||||
const unsigned int MAX_PATH_LEN = 32;
|
||||
size_t len = strlen(path);
|
||||
const char *shortpath;
|
||||
|
||||
if (len <= MAX_PATH_LEN)
|
||||
return path;
|
||||
|
||||
/* Search for last '/' under the MAX_PATH_LEN limit */
|
||||
shortpath = strchr(path + len - MAX_PATH_LEN, '/');
|
||||
if (shortpath) {
|
||||
if (shortpath < path + strlen("..."))
|
||||
/* We removed a very short prefix, e.g. "/w", and we'll
|
||||
* make the path longer by prefixing with the ellipsis.
|
||||
* Not worth it, keep initial path.
|
||||
*/
|
||||
return path;
|
||||
return shortpath;
|
||||
}
|
||||
|
||||
/* File base name length is > MAX_PATH_LEN, search for last '/' */
|
||||
shortpath = strrchr(path, '/');
|
||||
if (shortpath)
|
||||
return shortpath;
|
||||
|
||||
return path;
|
||||
}
|
||||
|
||||
void btf_dump_linfo_dotlabel(const struct btf *btf,
|
||||
const struct bpf_line_info *linfo, bool linum)
|
||||
{
|
||||
const char *line = btf__name_by_offset(btf, linfo->line_off);
|
||||
|
||||
if (!line || !strlen(line))
|
||||
return;
|
||||
line = ltrim(line);
|
||||
|
||||
if (linum) {
|
||||
const char *file = btf__name_by_offset(btf, linfo->file_name_off);
|
||||
const char *shortfile;
|
||||
|
||||
/* More forgiving on file because linum option is
|
||||
* expected to provide more info than the already
|
||||
* available src line.
|
||||
*/
|
||||
if (!file)
|
||||
shortfile = "";
|
||||
else
|
||||
shortfile = shorten_path(file);
|
||||
|
||||
printf("; [%s", shortfile > file ? "..." : "");
|
||||
dotlabel_puts(shortfile);
|
||||
printf(" line:%u col:%u]\\l\\\n",
|
||||
BPF_LINE_INFO_LINE_NUM(linfo->line_col),
|
||||
BPF_LINE_INFO_LINE_COL(linfo->line_col));
|
||||
}
|
||||
|
||||
printf("; ");
|
||||
dotlabel_puts(line);
|
||||
printf("\\l\\\n");
|
||||
}
|
||||
|
||||
@@ -380,7 +380,9 @@ static void cfg_destroy(struct cfg *cfg)
|
||||
}
|
||||
}
|
||||
|
||||
static void draw_bb_node(struct func_node *func, struct bb_node *bb)
|
||||
static void
|
||||
draw_bb_node(struct func_node *func, struct bb_node *bb, struct dump_data *dd,
|
||||
bool opcodes, bool linum)
|
||||
{
|
||||
const char *shape;
|
||||
|
||||
@@ -398,13 +400,10 @@ static void draw_bb_node(struct func_node *func, struct bb_node *bb)
|
||||
printf("EXIT");
|
||||
} else {
|
||||
unsigned int start_idx;
|
||||
struct dump_data dd = {};
|
||||
|
||||
printf("{");
|
||||
kernel_syms_load(&dd);
|
||||
printf("{\\\n");
|
||||
start_idx = bb->head - func->start;
|
||||
dump_xlated_for_graph(&dd, bb->head, bb->tail, start_idx);
|
||||
kernel_syms_destroy(&dd);
|
||||
dump_xlated_for_graph(dd, bb->head, bb->tail, start_idx,
|
||||
opcodes, linum);
|
||||
printf("}");
|
||||
}
|
||||
|
||||
@@ -430,12 +429,14 @@ static void draw_bb_succ_edges(struct func_node *func, struct bb_node *bb)
|
||||
}
|
||||
}
|
||||
|
||||
static void func_output_bb_def(struct func_node *func)
|
||||
static void
|
||||
func_output_bb_def(struct func_node *func, struct dump_data *dd,
|
||||
bool opcodes, bool linum)
|
||||
{
|
||||
struct bb_node *bb;
|
||||
|
||||
list_for_each_entry(bb, &func->bbs, l) {
|
||||
draw_bb_node(func, bb);
|
||||
draw_bb_node(func, bb, dd, opcodes, linum);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -455,7 +456,8 @@ static void func_output_edges(struct func_node *func)
|
||||
func_idx, ENTRY_BLOCK_INDEX, func_idx, EXIT_BLOCK_INDEX);
|
||||
}
|
||||
|
||||
static void cfg_dump(struct cfg *cfg)
|
||||
static void
|
||||
cfg_dump(struct cfg *cfg, struct dump_data *dd, bool opcodes, bool linum)
|
||||
{
|
||||
struct func_node *func;
|
||||
|
||||
@@ -463,14 +465,15 @@ static void cfg_dump(struct cfg *cfg)
|
||||
list_for_each_entry(func, &cfg->funcs, l) {
|
||||
printf("subgraph \"cluster_%d\" {\n\tstyle=\"dashed\";\n\tcolor=\"black\";\n\tlabel=\"func_%d ()\";\n",
|
||||
func->idx, func->idx);
|
||||
func_output_bb_def(func);
|
||||
func_output_bb_def(func, dd, opcodes, linum);
|
||||
func_output_edges(func);
|
||||
printf("}\n");
|
||||
}
|
||||
printf("}\n");
|
||||
}
|
||||
|
||||
void dump_xlated_cfg(void *buf, unsigned int len)
|
||||
void dump_xlated_cfg(struct dump_data *dd, void *buf, unsigned int len,
|
||||
bool opcodes, bool linum)
|
||||
{
|
||||
struct bpf_insn *insn = buf;
|
||||
struct cfg cfg;
|
||||
@@ -479,7 +482,7 @@ void dump_xlated_cfg(void *buf, unsigned int len)
|
||||
if (cfg_build(&cfg, insn, len))
|
||||
return;
|
||||
|
||||
cfg_dump(&cfg);
|
||||
cfg_dump(&cfg, dd, opcodes, linum);
|
||||
|
||||
cfg_destroy(&cfg);
|
||||
}
|
||||
|
||||
@@ -4,6 +4,9 @@
|
||||
#ifndef __BPF_TOOL_CFG_H
|
||||
#define __BPF_TOOL_CFG_H
|
||||
|
||||
void dump_xlated_cfg(void *buf, unsigned int len);
|
||||
#include "xlated_dumper.h"
|
||||
|
||||
void dump_xlated_cfg(struct dump_data *dd, void *buf, unsigned int len,
|
||||
bool opcodes, bool linum);
|
||||
|
||||
#endif /* __BPF_TOOL_CFG_H */
|
||||
|
||||
@@ -1091,3 +1091,17 @@ const char *bpf_attach_type_input_str(enum bpf_attach_type t)
|
||||
default: return libbpf_bpf_attach_type_str(t);
|
||||
}
|
||||
}
|
||||
|
||||
int pathname_concat(char *buf, int buf_sz, const char *path,
|
||||
const char *name)
|
||||
{
|
||||
int len;
|
||||
|
||||
len = snprintf(buf, buf_sz, "%s/%s", path, name);
|
||||
if (len < 0)
|
||||
return -EINVAL;
|
||||
if (len >= buf_sz)
|
||||
return -ENAMETOOLONG;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -75,14 +75,11 @@ static void jsonw_puts(json_writer_t *self, const char *str)
|
||||
fputs("\\b", self->out);
|
||||
break;
|
||||
case '\\':
|
||||
fputs("\\n", self->out);
|
||||
fputs("\\\\", self->out);
|
||||
break;
|
||||
case '"':
|
||||
fputs("\\\"", self->out);
|
||||
break;
|
||||
case '\'':
|
||||
fputs("\\\'", self->out);
|
||||
break;
|
||||
default:
|
||||
putc(*str, self->out);
|
||||
}
|
||||
|
||||
@@ -14,6 +14,7 @@
|
||||
#include <stdbool.h>
|
||||
#include <stdint.h>
|
||||
#include <stdarg.h>
|
||||
#include <stdio.h>
|
||||
#include <linux/compiler.h>
|
||||
|
||||
/* Opaque class structure */
|
||||
|
||||
@@ -3,6 +3,8 @@
|
||||
|
||||
#include <errno.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/netfilter.h>
|
||||
#include <linux/netfilter_arp.h>
|
||||
#include <net/if.h>
|
||||
#include <stdio.h>
|
||||
#include <unistd.h>
|
||||
@@ -135,6 +137,18 @@ static void show_iter_json(struct bpf_link_info *info, json_writer_t *wtr)
|
||||
}
|
||||
}
|
||||
|
||||
void netfilter_dump_json(const struct bpf_link_info *info, json_writer_t *wtr)
|
||||
{
|
||||
jsonw_uint_field(json_wtr, "pf",
|
||||
info->netfilter.pf);
|
||||
jsonw_uint_field(json_wtr, "hook",
|
||||
info->netfilter.hooknum);
|
||||
jsonw_int_field(json_wtr, "prio",
|
||||
info->netfilter.priority);
|
||||
jsonw_uint_field(json_wtr, "flags",
|
||||
info->netfilter.flags);
|
||||
}
|
||||
|
||||
static int get_prog_info(int prog_id, struct bpf_prog_info *info)
|
||||
{
|
||||
__u32 len = sizeof(*info);
|
||||
@@ -195,6 +209,10 @@ static int show_link_close_json(int fd, struct bpf_link_info *info)
|
||||
info->netns.netns_ino);
|
||||
show_link_attach_type_json(info->netns.attach_type, json_wtr);
|
||||
break;
|
||||
case BPF_LINK_TYPE_NETFILTER:
|
||||
netfilter_dump_json(info, json_wtr);
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
@@ -263,6 +281,68 @@ static void show_iter_plain(struct bpf_link_info *info)
|
||||
}
|
||||
}
|
||||
|
||||
static const char * const pf2name[] = {
|
||||
[NFPROTO_INET] = "inet",
|
||||
[NFPROTO_IPV4] = "ip",
|
||||
[NFPROTO_ARP] = "arp",
|
||||
[NFPROTO_NETDEV] = "netdev",
|
||||
[NFPROTO_BRIDGE] = "bridge",
|
||||
[NFPROTO_IPV6] = "ip6",
|
||||
};
|
||||
|
||||
static const char * const inethook2name[] = {
|
||||
[NF_INET_PRE_ROUTING] = "prerouting",
|
||||
[NF_INET_LOCAL_IN] = "input",
|
||||
[NF_INET_FORWARD] = "forward",
|
||||
[NF_INET_LOCAL_OUT] = "output",
|
||||
[NF_INET_POST_ROUTING] = "postrouting",
|
||||
};
|
||||
|
||||
static const char * const arphook2name[] = {
|
||||
[NF_ARP_IN] = "input",
|
||||
[NF_ARP_OUT] = "output",
|
||||
};
|
||||
|
||||
void netfilter_dump_plain(const struct bpf_link_info *info)
|
||||
{
|
||||
const char *hookname = NULL, *pfname = NULL;
|
||||
unsigned int hook = info->netfilter.hooknum;
|
||||
unsigned int pf = info->netfilter.pf;
|
||||
|
||||
if (pf < ARRAY_SIZE(pf2name))
|
||||
pfname = pf2name[pf];
|
||||
|
||||
switch (pf) {
|
||||
case NFPROTO_BRIDGE: /* bridge shares numbers with enum nf_inet_hooks */
|
||||
case NFPROTO_IPV4:
|
||||
case NFPROTO_IPV6:
|
||||
case NFPROTO_INET:
|
||||
if (hook < ARRAY_SIZE(inethook2name))
|
||||
hookname = inethook2name[hook];
|
||||
break;
|
||||
case NFPROTO_ARP:
|
||||
if (hook < ARRAY_SIZE(arphook2name))
|
||||
hookname = arphook2name[hook];
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
if (pfname)
|
||||
printf("\n\t%s", pfname);
|
||||
else
|
||||
printf("\n\tpf: %d", pf);
|
||||
|
||||
if (hookname)
|
||||
printf(" %s", hookname);
|
||||
else
|
||||
printf(", hook %u,", hook);
|
||||
|
||||
printf(" prio %d", info->netfilter.priority);
|
||||
|
||||
if (info->netfilter.flags)
|
||||
printf(" flags 0x%x", info->netfilter.flags);
|
||||
}
|
||||
|
||||
static int show_link_close_plain(int fd, struct bpf_link_info *info)
|
||||
{
|
||||
struct bpf_prog_info prog_info;
|
||||
@@ -301,6 +381,9 @@ static int show_link_close_plain(int fd, struct bpf_link_info *info)
|
||||
printf("\n\tnetns_ino %u ", info->netns.netns_ino);
|
||||
show_link_attach_type_plain(info->netns.attach_type);
|
||||
break;
|
||||
case BPF_LINK_TYPE_NETFILTER:
|
||||
netfilter_dump_plain(info);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -229,6 +229,8 @@ void btf_dump_linfo_plain(const struct btf *btf,
|
||||
const char *prefix, bool linum);
|
||||
void btf_dump_linfo_json(const struct btf *btf,
|
||||
const struct bpf_line_info *linfo, bool linum);
|
||||
void btf_dump_linfo_dotlabel(const struct btf *btf,
|
||||
const struct bpf_line_info *linfo, bool linum);
|
||||
|
||||
struct nlattr;
|
||||
struct ifinfomsg;
|
||||
@@ -262,4 +264,10 @@ static inline bool hashmap__empty(struct hashmap *map)
|
||||
return map ? hashmap__size(map) == 0 : true;
|
||||
}
|
||||
|
||||
int pathname_concat(char *buf, int buf_sz, const char *path,
|
||||
const char *name);
|
||||
|
||||
/* print netfilter bpf_link info */
|
||||
void netfilter_dump_plain(const struct bpf_link_info *info);
|
||||
void netfilter_dump_json(const struct bpf_link_info *info, json_writer_t *wtr);
|
||||
#endif
|
||||
|
||||
@@ -647,6 +647,108 @@ static int do_detach(int argc, char **argv)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int netfilter_link_compar(const void *a, const void *b)
|
||||
{
|
||||
const struct bpf_link_info *nfa = a;
|
||||
const struct bpf_link_info *nfb = b;
|
||||
int delta;
|
||||
|
||||
delta = nfa->netfilter.pf - nfb->netfilter.pf;
|
||||
if (delta)
|
||||
return delta;
|
||||
|
||||
delta = nfa->netfilter.hooknum - nfb->netfilter.hooknum;
|
||||
if (delta)
|
||||
return delta;
|
||||
|
||||
if (nfa->netfilter.priority < nfb->netfilter.priority)
|
||||
return -1;
|
||||
if (nfa->netfilter.priority > nfb->netfilter.priority)
|
||||
return 1;
|
||||
|
||||
return nfa->netfilter.flags - nfb->netfilter.flags;
|
||||
}
|
||||
|
||||
static void show_link_netfilter(void)
|
||||
{
|
||||
unsigned int nf_link_len = 0, nf_link_count = 0;
|
||||
struct bpf_link_info *nf_link_info = NULL;
|
||||
__u32 id = 0;
|
||||
|
||||
while (true) {
|
||||
struct bpf_link_info info;
|
||||
int fd, err;
|
||||
__u32 len;
|
||||
|
||||
err = bpf_link_get_next_id(id, &id);
|
||||
if (err) {
|
||||
if (errno == ENOENT)
|
||||
break;
|
||||
p_err("can't get next link: %s (id %d)", strerror(errno), id);
|
||||
break;
|
||||
}
|
||||
|
||||
fd = bpf_link_get_fd_by_id(id);
|
||||
if (fd < 0) {
|
||||
p_err("can't get link by id (%u): %s", id, strerror(errno));
|
||||
continue;
|
||||
}
|
||||
|
||||
memset(&info, 0, sizeof(info));
|
||||
len = sizeof(info);
|
||||
|
||||
err = bpf_link_get_info_by_fd(fd, &info, &len);
|
||||
|
||||
close(fd);
|
||||
|
||||
if (err) {
|
||||
p_err("can't get link info for fd %d: %s", fd, strerror(errno));
|
||||
continue;
|
||||
}
|
||||
|
||||
if (info.type != BPF_LINK_TYPE_NETFILTER)
|
||||
continue;
|
||||
|
||||
if (nf_link_count >= nf_link_len) {
|
||||
static const unsigned int max_link_count = INT_MAX / sizeof(info);
|
||||
struct bpf_link_info *expand;
|
||||
|
||||
if (nf_link_count > max_link_count) {
|
||||
p_err("cannot handle more than %u links\n", max_link_count);
|
||||
break;
|
||||
}
|
||||
|
||||
nf_link_len += 16;
|
||||
|
||||
expand = realloc(nf_link_info, nf_link_len * sizeof(info));
|
||||
if (!expand) {
|
||||
p_err("realloc: %s", strerror(errno));
|
||||
break;
|
||||
}
|
||||
|
||||
nf_link_info = expand;
|
||||
}
|
||||
|
||||
nf_link_info[nf_link_count] = info;
|
||||
nf_link_count++;
|
||||
}
|
||||
|
||||
qsort(nf_link_info, nf_link_count, sizeof(*nf_link_info), netfilter_link_compar);
|
||||
|
||||
for (id = 0; id < nf_link_count; id++) {
|
||||
NET_START_OBJECT;
|
||||
if (json_output)
|
||||
netfilter_dump_json(&nf_link_info[id], json_wtr);
|
||||
else
|
||||
netfilter_dump_plain(&nf_link_info[id]);
|
||||
|
||||
NET_DUMP_UINT("id", " prog_id %u", nf_link_info[id].prog_id);
|
||||
NET_END_OBJECT;
|
||||
}
|
||||
|
||||
free(nf_link_info);
|
||||
}
|
||||
|
||||
static int do_show(int argc, char **argv)
|
||||
{
|
||||
struct bpf_attach_info attach_info = {};
|
||||
@@ -701,6 +803,10 @@ static int do_show(int argc, char **argv)
|
||||
NET_DUMP_UINT("id", "id %u", attach_info.flow_dissector_id);
|
||||
NET_END_ARRAY("\n");
|
||||
|
||||
NET_START_ARRAY("netfilter", "%s:\n");
|
||||
show_link_netfilter();
|
||||
NET_END_ARRAY("\n");
|
||||
|
||||
NET_END_OBJECT;
|
||||
if (json_output)
|
||||
jsonw_end_array(json_wtr);
|
||||
|
||||
@@ -840,11 +840,6 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
|
||||
false))
|
||||
goto exit_free;
|
||||
}
|
||||
} else if (visual) {
|
||||
if (json_output)
|
||||
jsonw_null(json_wtr);
|
||||
else
|
||||
dump_xlated_cfg(buf, member_len);
|
||||
} else {
|
||||
kernel_syms_load(&dd);
|
||||
dd.nr_jited_ksyms = info->nr_jited_ksyms;
|
||||
@@ -855,11 +850,11 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
|
||||
dd.prog_linfo = prog_linfo;
|
||||
|
||||
if (json_output)
|
||||
dump_xlated_json(&dd, buf, member_len, opcodes,
|
||||
linum);
|
||||
dump_xlated_json(&dd, buf, member_len, opcodes, linum);
|
||||
else if (visual)
|
||||
dump_xlated_cfg(&dd, buf, member_len, opcodes, linum);
|
||||
else
|
||||
dump_xlated_plain(&dd, buf, member_len, opcodes,
|
||||
linum);
|
||||
dump_xlated_plain(&dd, buf, member_len, opcodes, linum);
|
||||
kernel_syms_destroy(&dd);
|
||||
}
|
||||
|
||||
@@ -910,37 +905,46 @@ static int do_dump(int argc, char **argv)
|
||||
if (nb_fds < 1)
|
||||
goto exit_free;
|
||||
|
||||
if (is_prefix(*argv, "file")) {
|
||||
NEXT_ARG();
|
||||
if (!argc) {
|
||||
p_err("expected file path");
|
||||
goto exit_close;
|
||||
}
|
||||
if (nb_fds > 1) {
|
||||
p_err("several programs matched");
|
||||
goto exit_close;
|
||||
}
|
||||
while (argc) {
|
||||
if (is_prefix(*argv, "file")) {
|
||||
NEXT_ARG();
|
||||
if (!argc) {
|
||||
p_err("expected file path");
|
||||
goto exit_close;
|
||||
}
|
||||
if (nb_fds > 1) {
|
||||
p_err("several programs matched");
|
||||
goto exit_close;
|
||||
}
|
||||
|
||||
filepath = *argv;
|
||||
NEXT_ARG();
|
||||
} else if (is_prefix(*argv, "opcodes")) {
|
||||
opcodes = true;
|
||||
NEXT_ARG();
|
||||
} else if (is_prefix(*argv, "visual")) {
|
||||
if (nb_fds > 1) {
|
||||
p_err("several programs matched");
|
||||
filepath = *argv;
|
||||
NEXT_ARG();
|
||||
} else if (is_prefix(*argv, "opcodes")) {
|
||||
opcodes = true;
|
||||
NEXT_ARG();
|
||||
} else if (is_prefix(*argv, "visual")) {
|
||||
if (nb_fds > 1) {
|
||||
p_err("several programs matched");
|
||||
goto exit_close;
|
||||
}
|
||||
|
||||
visual = true;
|
||||
NEXT_ARG();
|
||||
} else if (is_prefix(*argv, "linum")) {
|
||||
linum = true;
|
||||
NEXT_ARG();
|
||||
} else {
|
||||
usage();
|
||||
goto exit_close;
|
||||
}
|
||||
|
||||
visual = true;
|
||||
NEXT_ARG();
|
||||
} else if (is_prefix(*argv, "linum")) {
|
||||
linum = true;
|
||||
NEXT_ARG();
|
||||
}
|
||||
|
||||
if (argc) {
|
||||
usage();
|
||||
if (filepath && (opcodes || visual || linum)) {
|
||||
p_err("'file' is not compatible with 'opcodes', 'visual', or 'linum'");
|
||||
goto exit_close;
|
||||
}
|
||||
if (json_output && visual) {
|
||||
p_err("'visual' is not compatible with JSON output");
|
||||
goto exit_close;
|
||||
}
|
||||
|
||||
@@ -1472,19 +1476,6 @@ auto_attach_program(struct bpf_program *prog, const char *path)
|
||||
return err;
|
||||
}
|
||||
|
||||
static int pathname_concat(char *buf, size_t buf_sz, const char *path, const char *name)
|
||||
{
|
||||
int len;
|
||||
|
||||
len = snprintf(buf, buf_sz, "%s/%s", path, name);
|
||||
if (len < 0)
|
||||
return -EINVAL;
|
||||
if ((size_t)len >= buf_sz)
|
||||
return -ENAMETOOLONG;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
auto_attach_programs(struct bpf_object *obj, const char *path)
|
||||
{
|
||||
@@ -1681,7 +1672,8 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
|
||||
}
|
||||
|
||||
bpf_program__set_ifindex(pos, ifindex);
|
||||
bpf_program__set_type(pos, prog_type);
|
||||
if (bpf_program__type(pos) != prog_type)
|
||||
bpf_program__set_type(pos, prog_type);
|
||||
bpf_program__set_expected_attach_type(pos, expected_attach_type);
|
||||
}
|
||||
|
||||
@@ -2420,8 +2412,8 @@ static int do_help(int argc, char **argv)
|
||||
|
||||
fprintf(stderr,
|
||||
"Usage: %1$s %2$s { show | list } [PROG]\n"
|
||||
" %1$s %2$s dump xlated PROG [{ file FILE | opcodes | visual | linum }]\n"
|
||||
" %1$s %2$s dump jited PROG [{ file FILE | opcodes | linum }]\n"
|
||||
" %1$s %2$s dump xlated PROG [{ file FILE | [opcodes] [linum] [visual] }]\n"
|
||||
" %1$s %2$s dump jited PROG [{ file FILE | [opcodes] [linum] }]\n"
|
||||
" %1$s %2$s pin PROG FILE\n"
|
||||
" %1$s %2$s { load | loadall } OBJ PATH \\\n"
|
||||
" [type TYPE] [dev NAME] \\\n"
|
||||
|
||||
@@ -475,21 +475,44 @@ static int do_unregister(int argc, char **argv)
|
||||
return cmd_retval(&res, true);
|
||||
}
|
||||
|
||||
static int pin_link(struct bpf_link *link, const char *pindir,
|
||||
const char *name)
|
||||
{
|
||||
char pinfile[PATH_MAX];
|
||||
int err;
|
||||
|
||||
err = pathname_concat(pinfile, sizeof(pinfile), pindir, name);
|
||||
if (err)
|
||||
return -1;
|
||||
|
||||
return bpf_link__pin(link, pinfile);
|
||||
}
|
||||
|
||||
static int do_register(int argc, char **argv)
|
||||
{
|
||||
LIBBPF_OPTS(bpf_object_open_opts, open_opts);
|
||||
__u32 link_info_len = sizeof(struct bpf_link_info);
|
||||
struct bpf_link_info link_info = {};
|
||||
struct bpf_map_info info = {};
|
||||
__u32 info_len = sizeof(info);
|
||||
int nr_errs = 0, nr_maps = 0;
|
||||
const char *linkdir = NULL;
|
||||
struct bpf_object *obj;
|
||||
struct bpf_link *link;
|
||||
struct bpf_map *map;
|
||||
const char *file;
|
||||
|
||||
if (argc != 1)
|
||||
if (argc != 1 && argc != 2)
|
||||
usage();
|
||||
|
||||
file = GET_ARG();
|
||||
if (argc == 1)
|
||||
linkdir = GET_ARG();
|
||||
|
||||
if (linkdir && mount_bpffs_for_pin(linkdir)) {
|
||||
p_err("can't mount bpffs for pinning");
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (verifier_logs)
|
||||
/* log_level1 + log_level2 + stats, but not stable UAPI */
|
||||
@@ -519,21 +542,44 @@ static int do_register(int argc, char **argv)
|
||||
}
|
||||
nr_maps++;
|
||||
|
||||
bpf_link__disconnect(link);
|
||||
bpf_link__destroy(link);
|
||||
|
||||
if (!bpf_map_get_info_by_fd(bpf_map__fd(map), &info,
|
||||
&info_len))
|
||||
p_info("Registered %s %s id %u",
|
||||
get_kern_struct_ops_name(&info),
|
||||
bpf_map__name(map),
|
||||
info.id);
|
||||
else
|
||||
if (bpf_map_get_info_by_fd(bpf_map__fd(map), &info,
|
||||
&info_len)) {
|
||||
/* Not p_err. The struct_ops was attached
|
||||
* successfully.
|
||||
*/
|
||||
p_info("Registered %s but can't find id: %s",
|
||||
bpf_map__name(map), strerror(errno));
|
||||
goto clean_link;
|
||||
}
|
||||
if (!(bpf_map__map_flags(map) & BPF_F_LINK)) {
|
||||
p_info("Registered %s %s id %u",
|
||||
get_kern_struct_ops_name(&info),
|
||||
info.name,
|
||||
info.id);
|
||||
goto clean_link;
|
||||
}
|
||||
if (bpf_link_get_info_by_fd(bpf_link__fd(link),
|
||||
&link_info,
|
||||
&link_info_len)) {
|
||||
p_err("Registered %s but can't find link id: %s",
|
||||
bpf_map__name(map), strerror(errno));
|
||||
nr_errs++;
|
||||
goto clean_link;
|
||||
}
|
||||
if (linkdir && pin_link(link, linkdir, info.name)) {
|
||||
p_err("can't pin link %u for %s: %s",
|
||||
link_info.id, info.name,
|
||||
strerror(errno));
|
||||
nr_errs++;
|
||||
goto clean_link;
|
||||
}
|
||||
p_info("Registered %s %s map id %u link id %u",
|
||||
get_kern_struct_ops_name(&info),
|
||||
info.name, info.id, link_info.id);
|
||||
|
||||
clean_link:
|
||||
bpf_link__disconnect(link);
|
||||
bpf_link__destroy(link);
|
||||
}
|
||||
|
||||
bpf_object__close(obj);
|
||||
@@ -562,7 +608,7 @@ static int do_help(int argc, char **argv)
|
||||
fprintf(stderr,
|
||||
"Usage: %1$s %2$s { show | list } [STRUCT_OPS_MAP]\n"
|
||||
" %1$s %2$s dump [STRUCT_OPS_MAP]\n"
|
||||
" %1$s %2$s register OBJ\n"
|
||||
" %1$s %2$s register OBJ [LINK_DIR]\n"
|
||||
" %1$s %2$s unregister STRUCT_OPS_MAP\n"
|
||||
" %1$s %2$s help\n"
|
||||
"\n"
|
||||
|
||||
@@ -361,7 +361,8 @@ void dump_xlated_plain(struct dump_data *dd, void *buf, unsigned int len,
|
||||
}
|
||||
|
||||
void dump_xlated_for_graph(struct dump_data *dd, void *buf_start, void *buf_end,
|
||||
unsigned int start_idx)
|
||||
unsigned int start_idx,
|
||||
bool opcodes, bool linum)
|
||||
{
|
||||
const struct bpf_insn_cbs cbs = {
|
||||
.cb_print = print_insn_for_graph,
|
||||
@@ -369,14 +370,61 @@ void dump_xlated_for_graph(struct dump_data *dd, void *buf_start, void *buf_end,
|
||||
.cb_imm = print_imm,
|
||||
.private_data = dd,
|
||||
};
|
||||
const struct bpf_prog_linfo *prog_linfo = dd->prog_linfo;
|
||||
const struct bpf_line_info *last_linfo = NULL;
|
||||
struct bpf_func_info *record = dd->func_info;
|
||||
struct bpf_insn *insn_start = buf_start;
|
||||
struct bpf_insn *insn_end = buf_end;
|
||||
struct bpf_insn *cur = insn_start;
|
||||
struct btf *btf = dd->btf;
|
||||
bool double_insn = false;
|
||||
char func_sig[1024];
|
||||
|
||||
for (; cur <= insn_end; cur++) {
|
||||
printf("% 4d: ", (int)(cur - insn_start + start_idx));
|
||||
unsigned int insn_off;
|
||||
|
||||
if (double_insn) {
|
||||
double_insn = false;
|
||||
continue;
|
||||
}
|
||||
double_insn = cur->code == (BPF_LD | BPF_IMM | BPF_DW);
|
||||
|
||||
insn_off = (unsigned int)(cur - insn_start + start_idx);
|
||||
if (btf && record) {
|
||||
if (record->insn_off == insn_off) {
|
||||
btf_dumper_type_only(btf, record->type_id,
|
||||
func_sig,
|
||||
sizeof(func_sig));
|
||||
if (func_sig[0] != '\0')
|
||||
printf("; %s:\\l\\\n", func_sig);
|
||||
record = (void *)record + dd->finfo_rec_size;
|
||||
}
|
||||
}
|
||||
|
||||
if (prog_linfo) {
|
||||
const struct bpf_line_info *linfo;
|
||||
|
||||
linfo = bpf_prog_linfo__lfind(prog_linfo, insn_off, 0);
|
||||
if (linfo && linfo != last_linfo) {
|
||||
btf_dump_linfo_dotlabel(btf, linfo, linum);
|
||||
last_linfo = linfo;
|
||||
}
|
||||
}
|
||||
|
||||
printf("%d: ", insn_off);
|
||||
print_bpf_insn(&cbs, cur, true);
|
||||
|
||||
if (opcodes) {
|
||||
printf("\\ \\ \\ \\ ");
|
||||
fprint_hex(stdout, cur, 8, " ");
|
||||
if (double_insn && cur <= insn_end - 1) {
|
||||
printf(" ");
|
||||
fprint_hex(stdout, cur + 1, 8, " ");
|
||||
}
|
||||
printf("\\l\\\n");
|
||||
}
|
||||
|
||||
if (cur != insn_end)
|
||||
printf(" | ");
|
||||
printf("| ");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -34,6 +34,7 @@ void dump_xlated_json(struct dump_data *dd, void *buf, unsigned int len,
|
||||
void dump_xlated_plain(struct dump_data *dd, void *buf, unsigned int len,
|
||||
bool opcodes, bool linum);
|
||||
void dump_xlated_for_graph(struct dump_data *dd, void *buf, void *buf_end,
|
||||
unsigned int start_index);
|
||||
unsigned int start_index,
|
||||
bool opcodes, bool linum);
|
||||
|
||||
#endif
|
||||
|
||||
1
tools/bpf/resolve_btfids/.gitignore
vendored
1
tools/bpf/resolve_btfids/.gitignore
vendored
@@ -1,3 +1,4 @@
|
||||
/fixdep
|
||||
/resolve_btfids
|
||||
/libbpf/
|
||||
/libsubcmd/
|
||||
|
||||
@@ -986,6 +986,7 @@ enum bpf_prog_type {
|
||||
BPF_PROG_TYPE_LSM,
|
||||
BPF_PROG_TYPE_SK_LOOKUP,
|
||||
BPF_PROG_TYPE_SYSCALL, /* a program that can execute syscalls */
|
||||
BPF_PROG_TYPE_NETFILTER,
|
||||
};
|
||||
|
||||
enum bpf_attach_type {
|
||||
@@ -1033,6 +1034,7 @@ enum bpf_attach_type {
|
||||
BPF_PERF_EVENT,
|
||||
BPF_TRACE_KPROBE_MULTI,
|
||||
BPF_LSM_CGROUP,
|
||||
BPF_STRUCT_OPS,
|
||||
__MAX_BPF_ATTACH_TYPE
|
||||
};
|
||||
|
||||
@@ -1049,6 +1051,7 @@ enum bpf_link_type {
|
||||
BPF_LINK_TYPE_PERF_EVENT = 7,
|
||||
BPF_LINK_TYPE_KPROBE_MULTI = 8,
|
||||
BPF_LINK_TYPE_STRUCT_OPS = 9,
|
||||
BPF_LINK_TYPE_NETFILTER = 10,
|
||||
|
||||
MAX_BPF_LINK_TYPE,
|
||||
};
|
||||
@@ -1108,7 +1111,7 @@ enum bpf_link_type {
|
||||
*/
|
||||
#define BPF_F_STRICT_ALIGNMENT (1U << 0)
|
||||
|
||||
/* If BPF_F_ANY_ALIGNMENT is used in BPF_PROF_LOAD command, the
|
||||
/* If BPF_F_ANY_ALIGNMENT is used in BPF_PROG_LOAD command, the
|
||||
* verifier will allow any alignment whatsoever. On platforms
|
||||
* with strict alignment requirements for loads ands stores (such
|
||||
* as sparc and mips) the verifier validates that all loads and
|
||||
@@ -1266,6 +1269,9 @@ enum {
|
||||
|
||||
/* Create a map that is suitable to be an inner map with dynamic max entries */
|
||||
BPF_F_INNER_MAP = (1U << 12),
|
||||
|
||||
/* Create a map that will be registered/unregesitered by the backed bpf_link */
|
||||
BPF_F_LINK = (1U << 13),
|
||||
};
|
||||
|
||||
/* Flags for BPF_PROG_QUERY. */
|
||||
@@ -1403,6 +1409,11 @@ union bpf_attr {
|
||||
__aligned_u64 fd_array; /* array of FDs */
|
||||
__aligned_u64 core_relos;
|
||||
__u32 core_relo_rec_size; /* sizeof(struct bpf_core_relo) */
|
||||
/* output: actual total log contents size (including termintaing zero).
|
||||
* It could be both larger than original log_size (if log was
|
||||
* truncated), or smaller (if log buffer wasn't filled completely).
|
||||
*/
|
||||
__u32 log_true_size;
|
||||
};
|
||||
|
||||
struct { /* anonymous struct used by BPF_OBJ_* commands */
|
||||
@@ -1488,6 +1499,11 @@ union bpf_attr {
|
||||
__u32 btf_size;
|
||||
__u32 btf_log_size;
|
||||
__u32 btf_log_level;
|
||||
/* output: actual total log contents size (including termintaing zero).
|
||||
* It could be both larger than original log_size (if log was
|
||||
* truncated), or smaller (if log buffer wasn't filled completely).
|
||||
*/
|
||||
__u32 btf_log_true_size;
|
||||
};
|
||||
|
||||
struct {
|
||||
@@ -1507,7 +1523,10 @@ union bpf_attr {
|
||||
} task_fd_query;
|
||||
|
||||
struct { /* struct used by BPF_LINK_CREATE command */
|
||||
__u32 prog_fd; /* eBPF program to attach */
|
||||
union {
|
||||
__u32 prog_fd; /* eBPF program to attach */
|
||||
__u32 map_fd; /* struct_ops to attach */
|
||||
};
|
||||
union {
|
||||
__u32 target_fd; /* object to attach to */
|
||||
__u32 target_ifindex; /* target ifindex */
|
||||
@@ -1543,17 +1562,34 @@ union bpf_attr {
|
||||
*/
|
||||
__u64 cookie;
|
||||
} tracing;
|
||||
struct {
|
||||
__u32 pf;
|
||||
__u32 hooknum;
|
||||
__s32 priority;
|
||||
__u32 flags;
|
||||
} netfilter;
|
||||
};
|
||||
} link_create;
|
||||
|
||||
struct { /* struct used by BPF_LINK_UPDATE command */
|
||||
__u32 link_fd; /* link fd */
|
||||
/* new program fd to update link with */
|
||||
__u32 new_prog_fd;
|
||||
union {
|
||||
/* new program fd to update link with */
|
||||
__u32 new_prog_fd;
|
||||
/* new struct_ops map fd to update link with */
|
||||
__u32 new_map_fd;
|
||||
};
|
||||
__u32 flags; /* extra flags */
|
||||
/* expected link's program fd; is specified only if
|
||||
* BPF_F_REPLACE flag is set in flags */
|
||||
__u32 old_prog_fd;
|
||||
union {
|
||||
/* expected link's program fd; is specified only if
|
||||
* BPF_F_REPLACE flag is set in flags.
|
||||
*/
|
||||
__u32 old_prog_fd;
|
||||
/* expected link's map fd; is specified only
|
||||
* if BPF_F_REPLACE flag is set.
|
||||
*/
|
||||
__u32 old_map_fd;
|
||||
};
|
||||
} link_update;
|
||||
|
||||
struct {
|
||||
@@ -1647,17 +1683,17 @@ union bpf_attr {
|
||||
* Description
|
||||
* This helper is a "printk()-like" facility for debugging. It
|
||||
* prints a message defined by format *fmt* (of size *fmt_size*)
|
||||
* to file *\/sys/kernel/debug/tracing/trace* from DebugFS, if
|
||||
* to file *\/sys/kernel/tracing/trace* from TraceFS, if
|
||||
* available. It can take up to three additional **u64**
|
||||
* arguments (as an eBPF helpers, the total number of arguments is
|
||||
* limited to five).
|
||||
*
|
||||
* Each time the helper is called, it appends a line to the trace.
|
||||
* Lines are discarded while *\/sys/kernel/debug/tracing/trace* is
|
||||
* open, use *\/sys/kernel/debug/tracing/trace_pipe* to avoid this.
|
||||
* Lines are discarded while *\/sys/kernel/tracing/trace* is
|
||||
* open, use *\/sys/kernel/tracing/trace_pipe* to avoid this.
|
||||
* The format of the trace is customizable, and the exact output
|
||||
* one will get depends on the options set in
|
||||
* *\/sys/kernel/debug/tracing/trace_options* (see also the
|
||||
* *\/sys/kernel/tracing/trace_options* (see also the
|
||||
* *README* file under the same directory). However, it usually
|
||||
* defaults to something like:
|
||||
*
|
||||
@@ -4969,6 +5005,12 @@ union bpf_attr {
|
||||
* different maps if key/value layout matches across maps.
|
||||
* Every bpf_timer_set_callback() can have different callback_fn.
|
||||
*
|
||||
* *flags* can be one of:
|
||||
*
|
||||
* **BPF_F_TIMER_ABS**
|
||||
* Start the timer in absolute expire value instead of the
|
||||
* default relative one.
|
||||
*
|
||||
* Return
|
||||
* 0 on success.
|
||||
* **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier
|
||||
@@ -5325,11 +5367,22 @@ union bpf_attr {
|
||||
* Description
|
||||
* Write *len* bytes from *src* into *dst*, starting from *offset*
|
||||
* into *dst*.
|
||||
* *flags* is currently unused.
|
||||
*
|
||||
* *flags* must be 0 except for skb-type dynptrs.
|
||||
*
|
||||
* For skb-type dynptrs:
|
||||
* * All data slices of the dynptr are automatically
|
||||
* invalidated after **bpf_dynptr_write**\ (). This is
|
||||
* because writing may pull the skb and change the
|
||||
* underlying packet buffer.
|
||||
*
|
||||
* * For *flags*, please see the flags accepted by
|
||||
* **bpf_skb_store_bytes**\ ().
|
||||
* Return
|
||||
* 0 on success, -E2BIG if *offset* + *len* exceeds the length
|
||||
* of *dst*'s data, -EINVAL if *dst* is an invalid dynptr or if *dst*
|
||||
* is a read-only dynptr or if *flags* is not 0.
|
||||
* is a read-only dynptr or if *flags* is not correct. For skb-type dynptrs,
|
||||
* other errors correspond to errors returned by **bpf_skb_store_bytes**\ ().
|
||||
*
|
||||
* void *bpf_dynptr_data(const struct bpf_dynptr *ptr, u32 offset, u32 len)
|
||||
* Description
|
||||
@@ -5337,6 +5390,9 @@ union bpf_attr {
|
||||
*
|
||||
* *len* must be a statically known value. The returned data slice
|
||||
* is invalidated whenever the dynptr is invalidated.
|
||||
*
|
||||
* skb and xdp type dynptrs may not use bpf_dynptr_data. They should
|
||||
* instead use bpf_dynptr_slice and bpf_dynptr_slice_rdwr.
|
||||
* Return
|
||||
* Pointer to the underlying dynptr data, NULL if the dynptr is
|
||||
* read-only, if the dynptr is invalid, or if the offset and length
|
||||
@@ -6359,6 +6415,15 @@ struct bpf_link_info {
|
||||
struct {
|
||||
__u32 ifindex;
|
||||
} xdp;
|
||||
struct {
|
||||
__u32 map_id;
|
||||
} struct_ops;
|
||||
struct {
|
||||
__u32 pf;
|
||||
__u32 hooknum;
|
||||
__s32 priority;
|
||||
__u32 flags;
|
||||
} netfilter;
|
||||
};
|
||||
} __attribute__((aligned(8)));
|
||||
|
||||
@@ -6934,6 +6999,10 @@ struct bpf_rb_node {
|
||||
__u64 :64;
|
||||
} __attribute__((aligned(8)));
|
||||
|
||||
struct bpf_refcount {
|
||||
__u32 :32;
|
||||
} __attribute__((aligned(4)));
|
||||
|
||||
struct bpf_sysctl {
|
||||
__u32 write; /* Sysctl is being read (= 0) or written (= 1).
|
||||
* Allows 1,2,4-byte read, but no write.
|
||||
@@ -7083,4 +7152,21 @@ struct bpf_core_relo {
|
||||
enum bpf_core_relo_kind kind;
|
||||
};
|
||||
|
||||
/*
|
||||
* Flags to control bpf_timer_start() behaviour.
|
||||
* - BPF_F_TIMER_ABS: Timeout passed is absolute time, by default it is
|
||||
* relative to current time.
|
||||
*/
|
||||
enum {
|
||||
BPF_F_TIMER_ABS = (1ULL << 0),
|
||||
};
|
||||
|
||||
/* BPF numbers iterator state */
|
||||
struct bpf_iter_num {
|
||||
/* opaque iterator state; having __u64 here allows to preserve correct
|
||||
* alignment requirements in vmlinux.h, generated from BTF
|
||||
*/
|
||||
__u64 __opaque[1];
|
||||
} __attribute__((aligned(8)));
|
||||
|
||||
#endif /* _UAPI__LINUX_BPF_H__ */
|
||||
|
||||
@@ -605,6 +605,7 @@ enum {
|
||||
IFLA_MACVLAN_MACADDR_COUNT,
|
||||
IFLA_MACVLAN_BC_QUEUE_LEN,
|
||||
IFLA_MACVLAN_BC_QUEUE_LEN_USED,
|
||||
IFLA_MACVLAN_BC_CUTOFF,
|
||||
__IFLA_MACVLAN_MAX,
|
||||
};
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o \
|
||||
netlink.o bpf_prog_linfo.o libbpf_probes.o hashmap.o \
|
||||
btf_dump.o ringbuf.o strset.o linker.o gen_loader.o relo_core.o \
|
||||
usdt.o
|
||||
usdt.o zip.o
|
||||
|
||||
@@ -230,9 +230,9 @@ alloc_zero_tailing_info(const void *orecord, __u32 cnt,
|
||||
int bpf_prog_load(enum bpf_prog_type prog_type,
|
||||
const char *prog_name, const char *license,
|
||||
const struct bpf_insn *insns, size_t insn_cnt,
|
||||
const struct bpf_prog_load_opts *opts)
|
||||
struct bpf_prog_load_opts *opts)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, fd_array);
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, log_true_size);
|
||||
void *finfo = NULL, *linfo = NULL;
|
||||
const char *func_info, *line_info;
|
||||
__u32 log_size, log_level, attach_prog_fd, attach_btf_obj_fd;
|
||||
@@ -290,10 +290,6 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
|
||||
|
||||
if (!!log_buf != !!log_size)
|
||||
return libbpf_err(-EINVAL);
|
||||
if (log_level > (4 | 2 | 1))
|
||||
return libbpf_err(-EINVAL);
|
||||
if (log_level && !log_buf)
|
||||
return libbpf_err(-EINVAL);
|
||||
|
||||
func_info_rec_size = OPTS_GET(opts, func_info_rec_size, 0);
|
||||
func_info = OPTS_GET(opts, func_info, NULL);
|
||||
@@ -316,6 +312,7 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
|
||||
}
|
||||
|
||||
fd = sys_bpf_prog_load(&attr, attr_sz, attempts);
|
||||
OPTS_SET(opts, log_true_size, attr.log_true_size);
|
||||
if (fd >= 0)
|
||||
return fd;
|
||||
|
||||
@@ -356,6 +353,7 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
|
||||
}
|
||||
|
||||
fd = sys_bpf_prog_load(&attr, attr_sz, attempts);
|
||||
OPTS_SET(opts, log_true_size, attr.log_true_size);
|
||||
if (fd >= 0)
|
||||
goto done;
|
||||
}
|
||||
@@ -370,6 +368,7 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
|
||||
attr.log_level = 1;
|
||||
|
||||
fd = sys_bpf_prog_load(&attr, attr_sz, attempts);
|
||||
OPTS_SET(opts, log_true_size, attr.log_true_size);
|
||||
}
|
||||
done:
|
||||
/* free() doesn't affect errno, so we don't need to restore it */
|
||||
@@ -794,11 +793,17 @@ int bpf_link_update(int link_fd, int new_prog_fd,
|
||||
if (!OPTS_VALID(opts, bpf_link_update_opts))
|
||||
return libbpf_err(-EINVAL);
|
||||
|
||||
if (OPTS_GET(opts, old_prog_fd, 0) && OPTS_GET(opts, old_map_fd, 0))
|
||||
return libbpf_err(-EINVAL);
|
||||
|
||||
memset(&attr, 0, attr_sz);
|
||||
attr.link_update.link_fd = link_fd;
|
||||
attr.link_update.new_prog_fd = new_prog_fd;
|
||||
attr.link_update.flags = OPTS_GET(opts, flags, 0);
|
||||
attr.link_update.old_prog_fd = OPTS_GET(opts, old_prog_fd, 0);
|
||||
if (OPTS_GET(opts, old_prog_fd, 0))
|
||||
attr.link_update.old_prog_fd = OPTS_GET(opts, old_prog_fd, 0);
|
||||
else if (OPTS_GET(opts, old_map_fd, 0))
|
||||
attr.link_update.old_map_fd = OPTS_GET(opts, old_map_fd, 0);
|
||||
|
||||
ret = sys_bpf(BPF_LINK_UPDATE, &attr, attr_sz);
|
||||
return libbpf_err_errno(ret);
|
||||
@@ -1078,9 +1083,9 @@ int bpf_raw_tracepoint_open(const char *name, int prog_fd)
|
||||
return libbpf_err_errno(fd);
|
||||
}
|
||||
|
||||
int bpf_btf_load(const void *btf_data, size_t btf_size, const struct bpf_btf_load_opts *opts)
|
||||
int bpf_btf_load(const void *btf_data, size_t btf_size, struct bpf_btf_load_opts *opts)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, btf_log_level);
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, btf_log_true_size);
|
||||
union bpf_attr attr;
|
||||
char *log_buf;
|
||||
size_t log_size;
|
||||
@@ -1123,6 +1128,8 @@ int bpf_btf_load(const void *btf_data, size_t btf_size, const struct bpf_btf_loa
|
||||
attr.btf_log_level = 1;
|
||||
fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, attr_sz);
|
||||
}
|
||||
|
||||
OPTS_SET(opts, log_true_size, attr.btf_log_true_size);
|
||||
return libbpf_err_errno(fd);
|
||||
}
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
|
||||
|
||||
/*
|
||||
* common eBPF ELF operations.
|
||||
* Common BPF ELF operations.
|
||||
*
|
||||
* Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
|
||||
* Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
|
||||
@@ -96,13 +96,20 @@ struct bpf_prog_load_opts {
|
||||
__u32 log_level;
|
||||
__u32 log_size;
|
||||
char *log_buf;
|
||||
/* output: actual total log contents size (including termintaing zero).
|
||||
* It could be both larger than original log_size (if log was
|
||||
* truncated), or smaller (if log buffer wasn't filled completely).
|
||||
* If kernel doesn't support this feature, log_size is left unchanged.
|
||||
*/
|
||||
__u32 log_true_size;
|
||||
size_t :0;
|
||||
};
|
||||
#define bpf_prog_load_opts__last_field log_buf
|
||||
#define bpf_prog_load_opts__last_field log_true_size
|
||||
|
||||
LIBBPF_API int bpf_prog_load(enum bpf_prog_type prog_type,
|
||||
const char *prog_name, const char *license,
|
||||
const struct bpf_insn *insns, size_t insn_cnt,
|
||||
const struct bpf_prog_load_opts *opts);
|
||||
struct bpf_prog_load_opts *opts);
|
||||
|
||||
/* Flags to direct loading requirements */
|
||||
#define MAPS_RELAX_COMPAT 0x01
|
||||
@@ -117,11 +124,18 @@ struct bpf_btf_load_opts {
|
||||
char *log_buf;
|
||||
__u32 log_level;
|
||||
__u32 log_size;
|
||||
/* output: actual total log contents size (including termintaing zero).
|
||||
* It could be both larger than original log_size (if log was
|
||||
* truncated), or smaller (if log buffer wasn't filled completely).
|
||||
* If kernel doesn't support this feature, log_size is left unchanged.
|
||||
*/
|
||||
__u32 log_true_size;
|
||||
size_t :0;
|
||||
};
|
||||
#define bpf_btf_load_opts__last_field log_size
|
||||
#define bpf_btf_load_opts__last_field log_true_size
|
||||
|
||||
LIBBPF_API int bpf_btf_load(const void *btf_data, size_t btf_size,
|
||||
const struct bpf_btf_load_opts *opts);
|
||||
struct bpf_btf_load_opts *opts);
|
||||
|
||||
LIBBPF_API int bpf_map_update_elem(int fd, const void *key, const void *value,
|
||||
__u64 flags);
|
||||
@@ -336,8 +350,9 @@ struct bpf_link_update_opts {
|
||||
size_t sz; /* size of this struct for forward/backward compatibility */
|
||||
__u32 flags; /* extra flags */
|
||||
__u32 old_prog_fd; /* expected old program FD */
|
||||
__u32 old_map_fd; /* expected old map FD */
|
||||
};
|
||||
#define bpf_link_update_opts__last_field old_prog_fd
|
||||
#define bpf_link_update_opts__last_field old_map_fd
|
||||
|
||||
LIBBPF_API int bpf_link_update(int link_fd, int new_prog_fd,
|
||||
const struct bpf_link_update_opts *opts);
|
||||
@@ -386,14 +401,73 @@ LIBBPF_API int bpf_link_get_fd_by_id(__u32 id);
|
||||
LIBBPF_API int bpf_link_get_fd_by_id_opts(__u32 id,
|
||||
const struct bpf_get_fd_by_id_opts *opts);
|
||||
LIBBPF_API int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len);
|
||||
/* Type-safe variants of bpf_obj_get_info_by_fd(). The callers still needs to
|
||||
* pass info_len, which should normally be
|
||||
* sizeof(struct bpf_{prog,map,btf,link}_info), in order to be compatible with
|
||||
* different libbpf and kernel versions.
|
||||
|
||||
/**
|
||||
* @brief **bpf_prog_get_info_by_fd()** obtains information about the BPF
|
||||
* program corresponding to *prog_fd*.
|
||||
*
|
||||
* Populates up to *info_len* bytes of *info* and updates *info_len* with the
|
||||
* actual number of bytes written to *info*.
|
||||
*
|
||||
* @param prog_fd BPF program file descriptor
|
||||
* @param info pointer to **struct bpf_prog_info** that will be populated with
|
||||
* BPF program information
|
||||
* @param info_len pointer to the size of *info*; on success updated with the
|
||||
* number of bytes written to *info*
|
||||
* @return 0, on success; negative error code, otherwise (errno is also set to
|
||||
* the error code)
|
||||
*/
|
||||
LIBBPF_API int bpf_prog_get_info_by_fd(int prog_fd, struct bpf_prog_info *info, __u32 *info_len);
|
||||
|
||||
/**
|
||||
* @brief **bpf_map_get_info_by_fd()** obtains information about the BPF
|
||||
* map corresponding to *map_fd*.
|
||||
*
|
||||
* Populates up to *info_len* bytes of *info* and updates *info_len* with the
|
||||
* actual number of bytes written to *info*.
|
||||
*
|
||||
* @param map_fd BPF map file descriptor
|
||||
* @param info pointer to **struct bpf_map_info** that will be populated with
|
||||
* BPF map information
|
||||
* @param info_len pointer to the size of *info*; on success updated with the
|
||||
* number of bytes written to *info*
|
||||
* @return 0, on success; negative error code, otherwise (errno is also set to
|
||||
* the error code)
|
||||
*/
|
||||
LIBBPF_API int bpf_map_get_info_by_fd(int map_fd, struct bpf_map_info *info, __u32 *info_len);
|
||||
|
||||
/**
|
||||
* @brief **bpf_btf_get_info_by_fd()** obtains information about the
|
||||
* BTF object corresponding to *btf_fd*.
|
||||
*
|
||||
* Populates up to *info_len* bytes of *info* and updates *info_len* with the
|
||||
* actual number of bytes written to *info*.
|
||||
*
|
||||
* @param btf_fd BTF object file descriptor
|
||||
* @param info pointer to **struct bpf_btf_info** that will be populated with
|
||||
* BTF object information
|
||||
* @param info_len pointer to the size of *info*; on success updated with the
|
||||
* number of bytes written to *info*
|
||||
* @return 0, on success; negative error code, otherwise (errno is also set to
|
||||
* the error code)
|
||||
*/
|
||||
LIBBPF_API int bpf_btf_get_info_by_fd(int btf_fd, struct bpf_btf_info *info, __u32 *info_len);
|
||||
|
||||
/**
|
||||
* @brief **bpf_btf_get_info_by_fd()** obtains information about the BPF
|
||||
* link corresponding to *link_fd*.
|
||||
*
|
||||
* Populates up to *info_len* bytes of *info* and updates *info_len* with the
|
||||
* actual number of bytes written to *info*.
|
||||
*
|
||||
* @param link_fd BPF link file descriptor
|
||||
* @param info pointer to **struct bpf_link_info** that will be populated with
|
||||
* BPF link information
|
||||
* @param info_len pointer to the size of *info*; on success updated with the
|
||||
* number of bytes written to *info*
|
||||
* @return 0, on success; negative error code, otherwise (errno is also set to
|
||||
* the error code)
|
||||
*/
|
||||
LIBBPF_API int bpf_link_get_info_by_fd(int link_fd, struct bpf_link_info *info, __u32 *info_len);
|
||||
|
||||
struct bpf_prog_query_opts {
|
||||
|
||||
@@ -11,6 +11,7 @@ struct ksym_relo_desc {
|
||||
int insn_idx;
|
||||
bool is_weak;
|
||||
bool is_typeless;
|
||||
bool is_ld64;
|
||||
};
|
||||
|
||||
struct ksym_desc {
|
||||
@@ -24,6 +25,7 @@ struct ksym_desc {
|
||||
bool typeless;
|
||||
};
|
||||
int insn;
|
||||
bool is_ld64;
|
||||
};
|
||||
|
||||
struct bpf_gen {
|
||||
@@ -65,7 +67,7 @@ void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *value, __u
|
||||
void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx);
|
||||
void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *name, enum bpf_attach_type type);
|
||||
void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak,
|
||||
bool is_typeless, int kind, int insn_idx);
|
||||
bool is_typeless, bool is_ld64, int kind, int insn_idx);
|
||||
void bpf_gen__record_relo_core(struct bpf_gen *gen, const struct bpf_core_relo *core_relo);
|
||||
void bpf_gen__populate_outer_map(struct bpf_gen *gen, int outer_map_idx, int key, int inner_map_idx);
|
||||
|
||||
|
||||
@@ -174,8 +174,13 @@ enum libbpf_tristate {
|
||||
|
||||
#define __kconfig __attribute__((section(".kconfig")))
|
||||
#define __ksym __attribute__((section(".ksyms")))
|
||||
#define __kptr_untrusted __attribute__((btf_type_tag("kptr_untrusted")))
|
||||
#define __kptr __attribute__((btf_type_tag("kptr")))
|
||||
#define __kptr_ref __attribute__((btf_type_tag("kptr_ref")))
|
||||
|
||||
#define bpf_ksym_exists(sym) ({ \
|
||||
_Static_assert(!__builtin_constant_p(!!sym), #sym " should be marked as __weak"); \
|
||||
!!sym; \
|
||||
})
|
||||
|
||||
#ifndef ___bpf_concat
|
||||
#define ___bpf_concat(a, b) a ## b
|
||||
@@ -286,4 +291,107 @@ enum libbpf_tristate {
|
||||
/* Helper macro to print out debug messages */
|
||||
#define bpf_printk(fmt, args...) ___bpf_pick_printk(args)(fmt, ##args)
|
||||
|
||||
struct bpf_iter_num;
|
||||
|
||||
extern int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end) __weak __ksym;
|
||||
extern int *bpf_iter_num_next(struct bpf_iter_num *it) __weak __ksym;
|
||||
extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __weak __ksym;
|
||||
|
||||
#ifndef bpf_for_each
|
||||
/* bpf_for_each(iter_type, cur_elem, args...) provides generic construct for
|
||||
* using BPF open-coded iterators without having to write mundane explicit
|
||||
* low-level loop logic. Instead, it provides for()-like generic construct
|
||||
* that can be used pretty naturally. E.g., for some hypothetical cgroup
|
||||
* iterator, you'd write:
|
||||
*
|
||||
* struct cgroup *cg, *parent_cg = <...>;
|
||||
*
|
||||
* bpf_for_each(cgroup, cg, parent_cg, CG_ITER_CHILDREN) {
|
||||
* bpf_printk("Child cgroup id = %d", cg->cgroup_id);
|
||||
* if (cg->cgroup_id == 123)
|
||||
* break;
|
||||
* }
|
||||
*
|
||||
* I.e., it looks almost like high-level for each loop in other languages,
|
||||
* supports continue/break, and is verifiable by BPF verifier.
|
||||
*
|
||||
* For iterating integers, the difference betwen bpf_for_each(num, i, N, M)
|
||||
* and bpf_for(i, N, M) is in that bpf_for() provides additional proof to
|
||||
* verifier that i is in [N, M) range, and in bpf_for_each() case i is `int
|
||||
* *`, not just `int`. So for integers bpf_for() is more convenient.
|
||||
*
|
||||
* Note: this macro relies on C99 feature of allowing to declare variables
|
||||
* inside for() loop, bound to for() loop lifetime. It also utilizes GCC
|
||||
* extension: __attribute__((cleanup(<func>))), supported by both GCC and
|
||||
* Clang.
|
||||
*/
|
||||
#define bpf_for_each(type, cur, args...) for ( \
|
||||
/* initialize and define destructor */ \
|
||||
struct bpf_iter_##type ___it __attribute__((aligned(8), /* enforce, just in case */, \
|
||||
cleanup(bpf_iter_##type##_destroy))), \
|
||||
/* ___p pointer is just to call bpf_iter_##type##_new() *once* to init ___it */ \
|
||||
*___p __attribute__((unused)) = ( \
|
||||
bpf_iter_##type##_new(&___it, ##args), \
|
||||
/* this is a workaround for Clang bug: it currently doesn't emit BTF */ \
|
||||
/* for bpf_iter_##type##_destroy() when used from cleanup() attribute */ \
|
||||
(void)bpf_iter_##type##_destroy, (void *)0); \
|
||||
/* iteration and termination check */ \
|
||||
(((cur) = bpf_iter_##type##_next(&___it))); \
|
||||
)
|
||||
#endif /* bpf_for_each */
|
||||
|
||||
#ifndef bpf_for
|
||||
/* bpf_for(i, start, end) implements a for()-like looping construct that sets
|
||||
* provided integer variable *i* to values starting from *start* through,
|
||||
* but not including, *end*. It also proves to BPF verifier that *i* belongs
|
||||
* to range [start, end), so this can be used for accessing arrays without
|
||||
* extra checks.
|
||||
*
|
||||
* Note: *start* and *end* are assumed to be expressions with no side effects
|
||||
* and whose values do not change throughout bpf_for() loop execution. They do
|
||||
* not have to be statically known or constant, though.
|
||||
*
|
||||
* Note: similarly to bpf_for_each(), it relies on C99 feature of declaring for()
|
||||
* loop bound variables and cleanup attribute, supported by GCC and Clang.
|
||||
*/
|
||||
#define bpf_for(i, start, end) for ( \
|
||||
/* initialize and define destructor */ \
|
||||
struct bpf_iter_num ___it __attribute__((aligned(8), /* enforce, just in case */ \
|
||||
cleanup(bpf_iter_num_destroy))), \
|
||||
/* ___p pointer is necessary to call bpf_iter_num_new() *once* to init ___it */ \
|
||||
*___p __attribute__((unused)) = ( \
|
||||
bpf_iter_num_new(&___it, (start), (end)), \
|
||||
/* this is a workaround for Clang bug: it currently doesn't emit BTF */ \
|
||||
/* for bpf_iter_num_destroy() when used from cleanup() attribute */ \
|
||||
(void)bpf_iter_num_destroy, (void *)0); \
|
||||
({ \
|
||||
/* iteration step */ \
|
||||
int *___t = bpf_iter_num_next(&___it); \
|
||||
/* termination and bounds check */ \
|
||||
(___t && ((i) = *___t, (i) >= (start) && (i) < (end))); \
|
||||
}); \
|
||||
)
|
||||
#endif /* bpf_for */
|
||||
|
||||
#ifndef bpf_repeat
|
||||
/* bpf_repeat(N) performs N iterations without exposing iteration number
|
||||
*
|
||||
* Note: similarly to bpf_for_each(), it relies on C99 feature of declaring for()
|
||||
* loop bound variables and cleanup attribute, supported by GCC and Clang.
|
||||
*/
|
||||
#define bpf_repeat(N) for ( \
|
||||
/* initialize and define destructor */ \
|
||||
struct bpf_iter_num ___it __attribute__((aligned(8), /* enforce, just in case */ \
|
||||
cleanup(bpf_iter_num_destroy))), \
|
||||
/* ___p pointer is necessary to call bpf_iter_num_new() *once* to init ___it */ \
|
||||
*___p __attribute__((unused)) = ( \
|
||||
bpf_iter_num_new(&___it, 0, (N)), \
|
||||
/* this is a workaround for Clang bug: it currently doesn't emit BTF */ \
|
||||
/* for bpf_iter_num_destroy() when used from cleanup() attribute */ \
|
||||
(void)bpf_iter_num_destroy, (void *)0); \
|
||||
bpf_iter_num_next(&___it); \
|
||||
/* nothing here */ \
|
||||
)
|
||||
#endif /* bpf_repeat */
|
||||
|
||||
#endif
|
||||
|
||||
@@ -204,6 +204,7 @@ struct pt_regs___s390 {
|
||||
#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
|
||||
#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
|
||||
#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
|
||||
#define __PT_PARM5_SYSCALL_REG uregs[4]
|
||||
#define __PT_PARM6_SYSCALL_REG uregs[5]
|
||||
#define __PT_PARM7_SYSCALL_REG uregs[6]
|
||||
|
||||
@@ -415,6 +416,8 @@ struct pt_regs___arm64 {
|
||||
* https://loongson.github.io/LoongArch-Documentation/LoongArch-ELF-ABI-EN.html
|
||||
*/
|
||||
|
||||
/* loongarch provides struct user_pt_regs instead of struct pt_regs to userspace */
|
||||
#define __PT_REGS_CAST(x) ((const struct user_pt_regs *)(x))
|
||||
#define __PT_PARM1_REG regs[4]
|
||||
#define __PT_PARM2_REG regs[5]
|
||||
#define __PT_PARM3_REG regs[6]
|
||||
|
||||
@@ -1000,8 +1000,6 @@ static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
|
||||
}
|
||||
}
|
||||
|
||||
err = 0;
|
||||
|
||||
if (!btf_data) {
|
||||
pr_warn("failed to find '%s' ELF section in %s\n", BTF_ELF_SEC, path);
|
||||
err = -ENODATA;
|
||||
|
||||
@@ -560,7 +560,7 @@ static void emit_find_attach_target(struct bpf_gen *gen)
|
||||
}
|
||||
|
||||
void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak,
|
||||
bool is_typeless, int kind, int insn_idx)
|
||||
bool is_typeless, bool is_ld64, int kind, int insn_idx)
|
||||
{
|
||||
struct ksym_relo_desc *relo;
|
||||
|
||||
@@ -574,6 +574,7 @@ void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak,
|
||||
relo->name = name;
|
||||
relo->is_weak = is_weak;
|
||||
relo->is_typeless = is_typeless;
|
||||
relo->is_ld64 = is_ld64;
|
||||
relo->kind = kind;
|
||||
relo->insn_idx = insn_idx;
|
||||
gen->relo_cnt++;
|
||||
@@ -586,9 +587,11 @@ static struct ksym_desc *get_ksym_desc(struct bpf_gen *gen, struct ksym_relo_des
|
||||
int i;
|
||||
|
||||
for (i = 0; i < gen->nr_ksyms; i++) {
|
||||
if (!strcmp(gen->ksyms[i].name, relo->name)) {
|
||||
gen->ksyms[i].ref++;
|
||||
return &gen->ksyms[i];
|
||||
kdesc = &gen->ksyms[i];
|
||||
if (kdesc->kind == relo->kind && kdesc->is_ld64 == relo->is_ld64 &&
|
||||
!strcmp(kdesc->name, relo->name)) {
|
||||
kdesc->ref++;
|
||||
return kdesc;
|
||||
}
|
||||
}
|
||||
kdesc = libbpf_reallocarray(gen->ksyms, gen->nr_ksyms + 1, sizeof(*kdesc));
|
||||
@@ -603,6 +606,7 @@ static struct ksym_desc *get_ksym_desc(struct bpf_gen *gen, struct ksym_relo_des
|
||||
kdesc->ref = 1;
|
||||
kdesc->off = 0;
|
||||
kdesc->insn = 0;
|
||||
kdesc->is_ld64 = relo->is_ld64;
|
||||
return kdesc;
|
||||
}
|
||||
|
||||
@@ -804,11 +808,13 @@ static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo,
|
||||
return;
|
||||
/* try to copy from existing ldimm64 insn */
|
||||
if (kdesc->ref > 1) {
|
||||
move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
|
||||
kdesc->insn + offsetof(struct bpf_insn, imm));
|
||||
move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4,
|
||||
kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm));
|
||||
/* jump over src_reg adjustment if imm is not 0, reuse BPF_REG_0 from move_blob2blob */
|
||||
move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
|
||||
kdesc->insn + offsetof(struct bpf_insn, imm));
|
||||
/* jump over src_reg adjustment if imm (btf_id) is not 0, reuse BPF_REG_0 from move_blob2blob
|
||||
* If btf_id is zero, clear BPF_PSEUDO_BTF_ID flag in src_reg of ld_imm64 insn
|
||||
*/
|
||||
emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3));
|
||||
goto clear_src_reg;
|
||||
}
|
||||
@@ -831,7 +837,7 @@ static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo,
|
||||
emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7,
|
||||
sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)));
|
||||
/* skip src_reg adjustment */
|
||||
emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3));
|
||||
emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 3));
|
||||
clear_src_reg:
|
||||
/* clear bpf_object__relocate_data's src_reg assignment, otherwise we get a verifier failure */
|
||||
reg_mask = src_reg_mask();
|
||||
@@ -862,23 +868,17 @@ static void emit_relo(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn
|
||||
{
|
||||
int insn;
|
||||
|
||||
pr_debug("gen: emit_relo (%d): %s at %d\n", relo->kind, relo->name, relo->insn_idx);
|
||||
pr_debug("gen: emit_relo (%d): %s at %d %s\n",
|
||||
relo->kind, relo->name, relo->insn_idx, relo->is_ld64 ? "ld64" : "call");
|
||||
insn = insns + sizeof(struct bpf_insn) * relo->insn_idx;
|
||||
emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_8, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, insn));
|
||||
switch (relo->kind) {
|
||||
case BTF_KIND_VAR:
|
||||
if (relo->is_ld64) {
|
||||
if (relo->is_typeless)
|
||||
emit_relo_ksym_typeless(gen, relo, insn);
|
||||
else
|
||||
emit_relo_ksym_btf(gen, relo, insn);
|
||||
break;
|
||||
case BTF_KIND_FUNC:
|
||||
} else {
|
||||
emit_relo_kfunc_btf(gen, relo, insn);
|
||||
break;
|
||||
default:
|
||||
pr_warn("Unknown relocation kind '%d'\n", relo->kind);
|
||||
gen->error = -EDOM;
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -901,18 +901,20 @@ static void cleanup_core_relo(struct bpf_gen *gen)
|
||||
|
||||
static void cleanup_relos(struct bpf_gen *gen, int insns)
|
||||
{
|
||||
struct ksym_desc *kdesc;
|
||||
int i, insn;
|
||||
|
||||
for (i = 0; i < gen->nr_ksyms; i++) {
|
||||
kdesc = &gen->ksyms[i];
|
||||
/* only close fds for typed ksyms and kfuncs */
|
||||
if (gen->ksyms[i].kind == BTF_KIND_VAR && !gen->ksyms[i].typeless) {
|
||||
if (kdesc->is_ld64 && !kdesc->typeless) {
|
||||
/* close fd recorded in insn[insn_idx + 1].imm */
|
||||
insn = gen->ksyms[i].insn;
|
||||
insn = kdesc->insn;
|
||||
insn += sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm);
|
||||
emit_sys_close_blob(gen, insn);
|
||||
} else if (gen->ksyms[i].kind == BTF_KIND_FUNC) {
|
||||
emit_sys_close_blob(gen, blob_fd_array_off(gen, gen->ksyms[i].off));
|
||||
if (gen->ksyms[i].off < MAX_FD_ARRAY_SZ)
|
||||
} else if (!kdesc->is_ld64) {
|
||||
emit_sys_close_blob(gen, blob_fd_array_off(gen, kdesc->off));
|
||||
if (kdesc->off < MAX_FD_ARRAY_SZ)
|
||||
gen->nr_fd_array--;
|
||||
}
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -101,6 +101,8 @@ typedef int (*libbpf_print_fn_t)(enum libbpf_print_level level,
|
||||
* be used for libbpf warnings and informational messages.
|
||||
* @param fn The log print function. If NULL, libbpf won't print anything.
|
||||
* @return Pointer to old print function.
|
||||
*
|
||||
* This function is thread-safe.
|
||||
*/
|
||||
LIBBPF_API libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn);
|
||||
|
||||
@@ -447,12 +449,15 @@ LIBBPF_API struct bpf_link *
|
||||
bpf_program__attach(const struct bpf_program *prog);
|
||||
|
||||
struct bpf_perf_event_opts {
|
||||
/* size of this struct, for forward/backward compatiblity */
|
||||
/* size of this struct, for forward/backward compatibility */
|
||||
size_t sz;
|
||||
/* custom user-provided value fetchable through bpf_get_attach_cookie() */
|
||||
__u64 bpf_cookie;
|
||||
/* don't use BPF link when attach BPF program */
|
||||
bool force_ioctl_attach;
|
||||
size_t :0;
|
||||
};
|
||||
#define bpf_perf_event_opts__last_field bpf_cookie
|
||||
#define bpf_perf_event_opts__last_field force_ioctl_attach
|
||||
|
||||
LIBBPF_API struct bpf_link *
|
||||
bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd);
|
||||
@@ -461,8 +466,25 @@ LIBBPF_API struct bpf_link *
|
||||
bpf_program__attach_perf_event_opts(const struct bpf_program *prog, int pfd,
|
||||
const struct bpf_perf_event_opts *opts);
|
||||
|
||||
/**
|
||||
* enum probe_attach_mode - the mode to attach kprobe/uprobe
|
||||
*
|
||||
* force libbpf to attach kprobe/uprobe in specific mode, -ENOTSUP will
|
||||
* be returned if it is not supported by the kernel.
|
||||
*/
|
||||
enum probe_attach_mode {
|
||||
/* attach probe in latest supported mode by kernel */
|
||||
PROBE_ATTACH_MODE_DEFAULT = 0,
|
||||
/* attach probe in legacy mode, using debugfs/tracefs */
|
||||
PROBE_ATTACH_MODE_LEGACY,
|
||||
/* create perf event with perf_event_open() syscall */
|
||||
PROBE_ATTACH_MODE_PERF,
|
||||
/* attach probe with BPF link */
|
||||
PROBE_ATTACH_MODE_LINK,
|
||||
};
|
||||
|
||||
struct bpf_kprobe_opts {
|
||||
/* size of this struct, for forward/backward compatiblity */
|
||||
/* size of this struct, for forward/backward compatibility */
|
||||
size_t sz;
|
||||
/* custom user-provided value fetchable through bpf_get_attach_cookie() */
|
||||
__u64 bpf_cookie;
|
||||
@@ -470,9 +492,11 @@ struct bpf_kprobe_opts {
|
||||
size_t offset;
|
||||
/* kprobe is return probe */
|
||||
bool retprobe;
|
||||
/* kprobe attach mode */
|
||||
enum probe_attach_mode attach_mode;
|
||||
size_t :0;
|
||||
};
|
||||
#define bpf_kprobe_opts__last_field retprobe
|
||||
#define bpf_kprobe_opts__last_field attach_mode
|
||||
|
||||
LIBBPF_API struct bpf_link *
|
||||
bpf_program__attach_kprobe(const struct bpf_program *prog, bool retprobe,
|
||||
@@ -506,7 +530,7 @@ bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
|
||||
const struct bpf_kprobe_multi_opts *opts);
|
||||
|
||||
struct bpf_ksyscall_opts {
|
||||
/* size of this struct, for forward/backward compatiblity */
|
||||
/* size of this struct, for forward/backward compatibility */
|
||||
size_t sz;
|
||||
/* custom user-provided value fetchable through bpf_get_attach_cookie() */
|
||||
__u64 bpf_cookie;
|
||||
@@ -552,7 +576,7 @@ bpf_program__attach_ksyscall(const struct bpf_program *prog,
|
||||
const struct bpf_ksyscall_opts *opts);
|
||||
|
||||
struct bpf_uprobe_opts {
|
||||
/* size of this struct, for forward/backward compatiblity */
|
||||
/* size of this struct, for forward/backward compatibility */
|
||||
size_t sz;
|
||||
/* offset of kernel reference counted USDT semaphore, added in
|
||||
* a6ca88b241d5 ("trace_uprobe: support reference counter in fd-based uprobe")
|
||||
@@ -570,9 +594,11 @@ struct bpf_uprobe_opts {
|
||||
* binary_path.
|
||||
*/
|
||||
const char *func_name;
|
||||
/* uprobe attach mode */
|
||||
enum probe_attach_mode attach_mode;
|
||||
size_t :0;
|
||||
};
|
||||
#define bpf_uprobe_opts__last_field func_name
|
||||
#define bpf_uprobe_opts__last_field attach_mode
|
||||
|
||||
/**
|
||||
* @brief **bpf_program__attach_uprobe()** attaches a BPF program
|
||||
@@ -646,7 +672,7 @@ bpf_program__attach_usdt(const struct bpf_program *prog,
|
||||
const struct bpf_usdt_opts *opts);
|
||||
|
||||
struct bpf_tracepoint_opts {
|
||||
/* size of this struct, for forward/backward compatiblity */
|
||||
/* size of this struct, for forward/backward compatibility */
|
||||
size_t sz;
|
||||
/* custom user-provided value fetchable through bpf_get_attach_cookie() */
|
||||
__u64 bpf_cookie;
|
||||
@@ -695,6 +721,7 @@ bpf_program__attach_freplace(const struct bpf_program *prog,
|
||||
struct bpf_map;
|
||||
|
||||
LIBBPF_API struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map);
|
||||
LIBBPF_API int bpf_link__update_map(struct bpf_link *link, const struct bpf_map *map);
|
||||
|
||||
struct bpf_iter_attach_opts {
|
||||
size_t sz; /* size of this struct for forward/backward compatibility */
|
||||
@@ -1110,7 +1137,7 @@ struct user_ring_buffer;
|
||||
typedef int (*ring_buffer_sample_fn)(void *ctx, void *data, size_t size);
|
||||
|
||||
struct ring_buffer_opts {
|
||||
size_t sz; /* size of this struct, for forward/backward compatiblity */
|
||||
size_t sz; /* size of this struct, for forward/backward compatibility */
|
||||
};
|
||||
|
||||
#define ring_buffer_opts__last_field sz
|
||||
@@ -1475,7 +1502,7 @@ LIBBPF_API void
|
||||
bpf_object__destroy_subskeleton(struct bpf_object_subskeleton *s);
|
||||
|
||||
struct gen_loader_opts {
|
||||
size_t sz; /* size of this struct, for forward/backward compatiblity */
|
||||
size_t sz; /* size of this struct, for forward/backward compatibility */
|
||||
const char *data;
|
||||
const char *insns;
|
||||
__u32 data_sz;
|
||||
@@ -1493,13 +1520,13 @@ enum libbpf_tristate {
|
||||
};
|
||||
|
||||
struct bpf_linker_opts {
|
||||
/* size of this struct, for forward/backward compatiblity */
|
||||
/* size of this struct, for forward/backward compatibility */
|
||||
size_t sz;
|
||||
};
|
||||
#define bpf_linker_opts__last_field sz
|
||||
|
||||
struct bpf_linker_file_opts {
|
||||
/* size of this struct, for forward/backward compatiblity */
|
||||
/* size of this struct, for forward/backward compatibility */
|
||||
size_t sz;
|
||||
};
|
||||
#define bpf_linker_file_opts__last_field sz
|
||||
@@ -1542,7 +1569,7 @@ typedef int (*libbpf_prog_attach_fn_t)(const struct bpf_program *prog, long cook
|
||||
struct bpf_link **link);
|
||||
|
||||
struct libbpf_prog_handler_opts {
|
||||
/* size of this struct, for forward/backward compatiblity */
|
||||
/* size of this struct, for forward/backward compatibility */
|
||||
size_t sz;
|
||||
/* User-provided value that is passed to prog_setup_fn,
|
||||
* prog_prepare_load_fn, and prog_attach_fn callbacks. Allows user to
|
||||
|
||||
@@ -386,6 +386,7 @@ LIBBPF_1.1.0 {
|
||||
LIBBPF_1.2.0 {
|
||||
global:
|
||||
bpf_btf_get_info_by_fd;
|
||||
bpf_link__update_map;
|
||||
bpf_link_get_info_by_fd;
|
||||
bpf_map_get_info_by_fd;
|
||||
bpf_prog_get_info_by_fd;
|
||||
|
||||
@@ -180,6 +180,7 @@ static int probe_prog_load(enum bpf_prog_type prog_type,
|
||||
case BPF_PROG_TYPE_SK_REUSEPORT:
|
||||
case BPF_PROG_TYPE_FLOW_DISSECTOR:
|
||||
case BPF_PROG_TYPE_CGROUP_SYSCTL:
|
||||
case BPF_PROG_TYPE_NETFILTER:
|
||||
break;
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
@@ -1115,7 +1115,19 @@ static int extend_sec(struct bpf_linker *linker, struct dst_sec *dst, struct src
|
||||
|
||||
if (src->shdr->sh_type != SHT_NOBITS) {
|
||||
tmp = realloc(dst->raw_data, dst_final_sz);
|
||||
if (!tmp)
|
||||
/* If dst_align_sz == 0, realloc() behaves in a special way:
|
||||
* 1. When dst->raw_data is NULL it returns:
|
||||
* "either NULL or a pointer suitable to be passed to free()" [1].
|
||||
* 2. When dst->raw_data is not-NULL it frees dst->raw_data and returns NULL,
|
||||
* thus invalidating any "pointer suitable to be passed to free()" obtained
|
||||
* at step (1).
|
||||
*
|
||||
* The dst_align_sz > 0 check avoids error exit after (2), otherwise
|
||||
* dst->raw_data would be freed again in bpf_linker__free().
|
||||
*
|
||||
* [1] man 3 realloc
|
||||
*/
|
||||
if (!tmp && dst_align_sz > 0)
|
||||
return -ENOMEM;
|
||||
dst->raw_data = tmp;
|
||||
|
||||
@@ -1997,7 +2009,6 @@ static int linker_append_elf_sym(struct bpf_linker *linker, struct src_obj *obj,
|
||||
static int linker_append_elf_relos(struct bpf_linker *linker, struct src_obj *obj)
|
||||
{
|
||||
struct src_sec *src_symtab = &obj->secs[obj->symtab_sec_idx];
|
||||
struct dst_sec *dst_symtab;
|
||||
int i, err;
|
||||
|
||||
for (i = 1; i < obj->sec_cnt; i++) {
|
||||
@@ -2030,9 +2041,6 @@ static int linker_append_elf_relos(struct bpf_linker *linker, struct src_obj *ob
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* add_dst_sec() above could have invalidated linker->secs */
|
||||
dst_symtab = &linker->secs[linker->symtab_sec_idx];
|
||||
|
||||
/* shdr->sh_link points to SYMTAB */
|
||||
dst_sec->shdr->sh_link = linker->symtab_sec_idx;
|
||||
|
||||
@@ -2049,16 +2057,13 @@ static int linker_append_elf_relos(struct bpf_linker *linker, struct src_obj *ob
|
||||
dst_rel = dst_sec->raw_data + src_sec->dst_off;
|
||||
n = src_sec->shdr->sh_size / src_sec->shdr->sh_entsize;
|
||||
for (j = 0; j < n; j++, src_rel++, dst_rel++) {
|
||||
size_t src_sym_idx = ELF64_R_SYM(src_rel->r_info);
|
||||
size_t sym_type = ELF64_R_TYPE(src_rel->r_info);
|
||||
Elf64_Sym *src_sym, *dst_sym;
|
||||
size_t dst_sym_idx;
|
||||
size_t src_sym_idx, dst_sym_idx, sym_type;
|
||||
Elf64_Sym *src_sym;
|
||||
|
||||
src_sym_idx = ELF64_R_SYM(src_rel->r_info);
|
||||
src_sym = src_symtab->data->d_buf + sizeof(*src_sym) * src_sym_idx;
|
||||
|
||||
dst_sym_idx = obj->sym_map[src_sym_idx];
|
||||
dst_sym = dst_symtab->raw_data + sizeof(*dst_sym) * dst_sym_idx;
|
||||
dst_rel->r_offset += src_linked_sec->dst_off;
|
||||
sym_type = ELF64_R_TYPE(src_rel->r_info);
|
||||
dst_rel->r_info = ELF64_R_INFO(dst_sym_idx, sym_type);
|
||||
|
||||
@@ -468,8 +468,13 @@ int bpf_xdp_query(int ifindex, int xdp_flags, struct bpf_xdp_query_opts *opts)
|
||||
return 0;
|
||||
|
||||
err = libbpf_netlink_resolve_genl_family_id("netdev", sizeof("netdev"), &id);
|
||||
if (err < 0)
|
||||
if (err < 0) {
|
||||
if (err == -ENOENT) {
|
||||
opts->feature_flags = 0;
|
||||
goto skip_feature_flags;
|
||||
}
|
||||
return libbpf_err(err);
|
||||
}
|
||||
|
||||
memset(&req, 0, sizeof(req));
|
||||
req.nh.nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN);
|
||||
@@ -489,6 +494,7 @@ int bpf_xdp_query(int ifindex, int xdp_flags, struct bpf_xdp_query_opts *opts)
|
||||
|
||||
opts->feature_flags = md.flags;
|
||||
|
||||
skip_feature_flags:
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
@@ -1551,9 +1551,6 @@ int __bpf_core_types_match(const struct btf *local_btf, __u32 local_id, const st
|
||||
if (level <= 0)
|
||||
return -EINVAL;
|
||||
|
||||
local_t = btf_type_by_id(local_btf, local_id);
|
||||
targ_t = btf_type_by_id(targ_btf, targ_id);
|
||||
|
||||
recur:
|
||||
depth--;
|
||||
if (depth < 0)
|
||||
|
||||
@@ -1141,12 +1141,13 @@ static int parse_usdt_note(Elf *elf, const char *path, GElf_Nhdr *nhdr,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg);
|
||||
static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz);
|
||||
|
||||
static int parse_usdt_spec(struct usdt_spec *spec, const struct usdt_note *note, __u64 usdt_cookie)
|
||||
{
|
||||
struct usdt_arg_spec *arg;
|
||||
const char *s;
|
||||
int len;
|
||||
int arg_sz, len;
|
||||
|
||||
spec->usdt_cookie = usdt_cookie;
|
||||
spec->arg_cnt = 0;
|
||||
@@ -1159,10 +1160,25 @@ static int parse_usdt_spec(struct usdt_spec *spec, const struct usdt_note *note,
|
||||
return -E2BIG;
|
||||
}
|
||||
|
||||
len = parse_usdt_arg(s, spec->arg_cnt, &spec->args[spec->arg_cnt]);
|
||||
arg = &spec->args[spec->arg_cnt];
|
||||
len = parse_usdt_arg(s, spec->arg_cnt, arg, &arg_sz);
|
||||
if (len < 0)
|
||||
return len;
|
||||
|
||||
arg->arg_signed = arg_sz < 0;
|
||||
if (arg_sz < 0)
|
||||
arg_sz = -arg_sz;
|
||||
|
||||
switch (arg_sz) {
|
||||
case 1: case 2: case 4: case 8:
|
||||
arg->arg_bitshift = 64 - arg_sz * 8;
|
||||
break;
|
||||
default:
|
||||
pr_warn("usdt: unsupported arg #%d (spec '%s') size: %d\n",
|
||||
spec->arg_cnt, s, arg_sz);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
s += len;
|
||||
spec->arg_cnt++;
|
||||
}
|
||||
@@ -1219,13 +1235,13 @@ static int calc_pt_regs_off(const char *reg_name)
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg)
|
||||
static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
|
||||
{
|
||||
char reg_name[16];
|
||||
int arg_sz, len, reg_off;
|
||||
int len, reg_off;
|
||||
long off;
|
||||
|
||||
if (sscanf(arg_str, " %d @ %ld ( %%%15[^)] ) %n", &arg_sz, &off, reg_name, &len) == 3) {
|
||||
if (sscanf(arg_str, " %d @ %ld ( %%%15[^)] ) %n", arg_sz, &off, reg_name, &len) == 3) {
|
||||
/* Memory dereference case, e.g., -4@-20(%rbp) */
|
||||
arg->arg_type = USDT_ARG_REG_DEREF;
|
||||
arg->val_off = off;
|
||||
@@ -1233,7 +1249,7 @@ static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec
|
||||
if (reg_off < 0)
|
||||
return reg_off;
|
||||
arg->reg_off = reg_off;
|
||||
} else if (sscanf(arg_str, " %d @ ( %%%15[^)] ) %n", &arg_sz, reg_name, &len) == 2) {
|
||||
} else if (sscanf(arg_str, " %d @ ( %%%15[^)] ) %n", arg_sz, reg_name, &len) == 2) {
|
||||
/* Memory dereference case without offset, e.g., 8@(%rsp) */
|
||||
arg->arg_type = USDT_ARG_REG_DEREF;
|
||||
arg->val_off = 0;
|
||||
@@ -1241,7 +1257,7 @@ static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec
|
||||
if (reg_off < 0)
|
||||
return reg_off;
|
||||
arg->reg_off = reg_off;
|
||||
} else if (sscanf(arg_str, " %d @ %%%15s %n", &arg_sz, reg_name, &len) == 2) {
|
||||
} else if (sscanf(arg_str, " %d @ %%%15s %n", arg_sz, reg_name, &len) == 2) {
|
||||
/* Register read case, e.g., -4@%eax */
|
||||
arg->arg_type = USDT_ARG_REG;
|
||||
arg->val_off = 0;
|
||||
@@ -1250,7 +1266,7 @@ static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec
|
||||
if (reg_off < 0)
|
||||
return reg_off;
|
||||
arg->reg_off = reg_off;
|
||||
} else if (sscanf(arg_str, " %d @ $%ld %n", &arg_sz, &off, &len) == 2) {
|
||||
} else if (sscanf(arg_str, " %d @ $%ld %n", arg_sz, &off, &len) == 2) {
|
||||
/* Constant value case, e.g., 4@$71 */
|
||||
arg->arg_type = USDT_ARG_CONST;
|
||||
arg->val_off = off;
|
||||
@@ -1260,20 +1276,6 @@ static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
arg->arg_signed = arg_sz < 0;
|
||||
if (arg_sz < 0)
|
||||
arg_sz = -arg_sz;
|
||||
|
||||
switch (arg_sz) {
|
||||
case 1: case 2: case 4: case 8:
|
||||
arg->arg_bitshift = 64 - arg_sz * 8;
|
||||
break;
|
||||
default:
|
||||
pr_warn("usdt: unsupported arg #%d (spec '%s') size: %d\n",
|
||||
arg_num, arg_str, arg_sz);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return len;
|
||||
}
|
||||
|
||||
@@ -1281,13 +1283,13 @@ static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec
|
||||
|
||||
/* Do not support __s390__ for now, since user_pt_regs is broken with -m31. */
|
||||
|
||||
static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg)
|
||||
static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
|
||||
{
|
||||
unsigned int reg;
|
||||
int arg_sz, len;
|
||||
int len;
|
||||
long off;
|
||||
|
||||
if (sscanf(arg_str, " %d @ %ld ( %%r%u ) %n", &arg_sz, &off, ®, &len) == 3) {
|
||||
if (sscanf(arg_str, " %d @ %ld ( %%r%u ) %n", arg_sz, &off, ®, &len) == 3) {
|
||||
/* Memory dereference case, e.g., -2@-28(%r15) */
|
||||
arg->arg_type = USDT_ARG_REG_DEREF;
|
||||
arg->val_off = off;
|
||||
@@ -1296,7 +1298,7 @@ static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec
|
||||
return -EINVAL;
|
||||
}
|
||||
arg->reg_off = offsetof(user_pt_regs, gprs[reg]);
|
||||
} else if (sscanf(arg_str, " %d @ %%r%u %n", &arg_sz, ®, &len) == 2) {
|
||||
} else if (sscanf(arg_str, " %d @ %%r%u %n", arg_sz, ®, &len) == 2) {
|
||||
/* Register read case, e.g., -8@%r0 */
|
||||
arg->arg_type = USDT_ARG_REG;
|
||||
arg->val_off = 0;
|
||||
@@ -1305,7 +1307,7 @@ static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec
|
||||
return -EINVAL;
|
||||
}
|
||||
arg->reg_off = offsetof(user_pt_regs, gprs[reg]);
|
||||
} else if (sscanf(arg_str, " %d @ %ld %n", &arg_sz, &off, &len) == 2) {
|
||||
} else if (sscanf(arg_str, " %d @ %ld %n", arg_sz, &off, &len) == 2) {
|
||||
/* Constant value case, e.g., 4@71 */
|
||||
arg->arg_type = USDT_ARG_CONST;
|
||||
arg->val_off = off;
|
||||
@@ -1315,20 +1317,6 @@ static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
arg->arg_signed = arg_sz < 0;
|
||||
if (arg_sz < 0)
|
||||
arg_sz = -arg_sz;
|
||||
|
||||
switch (arg_sz) {
|
||||
case 1: case 2: case 4: case 8:
|
||||
arg->arg_bitshift = 64 - arg_sz * 8;
|
||||
break;
|
||||
default:
|
||||
pr_warn("usdt: unsupported arg #%d (spec '%s') size: %d\n",
|
||||
arg_num, arg_str, arg_sz);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return len;
|
||||
}
|
||||
|
||||
@@ -1348,13 +1336,13 @@ static int calc_pt_regs_off(const char *reg_name)
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg)
|
||||
static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
|
||||
{
|
||||
char reg_name[16];
|
||||
int arg_sz, len, reg_off;
|
||||
int len, reg_off;
|
||||
long off;
|
||||
|
||||
if (sscanf(arg_str, " %d @ \[ %15[a-z0-9], %ld ] %n", &arg_sz, reg_name, &off, &len) == 3) {
|
||||
if (sscanf(arg_str, " %d @ \[ %15[a-z0-9] , %ld ] %n", arg_sz, reg_name, &off, &len) == 3) {
|
||||
/* Memory dereference case, e.g., -4@[sp, 96] */
|
||||
arg->arg_type = USDT_ARG_REG_DEREF;
|
||||
arg->val_off = off;
|
||||
@@ -1362,7 +1350,7 @@ static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec
|
||||
if (reg_off < 0)
|
||||
return reg_off;
|
||||
arg->reg_off = reg_off;
|
||||
} else if (sscanf(arg_str, " %d @ \[ %15[a-z0-9] ] %n", &arg_sz, reg_name, &len) == 2) {
|
||||
} else if (sscanf(arg_str, " %d @ \[ %15[a-z0-9] ] %n", arg_sz, reg_name, &len) == 2) {
|
||||
/* Memory dereference case, e.g., -4@[sp] */
|
||||
arg->arg_type = USDT_ARG_REG_DEREF;
|
||||
arg->val_off = 0;
|
||||
@@ -1370,12 +1358,12 @@ static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec
|
||||
if (reg_off < 0)
|
||||
return reg_off;
|
||||
arg->reg_off = reg_off;
|
||||
} else if (sscanf(arg_str, " %d @ %ld %n", &arg_sz, &off, &len) == 2) {
|
||||
} else if (sscanf(arg_str, " %d @ %ld %n", arg_sz, &off, &len) == 2) {
|
||||
/* Constant value case, e.g., 4@5 */
|
||||
arg->arg_type = USDT_ARG_CONST;
|
||||
arg->val_off = off;
|
||||
arg->reg_off = 0;
|
||||
} else if (sscanf(arg_str, " %d @ %15[a-z0-9] %n", &arg_sz, reg_name, &len) == 2) {
|
||||
} else if (sscanf(arg_str, " %d @ %15[a-z0-9] %n", arg_sz, reg_name, &len) == 2) {
|
||||
/* Register read case, e.g., -8@x4 */
|
||||
arg->arg_type = USDT_ARG_REG;
|
||||
arg->val_off = 0;
|
||||
@@ -1388,20 +1376,6 @@ static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
arg->arg_signed = arg_sz < 0;
|
||||
if (arg_sz < 0)
|
||||
arg_sz = -arg_sz;
|
||||
|
||||
switch (arg_sz) {
|
||||
case 1: case 2: case 4: case 8:
|
||||
arg->arg_bitshift = 64 - arg_sz * 8;
|
||||
break;
|
||||
default:
|
||||
pr_warn("usdt: unsupported arg #%d (spec '%s') size: %d\n",
|
||||
arg_num, arg_str, arg_sz);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return len;
|
||||
}
|
||||
|
||||
@@ -1456,13 +1430,13 @@ static int calc_pt_regs_off(const char *reg_name)
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg)
|
||||
static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
|
||||
{
|
||||
char reg_name[16];
|
||||
int arg_sz, len, reg_off;
|
||||
int len, reg_off;
|
||||
long off;
|
||||
|
||||
if (sscanf(arg_str, " %d @ %ld ( %15[a-z0-9] ) %n", &arg_sz, &off, reg_name, &len) == 3) {
|
||||
if (sscanf(arg_str, " %d @ %ld ( %15[a-z0-9] ) %n", arg_sz, &off, reg_name, &len) == 3) {
|
||||
/* Memory dereference case, e.g., -8@-88(s0) */
|
||||
arg->arg_type = USDT_ARG_REG_DEREF;
|
||||
arg->val_off = off;
|
||||
@@ -1470,12 +1444,12 @@ static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec
|
||||
if (reg_off < 0)
|
||||
return reg_off;
|
||||
arg->reg_off = reg_off;
|
||||
} else if (sscanf(arg_str, " %d @ %ld %n", &arg_sz, &off, &len) == 2) {
|
||||
} else if (sscanf(arg_str, " %d @ %ld %n", arg_sz, &off, &len) == 2) {
|
||||
/* Constant value case, e.g., 4@5 */
|
||||
arg->arg_type = USDT_ARG_CONST;
|
||||
arg->val_off = off;
|
||||
arg->reg_off = 0;
|
||||
} else if (sscanf(arg_str, " %d @ %15[a-z0-9] %n", &arg_sz, reg_name, &len) == 2) {
|
||||
} else if (sscanf(arg_str, " %d @ %15[a-z0-9] %n", arg_sz, reg_name, &len) == 2) {
|
||||
/* Register read case, e.g., -8@a1 */
|
||||
arg->arg_type = USDT_ARG_REG;
|
||||
arg->val_off = 0;
|
||||
@@ -1488,17 +1462,83 @@ static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
arg->arg_signed = arg_sz < 0;
|
||||
if (arg_sz < 0)
|
||||
arg_sz = -arg_sz;
|
||||
return len;
|
||||
}
|
||||
|
||||
switch (arg_sz) {
|
||||
case 1: case 2: case 4: case 8:
|
||||
arg->arg_bitshift = 64 - arg_sz * 8;
|
||||
break;
|
||||
default:
|
||||
pr_warn("usdt: unsupported arg #%d (spec '%s') size: %d\n",
|
||||
arg_num, arg_str, arg_sz);
|
||||
#elif defined(__arm__)
|
||||
|
||||
static int calc_pt_regs_off(const char *reg_name)
|
||||
{
|
||||
static struct {
|
||||
const char *name;
|
||||
size_t pt_regs_off;
|
||||
} reg_map[] = {
|
||||
{ "r0", offsetof(struct pt_regs, uregs[0]) },
|
||||
{ "r1", offsetof(struct pt_regs, uregs[1]) },
|
||||
{ "r2", offsetof(struct pt_regs, uregs[2]) },
|
||||
{ "r3", offsetof(struct pt_regs, uregs[3]) },
|
||||
{ "r4", offsetof(struct pt_regs, uregs[4]) },
|
||||
{ "r5", offsetof(struct pt_regs, uregs[5]) },
|
||||
{ "r6", offsetof(struct pt_regs, uregs[6]) },
|
||||
{ "r7", offsetof(struct pt_regs, uregs[7]) },
|
||||
{ "r8", offsetof(struct pt_regs, uregs[8]) },
|
||||
{ "r9", offsetof(struct pt_regs, uregs[9]) },
|
||||
{ "r10", offsetof(struct pt_regs, uregs[10]) },
|
||||
{ "fp", offsetof(struct pt_regs, uregs[11]) },
|
||||
{ "ip", offsetof(struct pt_regs, uregs[12]) },
|
||||
{ "sp", offsetof(struct pt_regs, uregs[13]) },
|
||||
{ "lr", offsetof(struct pt_regs, uregs[14]) },
|
||||
{ "pc", offsetof(struct pt_regs, uregs[15]) },
|
||||
};
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(reg_map); i++) {
|
||||
if (strcmp(reg_name, reg_map[i].name) == 0)
|
||||
return reg_map[i].pt_regs_off;
|
||||
}
|
||||
|
||||
pr_warn("usdt: unrecognized register '%s'\n", reg_name);
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
|
||||
{
|
||||
char reg_name[16];
|
||||
int len, reg_off;
|
||||
long off;
|
||||
|
||||
if (sscanf(arg_str, " %d @ \[ %15[a-z0-9] , #%ld ] %n",
|
||||
arg_sz, reg_name, &off, &len) == 3) {
|
||||
/* Memory dereference case, e.g., -4@[fp, #96] */
|
||||
arg->arg_type = USDT_ARG_REG_DEREF;
|
||||
arg->val_off = off;
|
||||
reg_off = calc_pt_regs_off(reg_name);
|
||||
if (reg_off < 0)
|
||||
return reg_off;
|
||||
arg->reg_off = reg_off;
|
||||
} else if (sscanf(arg_str, " %d @ \[ %15[a-z0-9] ] %n", arg_sz, reg_name, &len) == 2) {
|
||||
/* Memory dereference case, e.g., -4@[sp] */
|
||||
arg->arg_type = USDT_ARG_REG_DEREF;
|
||||
arg->val_off = 0;
|
||||
reg_off = calc_pt_regs_off(reg_name);
|
||||
if (reg_off < 0)
|
||||
return reg_off;
|
||||
arg->reg_off = reg_off;
|
||||
} else if (sscanf(arg_str, " %d @ #%ld %n", arg_sz, &off, &len) == 2) {
|
||||
/* Constant value case, e.g., 4@#5 */
|
||||
arg->arg_type = USDT_ARG_CONST;
|
||||
arg->val_off = off;
|
||||
arg->reg_off = 0;
|
||||
} else if (sscanf(arg_str, " %d @ %15[a-z0-9] %n", arg_sz, reg_name, &len) == 2) {
|
||||
/* Register read case, e.g., -8@r4 */
|
||||
arg->arg_type = USDT_ARG_REG;
|
||||
arg->val_off = 0;
|
||||
reg_off = calc_pt_regs_off(reg_name);
|
||||
if (reg_off < 0)
|
||||
return reg_off;
|
||||
arg->reg_off = reg_off;
|
||||
} else {
|
||||
pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@@ -1507,7 +1547,7 @@ static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec
|
||||
|
||||
#else
|
||||
|
||||
static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg)
|
||||
static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
|
||||
{
|
||||
pr_warn("usdt: libbpf doesn't support USDTs on current architecture\n");
|
||||
return -ENOTSUP;
|
||||
|
||||
333
tools/lib/bpf/zip.c
Normal file
333
tools/lib/bpf/zip.c
Normal file
@@ -0,0 +1,333 @@
|
||||
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
|
||||
/*
|
||||
* Routines for dealing with .zip archives.
|
||||
*
|
||||
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
*/
|
||||
|
||||
#include <errno.h>
|
||||
#include <fcntl.h>
|
||||
#include <stdint.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <sys/mman.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#include "libbpf_internal.h"
|
||||
#include "zip.h"
|
||||
|
||||
#pragma GCC diagnostic push
|
||||
#pragma GCC diagnostic ignored "-Wpacked"
|
||||
#pragma GCC diagnostic ignored "-Wattributes"
|
||||
|
||||
/* Specification of ZIP file format can be found here:
|
||||
* https://pkware.cachefly.net/webdocs/casestudies/APPNOTE.TXT
|
||||
* For a high level overview of the structure of a ZIP file see
|
||||
* sections 4.3.1 - 4.3.6.
|
||||
*
|
||||
* Data structures appearing in ZIP files do not contain any
|
||||
* padding and they might be misaligned. To allow us to safely
|
||||
* operate on pointers to such structures and their members, we
|
||||
* declare the types as packed.
|
||||
*/
|
||||
|
||||
#define END_OF_CD_RECORD_MAGIC 0x06054b50
|
||||
|
||||
/* See section 4.3.16 of the spec. */
|
||||
struct end_of_cd_record {
|
||||
/* Magic value equal to END_OF_CD_RECORD_MAGIC */
|
||||
__u32 magic;
|
||||
|
||||
/* Number of the file containing this structure or 0xFFFF if ZIP64 archive.
|
||||
* Zip archive might span multiple files (disks).
|
||||
*/
|
||||
__u16 this_disk;
|
||||
|
||||
/* Number of the file containing the beginning of the central directory or
|
||||
* 0xFFFF if ZIP64 archive.
|
||||
*/
|
||||
__u16 cd_disk;
|
||||
|
||||
/* Number of central directory records on this disk or 0xFFFF if ZIP64
|
||||
* archive.
|
||||
*/
|
||||
__u16 cd_records;
|
||||
|
||||
/* Number of central directory records on all disks or 0xFFFF if ZIP64
|
||||
* archive.
|
||||
*/
|
||||
__u16 cd_records_total;
|
||||
|
||||
/* Size of the central directory record or 0xFFFFFFFF if ZIP64 archive. */
|
||||
__u32 cd_size;
|
||||
|
||||
/* Offset of the central directory from the beginning of the archive or
|
||||
* 0xFFFFFFFF if ZIP64 archive.
|
||||
*/
|
||||
__u32 cd_offset;
|
||||
|
||||
/* Length of comment data following end of central directory record. */
|
||||
__u16 comment_length;
|
||||
|
||||
/* Up to 64k of arbitrary bytes. */
|
||||
/* uint8_t comment[comment_length] */
|
||||
} __attribute__((packed));
|
||||
|
||||
#define CD_FILE_HEADER_MAGIC 0x02014b50
|
||||
#define FLAG_ENCRYPTED (1 << 0)
|
||||
#define FLAG_HAS_DATA_DESCRIPTOR (1 << 3)
|
||||
|
||||
/* See section 4.3.12 of the spec. */
|
||||
struct cd_file_header {
|
||||
/* Magic value equal to CD_FILE_HEADER_MAGIC. */
|
||||
__u32 magic;
|
||||
__u16 version;
|
||||
/* Minimum zip version needed to extract the file. */
|
||||
__u16 min_version;
|
||||
__u16 flags;
|
||||
__u16 compression;
|
||||
__u16 last_modified_time;
|
||||
__u16 last_modified_date;
|
||||
__u32 crc;
|
||||
__u32 compressed_size;
|
||||
__u32 uncompressed_size;
|
||||
__u16 file_name_length;
|
||||
__u16 extra_field_length;
|
||||
__u16 file_comment_length;
|
||||
/* Number of the disk where the file starts or 0xFFFF if ZIP64 archive. */
|
||||
__u16 disk;
|
||||
__u16 internal_attributes;
|
||||
__u32 external_attributes;
|
||||
/* Offset from the start of the disk containing the local file header to the
|
||||
* start of the local file header.
|
||||
*/
|
||||
__u32 offset;
|
||||
} __attribute__((packed));
|
||||
|
||||
#define LOCAL_FILE_HEADER_MAGIC 0x04034b50
|
||||
|
||||
/* See section 4.3.7 of the spec. */
|
||||
struct local_file_header {
|
||||
/* Magic value equal to LOCAL_FILE_HEADER_MAGIC. */
|
||||
__u32 magic;
|
||||
/* Minimum zip version needed to extract the file. */
|
||||
__u16 min_version;
|
||||
__u16 flags;
|
||||
__u16 compression;
|
||||
__u16 last_modified_time;
|
||||
__u16 last_modified_date;
|
||||
__u32 crc;
|
||||
__u32 compressed_size;
|
||||
__u32 uncompressed_size;
|
||||
__u16 file_name_length;
|
||||
__u16 extra_field_length;
|
||||
} __attribute__((packed));
|
||||
|
||||
#pragma GCC diagnostic pop
|
||||
|
||||
struct zip_archive {
|
||||
void *data;
|
||||
__u32 size;
|
||||
__u32 cd_offset;
|
||||
__u32 cd_records;
|
||||
};
|
||||
|
||||
static void *check_access(struct zip_archive *archive, __u32 offset, __u32 size)
|
||||
{
|
||||
if (offset + size > archive->size || offset > offset + size)
|
||||
return NULL;
|
||||
|
||||
return archive->data + offset;
|
||||
}
|
||||
|
||||
/* Returns 0 on success, -EINVAL on error and -ENOTSUP if the eocd indicates the
|
||||
* archive uses features which are not supported.
|
||||
*/
|
||||
static int try_parse_end_of_cd(struct zip_archive *archive, __u32 offset)
|
||||
{
|
||||
__u16 comment_length, cd_records;
|
||||
struct end_of_cd_record *eocd;
|
||||
__u32 cd_offset, cd_size;
|
||||
|
||||
eocd = check_access(archive, offset, sizeof(*eocd));
|
||||
if (!eocd || eocd->magic != END_OF_CD_RECORD_MAGIC)
|
||||
return -EINVAL;
|
||||
|
||||
comment_length = eocd->comment_length;
|
||||
if (offset + sizeof(*eocd) + comment_length != archive->size)
|
||||
return -EINVAL;
|
||||
|
||||
cd_records = eocd->cd_records;
|
||||
if (eocd->this_disk != 0 || eocd->cd_disk != 0 || eocd->cd_records_total != cd_records)
|
||||
/* This is a valid eocd, but we only support single-file non-ZIP64 archives. */
|
||||
return -ENOTSUP;
|
||||
|
||||
cd_offset = eocd->cd_offset;
|
||||
cd_size = eocd->cd_size;
|
||||
if (!check_access(archive, cd_offset, cd_size))
|
||||
return -EINVAL;
|
||||
|
||||
archive->cd_offset = cd_offset;
|
||||
archive->cd_records = cd_records;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int find_cd(struct zip_archive *archive)
|
||||
{
|
||||
int64_t limit, offset;
|
||||
int rc = -EINVAL;
|
||||
|
||||
if (archive->size <= sizeof(struct end_of_cd_record))
|
||||
return -EINVAL;
|
||||
|
||||
/* Because the end of central directory ends with a variable length array of
|
||||
* up to 0xFFFF bytes we can't know exactly where it starts and need to
|
||||
* search for it at the end of the file, scanning the (limit, offset] range.
|
||||
*/
|
||||
offset = archive->size - sizeof(struct end_of_cd_record);
|
||||
limit = (int64_t)offset - (1 << 16);
|
||||
|
||||
for (; offset >= 0 && offset > limit && rc != 0; offset--) {
|
||||
rc = try_parse_end_of_cd(archive, offset);
|
||||
if (rc == -ENOTSUP)
|
||||
break;
|
||||
}
|
||||
return rc;
|
||||
}
|
||||
|
||||
struct zip_archive *zip_archive_open(const char *path)
|
||||
{
|
||||
struct zip_archive *archive;
|
||||
int err, fd;
|
||||
off_t size;
|
||||
void *data;
|
||||
|
||||
fd = open(path, O_RDONLY | O_CLOEXEC);
|
||||
if (fd < 0)
|
||||
return ERR_PTR(-errno);
|
||||
|
||||
size = lseek(fd, 0, SEEK_END);
|
||||
if (size == (off_t)-1 || size > UINT32_MAX) {
|
||||
close(fd);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
data = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
|
||||
err = -errno;
|
||||
close(fd);
|
||||
|
||||
if (data == MAP_FAILED)
|
||||
return ERR_PTR(err);
|
||||
|
||||
archive = malloc(sizeof(*archive));
|
||||
if (!archive) {
|
||||
munmap(data, size);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
};
|
||||
|
||||
archive->data = data;
|
||||
archive->size = size;
|
||||
|
||||
err = find_cd(archive);
|
||||
if (err) {
|
||||
munmap(data, size);
|
||||
free(archive);
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
return archive;
|
||||
}
|
||||
|
||||
void zip_archive_close(struct zip_archive *archive)
|
||||
{
|
||||
munmap(archive->data, archive->size);
|
||||
free(archive);
|
||||
}
|
||||
|
||||
static struct local_file_header *local_file_header_at_offset(struct zip_archive *archive,
|
||||
__u32 offset)
|
||||
{
|
||||
struct local_file_header *lfh;
|
||||
|
||||
lfh = check_access(archive, offset, sizeof(*lfh));
|
||||
if (!lfh || lfh->magic != LOCAL_FILE_HEADER_MAGIC)
|
||||
return NULL;
|
||||
|
||||
return lfh;
|
||||
}
|
||||
|
||||
static int get_entry_at_offset(struct zip_archive *archive, __u32 offset, struct zip_entry *out)
|
||||
{
|
||||
struct local_file_header *lfh;
|
||||
__u32 compressed_size;
|
||||
const char *name;
|
||||
void *data;
|
||||
|
||||
lfh = local_file_header_at_offset(archive, offset);
|
||||
if (!lfh)
|
||||
return -EINVAL;
|
||||
|
||||
offset += sizeof(*lfh);
|
||||
if ((lfh->flags & FLAG_ENCRYPTED) || (lfh->flags & FLAG_HAS_DATA_DESCRIPTOR))
|
||||
return -EINVAL;
|
||||
|
||||
name = check_access(archive, offset, lfh->file_name_length);
|
||||
if (!name)
|
||||
return -EINVAL;
|
||||
|
||||
offset += lfh->file_name_length;
|
||||
if (!check_access(archive, offset, lfh->extra_field_length))
|
||||
return -EINVAL;
|
||||
|
||||
offset += lfh->extra_field_length;
|
||||
compressed_size = lfh->compressed_size;
|
||||
data = check_access(archive, offset, compressed_size);
|
||||
if (!data)
|
||||
return -EINVAL;
|
||||
|
||||
out->compression = lfh->compression;
|
||||
out->name_length = lfh->file_name_length;
|
||||
out->name = name;
|
||||
out->data = data;
|
||||
out->data_length = compressed_size;
|
||||
out->data_offset = offset;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int zip_archive_find_entry(struct zip_archive *archive, const char *file_name,
|
||||
struct zip_entry *out)
|
||||
{
|
||||
size_t file_name_length = strlen(file_name);
|
||||
__u32 i, offset = archive->cd_offset;
|
||||
|
||||
for (i = 0; i < archive->cd_records; ++i) {
|
||||
__u16 cdfh_name_length, cdfh_flags;
|
||||
struct cd_file_header *cdfh;
|
||||
const char *cdfh_name;
|
||||
|
||||
cdfh = check_access(archive, offset, sizeof(*cdfh));
|
||||
if (!cdfh || cdfh->magic != CD_FILE_HEADER_MAGIC)
|
||||
return -EINVAL;
|
||||
|
||||
offset += sizeof(*cdfh);
|
||||
cdfh_name_length = cdfh->file_name_length;
|
||||
cdfh_name = check_access(archive, offset, cdfh_name_length);
|
||||
if (!cdfh_name)
|
||||
return -EINVAL;
|
||||
|
||||
cdfh_flags = cdfh->flags;
|
||||
if ((cdfh_flags & FLAG_ENCRYPTED) == 0 &&
|
||||
(cdfh_flags & FLAG_HAS_DATA_DESCRIPTOR) == 0 &&
|
||||
file_name_length == cdfh_name_length &&
|
||||
memcmp(file_name, archive->data + offset, file_name_length) == 0) {
|
||||
return get_entry_at_offset(archive, cdfh->offset, out);
|
||||
}
|
||||
|
||||
offset += cdfh_name_length;
|
||||
offset += cdfh->extra_field_length;
|
||||
offset += cdfh->file_comment_length;
|
||||
}
|
||||
|
||||
return -ENOENT;
|
||||
}
|
||||
47
tools/lib/bpf/zip.h
Normal file
47
tools/lib/bpf/zip.h
Normal file
@@ -0,0 +1,47 @@
|
||||
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
|
||||
|
||||
#ifndef __LIBBPF_ZIP_H
|
||||
#define __LIBBPF_ZIP_H
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
/* Represents an open zip archive.
|
||||
* Only basic ZIP files are supported, in particular the following are not
|
||||
* supported:
|
||||
* - encryption
|
||||
* - streaming
|
||||
* - multi-part ZIP files
|
||||
* - ZIP64
|
||||
*/
|
||||
struct zip_archive;
|
||||
|
||||
/* Carries information on name, compression method, and data corresponding to a
|
||||
* file in a zip archive.
|
||||
*/
|
||||
struct zip_entry {
|
||||
/* Compression method as defined in pkzip spec. 0 means data is uncompressed. */
|
||||
__u16 compression;
|
||||
|
||||
/* Non-null terminated name of the file. */
|
||||
const char *name;
|
||||
/* Length of the file name. */
|
||||
__u16 name_length;
|
||||
|
||||
/* Pointer to the file data. */
|
||||
const void *data;
|
||||
/* Length of the file data. */
|
||||
__u32 data_length;
|
||||
/* Offset of the file data within the archive. */
|
||||
__u32 data_offset;
|
||||
};
|
||||
|
||||
/* Open a zip archive. Returns NULL in case of an error. */
|
||||
struct zip_archive *zip_archive_open(const char *path);
|
||||
|
||||
/* Close a zip archive and release resources. */
|
||||
void zip_archive_close(struct zip_archive *archive);
|
||||
|
||||
/* Look up an entry corresponding to a file in given zip archive. */
|
||||
int zip_archive_find_entry(struct zip_archive *archive, const char *name, struct zip_entry *out);
|
||||
|
||||
#endif
|
||||
424
tools/net/ynl/ethtool.py
Executable file
424
tools/net/ynl/ethtool.py
Executable file
@@ -0,0 +1,424 @@
|
||||
#!/usr/bin/env python3
|
||||
# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import pprint
|
||||
import sys
|
||||
import re
|
||||
|
||||
from lib import YnlFamily
|
||||
|
||||
def args_to_req(ynl, op_name, args, req):
|
||||
"""
|
||||
Verify and convert command-line arguments to the ynl-compatible request.
|
||||
"""
|
||||
valid_attrs = ynl.operation_do_attributes(op_name)
|
||||
valid_attrs.remove('header') # not user-provided
|
||||
|
||||
if len(args) == 0:
|
||||
print(f'no attributes, expected: {valid_attrs}')
|
||||
sys.exit(1)
|
||||
|
||||
i = 0
|
||||
while i < len(args):
|
||||
attr = args[i]
|
||||
if i + 1 >= len(args):
|
||||
print(f'expected value for \'{attr}\'')
|
||||
sys.exit(1)
|
||||
|
||||
if attr not in valid_attrs:
|
||||
print(f'invalid attribute \'{attr}\', expected: {valid_attrs}')
|
||||
sys.exit(1)
|
||||
|
||||
val = args[i+1]
|
||||
i += 2
|
||||
|
||||
req[attr] = val
|
||||
|
||||
def print_field(reply, *desc):
|
||||
"""
|
||||
Pretty-print a set of fields from the reply. desc specifies the
|
||||
fields and the optional type (bool/yn).
|
||||
"""
|
||||
if len(desc) == 0:
|
||||
return print_field(reply, *zip(reply.keys(), reply.keys()))
|
||||
|
||||
for spec in desc:
|
||||
try:
|
||||
field, name, tp = spec
|
||||
except:
|
||||
field, name = spec
|
||||
tp = 'int'
|
||||
|
||||
value = reply.get(field, None)
|
||||
if tp == 'yn':
|
||||
value = 'yes' if value else 'no'
|
||||
elif tp == 'bool' or isinstance(value, bool):
|
||||
value = 'on' if value else 'off'
|
||||
else:
|
||||
value = 'n/a' if value is None else value
|
||||
|
||||
print(f'{name}: {value}')
|
||||
|
||||
def print_speed(name, value):
|
||||
"""
|
||||
Print out the speed-like strings from the value dict.
|
||||
"""
|
||||
speed_re = re.compile(r'[0-9]+base[^/]+/.+')
|
||||
speed = [ k for k, v in value.items() if v and speed_re.match(k) ]
|
||||
print(f'{name}: {" ".join(speed)}')
|
||||
|
||||
def doit(ynl, args, op_name):
|
||||
"""
|
||||
Prepare request header, parse arguments and doit.
|
||||
"""
|
||||
req = {
|
||||
'header': {
|
||||
'dev-name': args.device,
|
||||
},
|
||||
}
|
||||
|
||||
args_to_req(ynl, op_name, args.args, req)
|
||||
ynl.do(op_name, req)
|
||||
|
||||
def dumpit(ynl, args, op_name, extra = {}):
|
||||
"""
|
||||
Prepare request header, parse arguments and dumpit (filtering out the
|
||||
devices we're not interested in).
|
||||
"""
|
||||
reply = ynl.dump(op_name, { 'header': {} } | extra)
|
||||
if not reply:
|
||||
return {}
|
||||
|
||||
for msg in reply:
|
||||
if msg['header']['dev-name'] == args.device:
|
||||
if args.json:
|
||||
pprint.PrettyPrinter().pprint(msg)
|
||||
sys.exit(0)
|
||||
msg.pop('header', None)
|
||||
return msg
|
||||
|
||||
print(f"Not supported for device {args.device}")
|
||||
sys.exit(1)
|
||||
|
||||
def bits_to_dict(attr):
|
||||
"""
|
||||
Convert ynl-formatted bitmask to a dict of bit=value.
|
||||
"""
|
||||
ret = {}
|
||||
if 'bits' not in attr:
|
||||
return dict()
|
||||
if 'bit' not in attr['bits']:
|
||||
return dict()
|
||||
for bit in attr['bits']['bit']:
|
||||
if bit['name'] == '':
|
||||
continue
|
||||
name = bit['name']
|
||||
value = bit.get('value', False)
|
||||
ret[name] = value
|
||||
return ret
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(description='ethtool wannabe')
|
||||
parser.add_argument('--json', action=argparse.BooleanOptionalAction)
|
||||
parser.add_argument('--show-priv-flags', action=argparse.BooleanOptionalAction)
|
||||
parser.add_argument('--set-priv-flags', action=argparse.BooleanOptionalAction)
|
||||
parser.add_argument('--show-eee', action=argparse.BooleanOptionalAction)
|
||||
parser.add_argument('--set-eee', action=argparse.BooleanOptionalAction)
|
||||
parser.add_argument('-a', '--show-pause', action=argparse.BooleanOptionalAction)
|
||||
parser.add_argument('-A', '--set-pause', action=argparse.BooleanOptionalAction)
|
||||
parser.add_argument('-c', '--show-coalesce', action=argparse.BooleanOptionalAction)
|
||||
parser.add_argument('-C', '--set-coalesce', action=argparse.BooleanOptionalAction)
|
||||
parser.add_argument('-g', '--show-ring', action=argparse.BooleanOptionalAction)
|
||||
parser.add_argument('-G', '--set-ring', action=argparse.BooleanOptionalAction)
|
||||
parser.add_argument('-k', '--show-features', action=argparse.BooleanOptionalAction)
|
||||
parser.add_argument('-K', '--set-features', action=argparse.BooleanOptionalAction)
|
||||
parser.add_argument('-l', '--show-channels', action=argparse.BooleanOptionalAction)
|
||||
parser.add_argument('-L', '--set-channels', action=argparse.BooleanOptionalAction)
|
||||
parser.add_argument('-T', '--show-time-stamping', action=argparse.BooleanOptionalAction)
|
||||
parser.add_argument('-S', '--statistics', action=argparse.BooleanOptionalAction)
|
||||
# TODO: --show-tunnels tunnel-info-get
|
||||
# TODO: --show-module module-get
|
||||
# TODO: --get-plca-cfg plca-get
|
||||
# TODO: --get-plca-status plca-get-status
|
||||
# TODO: --show-mm mm-get
|
||||
# TODO: --show-fec fec-get
|
||||
# TODO: --dump-module-eerpom module-eeprom-get
|
||||
# TODO: pse-get
|
||||
# TODO: rss-get
|
||||
parser.add_argument('device', metavar='device', type=str)
|
||||
parser.add_argument('args', metavar='args', type=str, nargs='*')
|
||||
global args
|
||||
args = parser.parse_args()
|
||||
|
||||
spec = '../../../Documentation/netlink/specs/ethtool.yaml'
|
||||
schema = '../../../Documentation/netlink/genetlink-legacy.yaml'
|
||||
|
||||
ynl = YnlFamily(spec, schema)
|
||||
|
||||
if args.set_priv_flags:
|
||||
# TODO: parse the bitmask
|
||||
print("not implemented")
|
||||
return
|
||||
|
||||
if args.set_eee:
|
||||
return doit(ynl, args, 'eee-set')
|
||||
|
||||
if args.set_pause:
|
||||
return doit(ynl, args, 'pause-set')
|
||||
|
||||
if args.set_coalesce:
|
||||
return doit(ynl, args, 'coalesce-set')
|
||||
|
||||
if args.set_features:
|
||||
# TODO: parse the bitmask
|
||||
print("not implemented")
|
||||
return
|
||||
|
||||
if args.set_channels:
|
||||
return doit(ynl, args, 'channels-set')
|
||||
|
||||
if args.set_ring:
|
||||
return doit(ynl, args, 'rings-set')
|
||||
|
||||
if args.show_priv_flags:
|
||||
flags = bits_to_dict(dumpit(ynl, args, 'privflags-get')['flags'])
|
||||
print_field(flags)
|
||||
return
|
||||
|
||||
if args.show_eee:
|
||||
eee = dumpit(ynl, args, 'eee-get')
|
||||
ours = bits_to_dict(eee['modes-ours'])
|
||||
peer = bits_to_dict(eee['modes-peer'])
|
||||
|
||||
if 'enabled' in eee:
|
||||
status = 'enabled' if eee['enabled'] else 'disabled'
|
||||
if 'active' in eee and eee['active']:
|
||||
status = status + ' - active'
|
||||
else:
|
||||
status = status + ' - inactive'
|
||||
else:
|
||||
status = 'not supported'
|
||||
|
||||
print(f'EEE status: {status}')
|
||||
print_field(eee, ('tx-lpi-timer', 'Tx LPI'))
|
||||
print_speed('Advertised EEE link modes', ours)
|
||||
print_speed('Link partner advertised EEE link modes', peer)
|
||||
|
||||
return
|
||||
|
||||
if args.show_pause:
|
||||
print_field(dumpit(ynl, args, 'pause-get'),
|
||||
('autoneg', 'Autonegotiate', 'bool'),
|
||||
('rx', 'RX', 'bool'),
|
||||
('tx', 'TX', 'bool'))
|
||||
return
|
||||
|
||||
if args.show_coalesce:
|
||||
print_field(dumpit(ynl, args, 'coalesce-get'))
|
||||
return
|
||||
|
||||
if args.show_features:
|
||||
reply = dumpit(ynl, args, 'features-get')
|
||||
available = bits_to_dict(reply['hw'])
|
||||
requested = bits_to_dict(reply['wanted']).keys()
|
||||
active = bits_to_dict(reply['active']).keys()
|
||||
never_changed = bits_to_dict(reply['nochange']).keys()
|
||||
|
||||
for f in sorted(available):
|
||||
value = "off"
|
||||
if f in active:
|
||||
value = "on"
|
||||
|
||||
fixed = ""
|
||||
if f not in available or f in never_changed:
|
||||
fixed = " [fixed]"
|
||||
|
||||
req = ""
|
||||
if f in requested:
|
||||
if f in active:
|
||||
req = " [requested on]"
|
||||
else:
|
||||
req = " [requested off]"
|
||||
|
||||
print(f'{f}: {value}{fixed}{req}')
|
||||
|
||||
return
|
||||
|
||||
if args.show_channels:
|
||||
reply = dumpit(ynl, args, 'channels-get')
|
||||
print(f'Channel parameters for {args.device}:')
|
||||
|
||||
print(f'Pre-set maximums:')
|
||||
print_field(reply,
|
||||
('rx-max', 'RX'),
|
||||
('tx-max', 'TX'),
|
||||
('other-max', 'Other'),
|
||||
('combined-max', 'Combined'))
|
||||
|
||||
print(f'Current hardware settings:')
|
||||
print_field(reply,
|
||||
('rx-count', 'RX'),
|
||||
('tx-count', 'TX'),
|
||||
('other-count', 'Other'),
|
||||
('combined-count', 'Combined'))
|
||||
|
||||
return
|
||||
|
||||
if args.show_ring:
|
||||
reply = dumpit(ynl, args, 'channels-get')
|
||||
|
||||
print(f'Ring parameters for {args.device}:')
|
||||
|
||||
print(f'Pre-set maximums:')
|
||||
print_field(reply,
|
||||
('rx-max', 'RX'),
|
||||
('rx-mini-max', 'RX Mini'),
|
||||
('rx-jumbo-max', 'RX Jumbo'),
|
||||
('tx-max', 'TX'))
|
||||
|
||||
print(f'Current hardware settings:')
|
||||
print_field(reply,
|
||||
('rx', 'RX'),
|
||||
('rx-mini', 'RX Mini'),
|
||||
('rx-jumbo', 'RX Jumbo'),
|
||||
('tx', 'TX'))
|
||||
|
||||
print_field(reply,
|
||||
('rx-buf-len', 'RX Buf Len'),
|
||||
('cqe-size', 'CQE Size'),
|
||||
('tx-push', 'TX Push', 'bool'))
|
||||
|
||||
return
|
||||
|
||||
if args.statistics:
|
||||
print(f'NIC statistics:')
|
||||
|
||||
# TODO: pass id?
|
||||
strset = dumpit(ynl, args, 'strset-get')
|
||||
pprint.PrettyPrinter().pprint(strset)
|
||||
|
||||
req = {
|
||||
'groups': {
|
||||
'size': 1,
|
||||
'bits': {
|
||||
'bit':
|
||||
# TODO: support passing the bitmask
|
||||
#[
|
||||
#{ 'name': 'eth-phy', 'value': True },
|
||||
{ 'name': 'eth-mac', 'value': True },
|
||||
#{ 'name': 'eth-ctrl', 'value': True },
|
||||
#{ 'name': 'rmon', 'value': True },
|
||||
#],
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
rsp = dumpit(ynl, args, 'stats-get', req)
|
||||
pprint.PrettyPrinter().pprint(rsp)
|
||||
return
|
||||
|
||||
if args.show_time_stamping:
|
||||
tsinfo = dumpit(ynl, args, 'tsinfo-get')
|
||||
|
||||
print(f'Time stamping parameters for {args.device}:')
|
||||
|
||||
print('Capabilities:')
|
||||
[print(f'\t{v}') for v in bits_to_dict(tsinfo['timestamping'])]
|
||||
|
||||
print(f'PTP Hardware Clock: {tsinfo["phc-index"]}')
|
||||
|
||||
print('Hardware Transmit Timestamp Modes:')
|
||||
[print(f'\t{v}') for v in bits_to_dict(tsinfo['tx-types'])]
|
||||
|
||||
print('Hardware Receive Filter Modes:')
|
||||
[print(f'\t{v}') for v in bits_to_dict(tsinfo['rx-filters'])]
|
||||
return
|
||||
|
||||
print(f'Settings for {args.device}:')
|
||||
linkmodes = dumpit(ynl, args, 'linkmodes-get')
|
||||
ours = bits_to_dict(linkmodes['ours'])
|
||||
|
||||
supported_ports = ('TP', 'AUI', 'BNC', 'MII', 'FIBRE', 'Backplane')
|
||||
ports = [ p for p in supported_ports if ours.get(p, False)]
|
||||
print(f'Supported ports: [ {" ".join(ports)} ]')
|
||||
|
||||
print_speed('Supported link modes', ours)
|
||||
|
||||
print_field(ours, ('Pause', 'Supported pause frame use', 'yn'))
|
||||
print_field(ours, ('Autoneg', 'Supports auto-negotiation', 'yn'))
|
||||
|
||||
supported_fec = ('None', 'PS', 'BASER', 'LLRS')
|
||||
fec = [ p for p in supported_fec if ours.get(p, False)]
|
||||
fec_str = " ".join(fec)
|
||||
if len(fec) == 0:
|
||||
fec_str = "Not reported"
|
||||
|
||||
print(f'Supported FEC modes: {fec_str}')
|
||||
|
||||
speed = 'Unknown!'
|
||||
if linkmodes['speed'] > 0 and linkmodes['speed'] < 0xffffffff:
|
||||
speed = f'{linkmodes["speed"]}Mb/s'
|
||||
print(f'Speed: {speed}')
|
||||
|
||||
duplex_modes = {
|
||||
0: 'Half',
|
||||
1: 'Full',
|
||||
}
|
||||
duplex = duplex_modes.get(linkmodes["duplex"], None)
|
||||
if not duplex:
|
||||
duplex = f'Unknown! ({linkmodes["duplex"]})'
|
||||
print(f'Duplex: {duplex}')
|
||||
|
||||
autoneg = "off"
|
||||
if linkmodes.get("autoneg", 0) != 0:
|
||||
autoneg = "on"
|
||||
print(f'Auto-negotiation: {autoneg}')
|
||||
|
||||
ports = {
|
||||
0: 'Twisted Pair',
|
||||
1: 'AUI',
|
||||
2: 'MII',
|
||||
3: 'FIBRE',
|
||||
4: 'BNC',
|
||||
5: 'Directly Attached Copper',
|
||||
0xef: 'None',
|
||||
}
|
||||
linkinfo = dumpit(ynl, args, 'linkinfo-get')
|
||||
print(f'Port: {ports.get(linkinfo["port"], "Other")}')
|
||||
|
||||
print_field(linkinfo, ('phyaddr', 'PHYAD'))
|
||||
|
||||
transceiver = {
|
||||
0: 'Internal',
|
||||
1: 'External',
|
||||
}
|
||||
print(f'Transceiver: {transceiver.get(linkinfo["transceiver"], "Unknown")}')
|
||||
|
||||
mdix_ctrl = {
|
||||
1: 'off',
|
||||
2: 'on',
|
||||
}
|
||||
mdix = mdix_ctrl.get(linkinfo['tp-mdix-ctrl'], None)
|
||||
if mdix:
|
||||
mdix = mdix + ' (forced)'
|
||||
else:
|
||||
mdix = mdix_ctrl.get(linkinfo['tp-mdix'], 'Unknown (auto)')
|
||||
print(f'MDI-X: {mdix}')
|
||||
|
||||
debug = dumpit(ynl, args, 'debug-get')
|
||||
msgmask = bits_to_dict(debug.get("msgmask", [])).keys()
|
||||
print(f'Current message level: {" ".join(msgmask)}')
|
||||
|
||||
linkstate = dumpit(ynl, args, 'linkstate-get')
|
||||
detected_states = {
|
||||
0: 'no',
|
||||
1: 'yes',
|
||||
}
|
||||
# TODO: wol-get
|
||||
detected = detected_states.get(linkstate['link'], 'unknown')
|
||||
print(f'Link detected: {detected}')
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -90,8 +90,8 @@ class SpecEnumEntry(SpecElement):
|
||||
def raw_value(self):
|
||||
return self.value
|
||||
|
||||
def user_value(self):
|
||||
if self.enum_set['type'] == 'flags':
|
||||
def user_value(self, as_flags=None):
|
||||
if self.enum_set['type'] == 'flags' or as_flags:
|
||||
return 1 << self.value
|
||||
else:
|
||||
return self.value
|
||||
@@ -136,10 +136,10 @@ class SpecEnumSet(SpecElement):
|
||||
return True
|
||||
return False
|
||||
|
||||
def get_mask(self):
|
||||
def get_mask(self, as_flags=None):
|
||||
mask = 0
|
||||
for e in self.entries.values():
|
||||
mask += e.user_value()
|
||||
mask += e.user_value(as_flags)
|
||||
return mask
|
||||
|
||||
|
||||
@@ -149,8 +149,11 @@ class SpecAttr(SpecElement):
|
||||
Represents a single attribute type within an attr space.
|
||||
|
||||
Attributes:
|
||||
value numerical ID when serialized
|
||||
attr_set Attribute Set containing this attr
|
||||
value numerical ID when serialized
|
||||
attr_set Attribute Set containing this attr
|
||||
is_multi bool, attr may repeat multiple times
|
||||
struct_name string, name of struct definition
|
||||
sub_type string, name of sub type
|
||||
"""
|
||||
def __init__(self, family, attr_set, yaml, value):
|
||||
super().__init__(family, yaml)
|
||||
@@ -158,6 +161,9 @@ class SpecAttr(SpecElement):
|
||||
self.value = value
|
||||
self.attr_set = attr_set
|
||||
self.is_multi = yaml.get('multi-attr', False)
|
||||
self.struct_name = yaml.get('struct')
|
||||
self.sub_type = yaml.get('sub-type')
|
||||
self.byte_order = yaml.get('byte-order')
|
||||
|
||||
|
||||
class SpecAttrSet(SpecElement):
|
||||
@@ -214,22 +220,61 @@ class SpecAttrSet(SpecElement):
|
||||
return self.attrs.items()
|
||||
|
||||
|
||||
class SpecStructMember(SpecElement):
|
||||
"""Struct member attribute
|
||||
|
||||
Represents a single struct member attribute.
|
||||
|
||||
Attributes:
|
||||
type string, type of the member attribute
|
||||
"""
|
||||
def __init__(self, family, yaml):
|
||||
super().__init__(family, yaml)
|
||||
self.type = yaml['type']
|
||||
|
||||
|
||||
class SpecStruct(SpecElement):
|
||||
"""Netlink struct type
|
||||
|
||||
Represents a C struct definition.
|
||||
|
||||
Attributes:
|
||||
members ordered list of struct members
|
||||
"""
|
||||
def __init__(self, family, yaml):
|
||||
super().__init__(family, yaml)
|
||||
|
||||
self.members = []
|
||||
for member in yaml.get('members', []):
|
||||
self.members.append(self.new_member(family, member))
|
||||
|
||||
def new_member(self, family, elem):
|
||||
return SpecStructMember(family, elem)
|
||||
|
||||
def __iter__(self):
|
||||
yield from self.members
|
||||
|
||||
def items(self):
|
||||
return self.members.items()
|
||||
|
||||
|
||||
class SpecOperation(SpecElement):
|
||||
"""Netlink Operation
|
||||
|
||||
Information about a single Netlink operation.
|
||||
|
||||
Attributes:
|
||||
value numerical ID when serialized, None if req/rsp values differ
|
||||
value numerical ID when serialized, None if req/rsp values differ
|
||||
|
||||
req_value numerical ID when serialized, user -> kernel
|
||||
rsp_value numerical ID when serialized, user <- kernel
|
||||
is_call bool, whether the operation is a call
|
||||
is_async bool, whether the operation is a notification
|
||||
is_resv bool, whether the operation does not exist (it's just a reserved ID)
|
||||
attr_set attribute set name
|
||||
req_value numerical ID when serialized, user -> kernel
|
||||
rsp_value numerical ID when serialized, user <- kernel
|
||||
is_call bool, whether the operation is a call
|
||||
is_async bool, whether the operation is a notification
|
||||
is_resv bool, whether the operation does not exist (it's just a reserved ID)
|
||||
attr_set attribute set name
|
||||
fixed_header string, optional name of fixed header struct
|
||||
|
||||
yaml raw spec as loaded from the spec file
|
||||
yaml raw spec as loaded from the spec file
|
||||
"""
|
||||
def __init__(self, family, yaml, req_value, rsp_value):
|
||||
super().__init__(family, yaml)
|
||||
@@ -241,6 +286,7 @@ class SpecOperation(SpecElement):
|
||||
self.is_call = 'do' in yaml or 'dump' in yaml
|
||||
self.is_async = 'notify' in yaml or 'event' in yaml
|
||||
self.is_resv = not self.is_async and not self.is_call
|
||||
self.fixed_header = self.yaml.get('fixed-header', family.fixed_header)
|
||||
|
||||
# Added by resolve:
|
||||
self.attr_set = None
|
||||
@@ -281,6 +327,7 @@ class SpecFamily(SpecElement):
|
||||
msgs_by_value dict of all messages (indexed by name)
|
||||
ops dict of all valid requests / responses
|
||||
consts dict of all constants/enums
|
||||
fixed_header string, optional name of family default fixed header struct
|
||||
"""
|
||||
def __init__(self, spec_path, schema_path=None):
|
||||
with open(spec_path, "r") as stream:
|
||||
@@ -344,6 +391,9 @@ class SpecFamily(SpecElement):
|
||||
def new_attr_set(self, elem):
|
||||
return SpecAttrSet(self, elem)
|
||||
|
||||
def new_struct(self, elem):
|
||||
return SpecStruct(self, elem)
|
||||
|
||||
def new_operation(self, elem, req_val, rsp_val):
|
||||
return SpecOperation(self, elem, req_val, rsp_val)
|
||||
|
||||
@@ -351,6 +401,7 @@ class SpecFamily(SpecElement):
|
||||
self._resolution_list.append(elem)
|
||||
|
||||
def _dictify_ops_unified(self):
|
||||
self.fixed_header = self.yaml['operations'].get('fixed-header')
|
||||
val = 1
|
||||
for elem in self.yaml['operations']['list']:
|
||||
if 'value' in elem:
|
||||
@@ -362,6 +413,7 @@ class SpecFamily(SpecElement):
|
||||
self.msgs[op.name] = op
|
||||
|
||||
def _dictify_ops_directional(self):
|
||||
self.fixed_header = self.yaml['operations'].get('fixed-header')
|
||||
req_val = rsp_val = 1
|
||||
for elem in self.yaml['operations']['list']:
|
||||
if 'notify' in elem:
|
||||
@@ -392,6 +444,15 @@ class SpecFamily(SpecElement):
|
||||
|
||||
self.msgs[op.name] = op
|
||||
|
||||
def find_operation(self, name):
|
||||
"""
|
||||
For a given operation name, find and return operation spec.
|
||||
"""
|
||||
for op in self.yaml['operations']['list']:
|
||||
if name == op['name']:
|
||||
return op
|
||||
return None
|
||||
|
||||
def resolve(self):
|
||||
self.resolve_up(super())
|
||||
|
||||
@@ -399,6 +460,8 @@ class SpecFamily(SpecElement):
|
||||
for elem in definitions:
|
||||
if elem['type'] == 'enum' or elem['type'] == 'flags':
|
||||
self.consts[elem['name']] = self.new_enum(elem)
|
||||
elif elem['type'] == 'struct':
|
||||
self.consts[elem['name']] = self.new_struct(elem)
|
||||
else:
|
||||
self.consts[elem['name']] = elem
|
||||
|
||||
|
||||
@@ -67,7 +67,20 @@ class Netlink:
|
||||
NLMSGERR_ATTR_MISS_NEST = 6
|
||||
|
||||
|
||||
class NlError(Exception):
|
||||
def __init__(self, nl_msg):
|
||||
self.nl_msg = nl_msg
|
||||
|
||||
def __str__(self):
|
||||
return f"Netlink error: {os.strerror(-self.nl_msg.error)}\n{self.nl_msg}"
|
||||
|
||||
|
||||
class NlAttr:
|
||||
type_formats = { 'u8' : ('B', 1), 's8' : ('b', 1),
|
||||
'u16': ('H', 2), 's16': ('h', 2),
|
||||
'u32': ('I', 4), 's32': ('i', 4),
|
||||
'u64': ('Q', 8), 's64': ('q', 8) }
|
||||
|
||||
def __init__(self, raw, offset):
|
||||
self._len, self._type = struct.unpack("HH", raw[offset:offset + 4])
|
||||
self.type = self._type & ~Netlink.NLA_TYPE_MASK
|
||||
@@ -75,17 +88,25 @@ class NlAttr:
|
||||
self.full_len = (self.payload_len + 3) & ~3
|
||||
self.raw = raw[offset + 4:offset + self.payload_len]
|
||||
|
||||
def format_byte_order(byte_order):
|
||||
if byte_order:
|
||||
return ">" if byte_order == "big-endian" else "<"
|
||||
return ""
|
||||
|
||||
def as_u8(self):
|
||||
return struct.unpack("B", self.raw)[0]
|
||||
|
||||
def as_u16(self):
|
||||
return struct.unpack("H", self.raw)[0]
|
||||
def as_u16(self, byte_order=None):
|
||||
endian = NlAttr.format_byte_order(byte_order)
|
||||
return struct.unpack(f"{endian}H", self.raw)[0]
|
||||
|
||||
def as_u32(self):
|
||||
return struct.unpack("I", self.raw)[0]
|
||||
def as_u32(self, byte_order=None):
|
||||
endian = NlAttr.format_byte_order(byte_order)
|
||||
return struct.unpack(f"{endian}I", self.raw)[0]
|
||||
|
||||
def as_u64(self):
|
||||
return struct.unpack("Q", self.raw)[0]
|
||||
def as_u64(self, byte_order=None):
|
||||
endian = NlAttr.format_byte_order(byte_order)
|
||||
return struct.unpack(f"{endian}Q", self.raw)[0]
|
||||
|
||||
def as_strz(self):
|
||||
return self.raw.decode('ascii')[:-1]
|
||||
@@ -93,6 +114,21 @@ class NlAttr:
|
||||
def as_bin(self):
|
||||
return self.raw
|
||||
|
||||
def as_c_array(self, type):
|
||||
format, _ = self.type_formats[type]
|
||||
return list({ x[0] for x in struct.iter_unpack(format, self.raw) })
|
||||
|
||||
def as_struct(self, members):
|
||||
value = dict()
|
||||
offset = 0
|
||||
for m in members:
|
||||
# TODO: handle non-scalar members
|
||||
format, size = self.type_formats[m.type]
|
||||
decoded = struct.unpack_from(format, self.raw, offset)
|
||||
offset += size
|
||||
value[m.name] = decoded[0]
|
||||
return value
|
||||
|
||||
def __repr__(self):
|
||||
return f"[type:{self.type} len:{self._len}] {self.raw}"
|
||||
|
||||
@@ -258,14 +294,22 @@ def _genl_load_families():
|
||||
|
||||
|
||||
class GenlMsg:
|
||||
def __init__(self, nl_msg):
|
||||
def __init__(self, nl_msg, fixed_header_members=[]):
|
||||
self.nl = nl_msg
|
||||
|
||||
self.hdr = nl_msg.raw[0:4]
|
||||
self.raw = nl_msg.raw[4:]
|
||||
offset = 4
|
||||
|
||||
self.genl_cmd, self.genl_version, _ = struct.unpack("BBH", self.hdr)
|
||||
|
||||
self.fixed_header_attrs = dict()
|
||||
for m in fixed_header_members:
|
||||
format, size = NlAttr.type_formats[m.type]
|
||||
decoded = struct.unpack_from(format, nl_msg.raw, offset)
|
||||
offset += size
|
||||
self.fixed_header_attrs[m.name] = decoded[0]
|
||||
|
||||
self.raw = nl_msg.raw[offset:]
|
||||
self.raw_attrs = NlAttrs(self.raw)
|
||||
|
||||
def __repr__(self):
|
||||
@@ -314,7 +358,10 @@ class YnlFamily(SpecFamily):
|
||||
bound_f = functools.partial(self._op, op_name)
|
||||
setattr(self, op.ident_name, bound_f)
|
||||
|
||||
self.family = GenlFamily(self.yaml['name'])
|
||||
try:
|
||||
self.family = GenlFamily(self.yaml['name'])
|
||||
except KeyError:
|
||||
raise Exception(f"Family '{self.yaml['name']}' not supported by the kernel")
|
||||
|
||||
def ntf_subscribe(self, mcast_name):
|
||||
if mcast_name not in self.family.genl_family['mcast']:
|
||||
@@ -334,8 +381,17 @@ class YnlFamily(SpecFamily):
|
||||
attr_payload += self._add_attr(attr['nested-attributes'], subname, subvalue)
|
||||
elif attr["type"] == 'flag':
|
||||
attr_payload = b''
|
||||
elif attr["type"] == 'u8':
|
||||
attr_payload = struct.pack("B", int(value))
|
||||
elif attr["type"] == 'u16':
|
||||
endian = NlAttr.format_byte_order(attr.byte_order)
|
||||
attr_payload = struct.pack(f"{endian}H", int(value))
|
||||
elif attr["type"] == 'u32':
|
||||
attr_payload = struct.pack("I", int(value))
|
||||
endian = NlAttr.format_byte_order(attr.byte_order)
|
||||
attr_payload = struct.pack(f"{endian}I", int(value))
|
||||
elif attr["type"] == 'u64':
|
||||
endian = NlAttr.format_byte_order(attr.byte_order)
|
||||
attr_payload = struct.pack(f"{endian}Q", int(value))
|
||||
elif attr["type"] == 'string':
|
||||
attr_payload = str(value).encode('ascii') + b'\x00'
|
||||
elif attr["type"] == 'binary':
|
||||
@@ -361,6 +417,15 @@ class YnlFamily(SpecFamily):
|
||||
value = enum.entries_by_val[raw - i].name
|
||||
rsp[attr_spec['name']] = value
|
||||
|
||||
def _decode_binary(self, attr, attr_spec):
|
||||
if attr_spec.struct_name:
|
||||
decoded = attr.as_struct(self.consts[attr_spec.struct_name])
|
||||
elif attr_spec.sub_type:
|
||||
decoded = attr.as_c_array(attr_spec.sub_type)
|
||||
else:
|
||||
decoded = attr.as_bin()
|
||||
return decoded
|
||||
|
||||
def _decode(self, attrs, space):
|
||||
attr_space = self.attr_sets[space]
|
||||
rsp = dict()
|
||||
@@ -371,14 +436,16 @@ class YnlFamily(SpecFamily):
|
||||
decoded = subdict
|
||||
elif attr_spec['type'] == 'u8':
|
||||
decoded = attr.as_u8()
|
||||
elif attr_spec['type'] == 'u16':
|
||||
decoded = attr.as_u16(attr_spec.byte_order)
|
||||
elif attr_spec['type'] == 'u32':
|
||||
decoded = attr.as_u32()
|
||||
decoded = attr.as_u32(attr_spec.byte_order)
|
||||
elif attr_spec['type'] == 'u64':
|
||||
decoded = attr.as_u64()
|
||||
decoded = attr.as_u64(attr_spec.byte_order)
|
||||
elif attr_spec["type"] == 'string':
|
||||
decoded = attr.as_strz()
|
||||
elif attr_spec["type"] == 'binary':
|
||||
decoded = attr.as_bin()
|
||||
decoded = self._decode_binary(attr, attr_spec)
|
||||
elif attr_spec["type"] == 'flag':
|
||||
decoded = True
|
||||
else:
|
||||
@@ -463,6 +530,17 @@ class YnlFamily(SpecFamily):
|
||||
|
||||
self.handle_ntf(nl_msg, gm)
|
||||
|
||||
def operation_do_attributes(self, name):
|
||||
"""
|
||||
For a given operation name, find and return a supported
|
||||
set of attributes (as a dict).
|
||||
"""
|
||||
op = self.find_operation(name)
|
||||
if not op:
|
||||
return None
|
||||
|
||||
return op['do']['request']['attributes'].copy()
|
||||
|
||||
def _op(self, method, vals, dump=False):
|
||||
op = self.ops[method]
|
||||
|
||||
@@ -472,6 +550,13 @@ class YnlFamily(SpecFamily):
|
||||
|
||||
req_seq = random.randint(1024, 65535)
|
||||
msg = _genl_msg(self.family.family_id, nl_flags, op.req_value, 1, req_seq)
|
||||
fixed_header_members = []
|
||||
if op.fixed_header:
|
||||
fixed_header_members = self.consts[op.fixed_header].members
|
||||
for m in fixed_header_members:
|
||||
value = vals.pop(m.name)
|
||||
format, _ = NlAttr.type_formats[m.type]
|
||||
msg += struct.pack(format, value)
|
||||
for name, value in vals.items():
|
||||
msg += self._add_attr(op.attr_set.name, name, value)
|
||||
msg = _genl_msg_finalize(msg)
|
||||
@@ -488,9 +573,7 @@ class YnlFamily(SpecFamily):
|
||||
self._decode_extack(msg, op.attr_set, nl_msg.extack)
|
||||
|
||||
if nl_msg.error:
|
||||
print("Netlink error:", os.strerror(-nl_msg.error))
|
||||
print(nl_msg)
|
||||
return
|
||||
raise NlError(nl_msg)
|
||||
if nl_msg.done:
|
||||
if nl_msg.extack:
|
||||
print("Netlink warning:")
|
||||
@@ -498,7 +581,7 @@ class YnlFamily(SpecFamily):
|
||||
done = True
|
||||
break
|
||||
|
||||
gm = GenlMsg(nl_msg)
|
||||
gm = GenlMsg(nl_msg, fixed_header_members)
|
||||
# Check if this is a reply to our request
|
||||
if nl_msg.nl_seq != req_seq or gm.genl_cmd != op.rsp_value:
|
||||
if gm.genl_cmd in self.async_msg_ids:
|
||||
@@ -508,7 +591,8 @@ class YnlFamily(SpecFamily):
|
||||
print('Unexpected message: ' + repr(gm))
|
||||
continue
|
||||
|
||||
rsp.append(self._decode(gm.raw_attrs, op.attr_set.name))
|
||||
rsp.append(self._decode(gm.raw_attrs, op.attr_set.name)
|
||||
| gm.fixed_header_attrs)
|
||||
|
||||
if not rsp:
|
||||
return None
|
||||
|
||||
2
tools/net/ynl/requirements.txt
Normal file
2
tools/net/ynl/requirements.txt
Normal file
@@ -0,0 +1,2 @@
|
||||
jsonschema==4.*
|
||||
PyYAML==6.*
|
||||
@@ -254,7 +254,8 @@ class TypeScalar(Type):
|
||||
def _attr_policy(self, policy):
|
||||
if 'flags-mask' in self.checks or self.is_bitfield:
|
||||
if self.is_bitfield:
|
||||
mask = self.family.consts[self.attr['enum']].get_mask()
|
||||
enum = self.family.consts[self.attr['enum']]
|
||||
mask = enum.get_mask(as_flags=True)
|
||||
else:
|
||||
flags = self.family.consts[self.checks['flags-mask']]
|
||||
flag_cnt = len(flags['entries'])
|
||||
@@ -1696,7 +1697,9 @@ def print_kernel_op_table_fwd(family, cw, terminate):
|
||||
'split': 'genl_split_ops'}
|
||||
struct_type = pol_to_struct[family.kernel_policy]
|
||||
|
||||
if family.kernel_policy == 'split':
|
||||
if not exported:
|
||||
cnt = ""
|
||||
elif family.kernel_policy == 'split':
|
||||
cnt = 0
|
||||
for op in family.ops.values():
|
||||
if 'do' in op:
|
||||
|
||||
@@ -108,6 +108,8 @@ endif # GCC_TOOLCHAIN_DIR
|
||||
endif # CLANG_CROSS_FLAGS
|
||||
CFLAGS += $(CLANG_CROSS_FLAGS)
|
||||
AFLAGS += $(CLANG_CROSS_FLAGS)
|
||||
else
|
||||
CLANG_CROSS_FLAGS :=
|
||||
endif # CROSS_COMPILE
|
||||
|
||||
# Hack to avoid type-punned warnings on old systems such as RHEL5:
|
||||
|
||||
@@ -44,6 +44,7 @@ lookup_key # test_lookup_key__attach unexp
|
||||
lru_bug # lru_bug__attach unexpected error: -524 (errno 524)
|
||||
modify_return # modify_return__attach failed unexpected error: -524 (errno 524)
|
||||
module_attach # skel_attach skeleton attach failed: -524
|
||||
module_fentry_shadow # bpf_link_create unexpected bpf_link_create: actual -524 < expected 0
|
||||
mptcp/base # run_test mptcp unexpected error: -524 (errno 524)
|
||||
netcnt # packets unexpected packets: actual 10001 != expected 10000
|
||||
rcu_read_lock # failed to attach: ERROR: strerror_r(-524)=22
|
||||
|
||||
@@ -4,10 +4,14 @@ bloom_filter_map # failed to find kernel BTF type ID of
|
||||
bpf_cookie # failed to open_and_load program: -524 (trampoline)
|
||||
bpf_loop # attaches to __x64_sys_nanosleep
|
||||
cgrp_local_storage # prog_attach unexpected error: -524 (trampoline)
|
||||
dynptr/test_dynptr_skb_data
|
||||
dynptr/test_skb_readonly
|
||||
fexit_sleep # fexit_skel_load fexit skeleton failed (trampoline)
|
||||
get_stack_raw_tp # user_stack corrupted user stack (no backchain userspace)
|
||||
iters/testmod_seq* # s390x doesn't support kfuncs in modules yet
|
||||
kprobe_multi_bench_attach # bpf_program__attach_kprobe_multi_opts unexpected error: -95
|
||||
kprobe_multi_test # relies on fentry
|
||||
ksyms_btf/weak_ksyms* # test_ksyms_weak__open_and_load unexpected error: -22 (kfunc)
|
||||
ksyms_module # test_ksyms_module__open_and_load unexpected error: -9 (?)
|
||||
ksyms_module_libbpf # JIT does not support calling kernel function (kfunc)
|
||||
ksyms_module_lskel # test_ksyms_module_lskel__open_and_load unexpected error: -9 (?)
|
||||
|
||||
@@ -36,7 +36,7 @@ endif
|
||||
|
||||
# Order correspond to 'make run_tests' order
|
||||
TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
|
||||
test_verifier_log test_dev_cgroup \
|
||||
test_dev_cgroup \
|
||||
test_sock test_sockmap get_cgroup_id_user \
|
||||
test_cgroup_storage \
|
||||
test_tcpnotify_user test_sysctl \
|
||||
@@ -201,7 +201,7 @@ $(OUTPUT)/sign-file: ../../../../scripts/sign-file.c
|
||||
$< -o $@ \
|
||||
$(shell $(HOSTPKG_CONFIG) --libs libcrypto 2> /dev/null || echo -lcrypto)
|
||||
|
||||
$(OUTPUT)/bpf_testmod.ko: $(VMLINUX_BTF) $(wildcard bpf_testmod/Makefile bpf_testmod/*.[ch])
|
||||
$(OUTPUT)/bpf_testmod.ko: $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard bpf_testmod/Makefile bpf_testmod/*.[ch])
|
||||
$(call msg,MOD,,$@)
|
||||
$(Q)$(RM) bpf_testmod/bpf_testmod.ko # force re-compilation
|
||||
$(Q)$(MAKE) $(submake_extras) RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) -C bpf_testmod
|
||||
@@ -231,9 +231,11 @@ TEST_GEN_PROGS_EXTENDED += $(TRUNNER_BPFTOOL)
|
||||
|
||||
$(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): $(BPFOBJ)
|
||||
|
||||
CGROUP_HELPERS := $(OUTPUT)/cgroup_helpers.o
|
||||
TESTING_HELPERS := $(OUTPUT)/testing_helpers.o
|
||||
CGROUP_HELPERS := $(OUTPUT)/cgroup_helpers.o
|
||||
UNPRIV_HELPERS := $(OUTPUT)/unpriv_helpers.o
|
||||
TRACE_HELPERS := $(OUTPUT)/trace_helpers.o
|
||||
JSON_WRITER := $(OUTPUT)/json_writer.o
|
||||
CAP_HELPERS := $(OUTPUT)/cap_helpers.o
|
||||
|
||||
$(OUTPUT)/test_dev_cgroup: $(CGROUP_HELPERS) $(TESTING_HELPERS)
|
||||
@@ -251,7 +253,7 @@ $(OUTPUT)/test_lirc_mode2_user: $(TESTING_HELPERS)
|
||||
$(OUTPUT)/xdping: $(TESTING_HELPERS)
|
||||
$(OUTPUT)/flow_dissector_load: $(TESTING_HELPERS)
|
||||
$(OUTPUT)/test_maps: $(TESTING_HELPERS)
|
||||
$(OUTPUT)/test_verifier: $(TESTING_HELPERS) $(CAP_HELPERS)
|
||||
$(OUTPUT)/test_verifier: $(TESTING_HELPERS) $(CAP_HELPERS) $(UNPRIV_HELPERS)
|
||||
$(OUTPUT)/xsk.o: $(BPFOBJ)
|
||||
|
||||
BPFTOOL ?= $(DEFAULT_BPFTOOL)
|
||||
@@ -338,7 +340,8 @@ $(RESOLVE_BTFIDS): $(HOST_BPFOBJ) | $(HOST_BUILD_DIR)/resolve_btfids \
|
||||
define get_sys_includes
|
||||
$(shell $(1) $(2) -v -E - </dev/null 2>&1 \
|
||||
| sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }') \
|
||||
$(shell $(1) $(2) -dM -E - </dev/null | grep '__riscv_xlen ' | awk '{printf("-D__riscv_xlen=%d -D__BITS_PER_LONG=%d", $$3, $$3)}')
|
||||
$(shell $(1) $(2) -dM -E - </dev/null | grep '__riscv_xlen ' | awk '{printf("-D__riscv_xlen=%d -D__BITS_PER_LONG=%d", $$3, $$3)}') \
|
||||
$(shell $(1) $(2) -dM -E - </dev/null | grep '__loongarch_grlen ' | awk '{printf("-D__BITS_PER_LONG=%d", $$3)}')
|
||||
endef
|
||||
|
||||
# Determine target endianness.
|
||||
@@ -351,7 +354,7 @@ CLANG_TARGET_ARCH = --target=$(notdir $(CROSS_COMPILE:%-=%))
|
||||
endif
|
||||
|
||||
CLANG_SYS_INCLUDES = $(call get_sys_includes,$(CLANG),$(CLANG_TARGET_ARCH))
|
||||
BPF_CFLAGS = -g -Werror -D__TARGET_ARCH_$(SRCARCH) $(MENDIAN) \
|
||||
BPF_CFLAGS = -g -Wall -Werror -D__TARGET_ARCH_$(SRCARCH) $(MENDIAN) \
|
||||
-I$(INCLUDE_DIR) -I$(CURDIR) -I$(APIDIR) \
|
||||
-I$(abspath $(OUTPUT)/../usr/include)
|
||||
|
||||
@@ -558,7 +561,9 @@ TRUNNER_BPF_PROGS_DIR := progs
|
||||
TRUNNER_EXTRA_SOURCES := test_progs.c cgroup_helpers.c trace_helpers.c \
|
||||
network_helpers.c testing_helpers.c \
|
||||
btf_helpers.c flow_dissector_load.h \
|
||||
cap_helpers.c test_loader.c xsk.c
|
||||
cap_helpers.c test_loader.c xsk.c disasm.c \
|
||||
json_writer.c unpriv_helpers.c
|
||||
|
||||
TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read $(OUTPUT)/bpf_testmod.ko \
|
||||
$(OUTPUT)/liburandom_read.so \
|
||||
$(OUTPUT)/xdp_synproxy \
|
||||
@@ -607,7 +612,7 @@ $(OUTPUT)/test_verifier: test_verifier.c verifier/tests.h $(BPFOBJ) | $(OUTPUT)
|
||||
$(call msg,BINARY,,$@)
|
||||
$(Q)$(CC) $(CFLAGS) $(filter %.a %.o %.c,$^) $(LDLIBS) -o $@
|
||||
|
||||
$(OUTPUT)/xskxceiver: xskxceiver.c $(OUTPUT)/xsk.o $(OUTPUT)/xsk_xdp_progs.skel.h $(BPFOBJ) | $(OUTPUT)
|
||||
$(OUTPUT)/xskxceiver: xskxceiver.c xskxceiver.h $(OUTPUT)/xsk.o $(OUTPUT)/xsk_xdp_progs.skel.h $(BPFOBJ) | $(OUTPUT)
|
||||
$(call msg,BINARY,,$@)
|
||||
$(Q)$(CC) $(CFLAGS) $(filter %.a %.o %.c,$^) $(LDLIBS) -o $@
|
||||
|
||||
@@ -638,6 +643,7 @@ $(OUTPUT)/bench_strncmp.o: $(OUTPUT)/strncmp_bench.skel.h
|
||||
$(OUTPUT)/bench_bpf_hashmap_full_update.o: $(OUTPUT)/bpf_hashmap_full_update_bench.skel.h
|
||||
$(OUTPUT)/bench_local_storage.o: $(OUTPUT)/local_storage_bench.skel.h
|
||||
$(OUTPUT)/bench_local_storage_rcu_tasks_trace.o: $(OUTPUT)/local_storage_rcu_tasks_trace_bench.skel.h
|
||||
$(OUTPUT)/bench_local_storage_create.o: $(OUTPUT)/bench_local_storage_create.skel.h
|
||||
$(OUTPUT)/bench_bpf_hashmap_lookup.o: $(OUTPUT)/bpf_hashmap_lookup.skel.h
|
||||
$(OUTPUT)/bench.o: bench.h testing_helpers.h $(BPFOBJ)
|
||||
$(OUTPUT)/bench: LDLIBS += -lm
|
||||
@@ -655,6 +661,7 @@ $(OUTPUT)/bench: $(OUTPUT)/bench.o \
|
||||
$(OUTPUT)/bench_local_storage.o \
|
||||
$(OUTPUT)/bench_local_storage_rcu_tasks_trace.o \
|
||||
$(OUTPUT)/bench_bpf_hashmap_lookup.o \
|
||||
$(OUTPUT)/bench_local_storage_create.o \
|
||||
#
|
||||
$(call msg,BINARY,,$@)
|
||||
$(Q)$(CC) $(CFLAGS) $(LDFLAGS) $(filter %.a %.o,$^) $(LDLIBS) -o $@
|
||||
|
||||
9
tools/testing/selftests/bpf/autoconf_helper.h
Normal file
9
tools/testing/selftests/bpf/autoconf_helper.h
Normal file
@@ -0,0 +1,9 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
|
||||
#ifdef HAVE_GENHDR
|
||||
# include "autoconf.h"
|
||||
#else
|
||||
# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
|
||||
# define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
|
||||
# endif
|
||||
#endif
|
||||
@@ -278,6 +278,7 @@ extern struct argp bench_local_storage_argp;
|
||||
extern struct argp bench_local_storage_rcu_tasks_trace_argp;
|
||||
extern struct argp bench_strncmp_argp;
|
||||
extern struct argp bench_hashmap_lookup_argp;
|
||||
extern struct argp bench_local_storage_create_argp;
|
||||
|
||||
static const struct argp_child bench_parsers[] = {
|
||||
{ &bench_ringbufs_argp, 0, "Ring buffers benchmark", 0 },
|
||||
@@ -288,6 +289,7 @@ static const struct argp_child bench_parsers[] = {
|
||||
{ &bench_local_storage_rcu_tasks_trace_argp, 0,
|
||||
"local_storage RCU Tasks Trace slowdown benchmark", 0 },
|
||||
{ &bench_hashmap_lookup_argp, 0, "Hashmap lookup benchmark", 0 },
|
||||
{ &bench_local_storage_create_argp, 0, "local-storage-create benchmark", 0 },
|
||||
{},
|
||||
};
|
||||
|
||||
@@ -515,6 +517,7 @@ extern const struct bench bench_local_storage_cache_interleaved_get;
|
||||
extern const struct bench bench_local_storage_cache_hashmap_control;
|
||||
extern const struct bench bench_local_storage_tasks_trace;
|
||||
extern const struct bench bench_bpf_hashmap_lookup;
|
||||
extern const struct bench bench_local_storage_create;
|
||||
|
||||
static const struct bench *benchs[] = {
|
||||
&bench_count_global,
|
||||
@@ -555,6 +558,7 @@ static const struct bench *benchs[] = {
|
||||
&bench_local_storage_cache_hashmap_control,
|
||||
&bench_local_storage_tasks_trace,
|
||||
&bench_bpf_hashmap_lookup,
|
||||
&bench_local_storage_create,
|
||||
};
|
||||
|
||||
static void find_benchmark(void)
|
||||
|
||||
264
tools/testing/selftests/bpf/benchs/bench_local_storage_create.c
Normal file
264
tools/testing/selftests/bpf/benchs/bench_local_storage_create.c
Normal file
@@ -0,0 +1,264 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
|
||||
|
||||
#include <sys/types.h>
|
||||
#include <sys/socket.h>
|
||||
#include <pthread.h>
|
||||
#include <argp.h>
|
||||
|
||||
#include "bench.h"
|
||||
#include "bench_local_storage_create.skel.h"
|
||||
|
||||
struct thread {
|
||||
int *fds;
|
||||
pthread_t *pthds;
|
||||
int *pthd_results;
|
||||
};
|
||||
|
||||
static struct bench_local_storage_create *skel;
|
||||
static struct thread *threads;
|
||||
static long create_owner_errs;
|
||||
static int storage_type = BPF_MAP_TYPE_SK_STORAGE;
|
||||
static int batch_sz = 32;
|
||||
|
||||
enum {
|
||||
ARG_BATCH_SZ = 9000,
|
||||
ARG_STORAGE_TYPE = 9001,
|
||||
};
|
||||
|
||||
static const struct argp_option opts[] = {
|
||||
{ "batch-size", ARG_BATCH_SZ, "BATCH_SIZE", 0,
|
||||
"The number of storage creations in each batch" },
|
||||
{ "storage-type", ARG_STORAGE_TYPE, "STORAGE_TYPE", 0,
|
||||
"The type of local storage to test (socket or task)" },
|
||||
{},
|
||||
};
|
||||
|
||||
static error_t parse_arg(int key, char *arg, struct argp_state *state)
|
||||
{
|
||||
int ret;
|
||||
|
||||
switch (key) {
|
||||
case ARG_BATCH_SZ:
|
||||
ret = atoi(arg);
|
||||
if (ret < 1) {
|
||||
fprintf(stderr, "invalid batch-size\n");
|
||||
argp_usage(state);
|
||||
}
|
||||
batch_sz = ret;
|
||||
break;
|
||||
case ARG_STORAGE_TYPE:
|
||||
if (!strcmp(arg, "task")) {
|
||||
storage_type = BPF_MAP_TYPE_TASK_STORAGE;
|
||||
} else if (!strcmp(arg, "socket")) {
|
||||
storage_type = BPF_MAP_TYPE_SK_STORAGE;
|
||||
} else {
|
||||
fprintf(stderr, "invalid storage-type (socket or task)\n");
|
||||
argp_usage(state);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
return ARGP_ERR_UNKNOWN;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
const struct argp bench_local_storage_create_argp = {
|
||||
.options = opts,
|
||||
.parser = parse_arg,
|
||||
};
|
||||
|
||||
static void validate(void)
|
||||
{
|
||||
if (env.consumer_cnt > 1) {
|
||||
fprintf(stderr,
|
||||
"local-storage-create benchmark does not need consumer\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
static void setup(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
skel = bench_local_storage_create__open_and_load();
|
||||
if (!skel) {
|
||||
fprintf(stderr, "error loading skel\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
skel->bss->bench_pid = getpid();
|
||||
if (storage_type == BPF_MAP_TYPE_SK_STORAGE) {
|
||||
if (!bpf_program__attach(skel->progs.socket_post_create)) {
|
||||
fprintf(stderr, "Error attaching bpf program\n");
|
||||
exit(1);
|
||||
}
|
||||
} else {
|
||||
if (!bpf_program__attach(skel->progs.sched_process_fork)) {
|
||||
fprintf(stderr, "Error attaching bpf program\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
if (!bpf_program__attach(skel->progs.kmalloc)) {
|
||||
fprintf(stderr, "Error attaching bpf program\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
threads = calloc(env.producer_cnt, sizeof(*threads));
|
||||
|
||||
if (!threads) {
|
||||
fprintf(stderr, "cannot alloc thread_res\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
for (i = 0; i < env.producer_cnt; i++) {
|
||||
struct thread *t = &threads[i];
|
||||
|
||||
if (storage_type == BPF_MAP_TYPE_SK_STORAGE) {
|
||||
t->fds = malloc(batch_sz * sizeof(*t->fds));
|
||||
if (!t->fds) {
|
||||
fprintf(stderr, "cannot alloc t->fds\n");
|
||||
exit(1);
|
||||
}
|
||||
} else {
|
||||
t->pthds = malloc(batch_sz * sizeof(*t->pthds));
|
||||
if (!t->pthds) {
|
||||
fprintf(stderr, "cannot alloc t->pthds\n");
|
||||
exit(1);
|
||||
}
|
||||
t->pthd_results = malloc(batch_sz * sizeof(*t->pthd_results));
|
||||
if (!t->pthd_results) {
|
||||
fprintf(stderr, "cannot alloc t->pthd_results\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void measure(struct bench_res *res)
|
||||
{
|
||||
res->hits = atomic_swap(&skel->bss->create_cnts, 0);
|
||||
res->drops = atomic_swap(&skel->bss->kmalloc_cnts, 0);
|
||||
}
|
||||
|
||||
static void *consumer(void *input)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void *sk_producer(void *input)
|
||||
{
|
||||
struct thread *t = &threads[(long)(input)];
|
||||
int *fds = t->fds;
|
||||
int i;
|
||||
|
||||
while (true) {
|
||||
for (i = 0; i < batch_sz; i++) {
|
||||
fds[i] = socket(AF_INET6, SOCK_DGRAM, 0);
|
||||
if (fds[i] == -1)
|
||||
atomic_inc(&create_owner_errs);
|
||||
}
|
||||
|
||||
for (i = 0; i < batch_sz; i++) {
|
||||
if (fds[i] != -1)
|
||||
close(fds[i]);
|
||||
}
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void *thread_func(void *arg)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void *task_producer(void *input)
|
||||
{
|
||||
struct thread *t = &threads[(long)(input)];
|
||||
pthread_t *pthds = t->pthds;
|
||||
int *pthd_results = t->pthd_results;
|
||||
int i;
|
||||
|
||||
while (true) {
|
||||
for (i = 0; i < batch_sz; i++) {
|
||||
pthd_results[i] = pthread_create(&pthds[i], NULL, thread_func, NULL);
|
||||
if (pthd_results[i])
|
||||
atomic_inc(&create_owner_errs);
|
||||
}
|
||||
|
||||
for (i = 0; i < batch_sz; i++) {
|
||||
if (!pthd_results[i])
|
||||
pthread_join(pthds[i], NULL);;
|
||||
}
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void *producer(void *input)
|
||||
{
|
||||
if (storage_type == BPF_MAP_TYPE_SK_STORAGE)
|
||||
return sk_producer(input);
|
||||
else
|
||||
return task_producer(input);
|
||||
}
|
||||
|
||||
static void report_progress(int iter, struct bench_res *res, long delta_ns)
|
||||
{
|
||||
double creates_per_sec, kmallocs_per_create;
|
||||
|
||||
creates_per_sec = res->hits / 1000.0 / (delta_ns / 1000000000.0);
|
||||
kmallocs_per_create = (double)res->drops / res->hits;
|
||||
|
||||
printf("Iter %3d (%7.3lfus): ",
|
||||
iter, (delta_ns - 1000000000) / 1000.0);
|
||||
printf("creates %8.3lfk/s (%7.3lfk/prod), ",
|
||||
creates_per_sec, creates_per_sec / env.producer_cnt);
|
||||
printf("%3.2lf kmallocs/create\n", kmallocs_per_create);
|
||||
}
|
||||
|
||||
static void report_final(struct bench_res res[], int res_cnt)
|
||||
{
|
||||
double creates_mean = 0.0, creates_stddev = 0.0;
|
||||
long total_creates = 0, total_kmallocs = 0;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < res_cnt; i++) {
|
||||
creates_mean += res[i].hits / 1000.0 / (0.0 + res_cnt);
|
||||
total_creates += res[i].hits;
|
||||
total_kmallocs += res[i].drops;
|
||||
}
|
||||
|
||||
if (res_cnt > 1) {
|
||||
for (i = 0; i < res_cnt; i++)
|
||||
creates_stddev += (creates_mean - res[i].hits / 1000.0) *
|
||||
(creates_mean - res[i].hits / 1000.0) /
|
||||
(res_cnt - 1.0);
|
||||
creates_stddev = sqrt(creates_stddev);
|
||||
}
|
||||
printf("Summary: creates %8.3lf \u00B1 %5.3lfk/s (%7.3lfk/prod), ",
|
||||
creates_mean, creates_stddev, creates_mean / env.producer_cnt);
|
||||
printf("%4.2lf kmallocs/create\n", (double)total_kmallocs / total_creates);
|
||||
if (create_owner_errs || skel->bss->create_errs)
|
||||
printf("%s() errors %ld create_errs %ld\n",
|
||||
storage_type == BPF_MAP_TYPE_SK_STORAGE ?
|
||||
"socket" : "pthread_create",
|
||||
create_owner_errs,
|
||||
skel->bss->create_errs);
|
||||
}
|
||||
|
||||
/* Benchmark performance of creating bpf local storage */
|
||||
const struct bench bench_local_storage_create = {
|
||||
.name = "local-storage-create",
|
||||
.argp = &bench_local_storage_create_argp,
|
||||
.validate = validate,
|
||||
.setup = setup,
|
||||
.producer_thread = producer,
|
||||
.consumer_thread = consumer,
|
||||
.measure = measure,
|
||||
.report_progress = report_progress,
|
||||
.report_final = report_final,
|
||||
};
|
||||
@@ -14,7 +14,8 @@
|
||||
* type ID of a struct in program BTF.
|
||||
*
|
||||
* The 'local_type_id' parameter must be a known constant.
|
||||
* The 'meta' parameter is a hidden argument that is ignored.
|
||||
* The 'meta' parameter is rewritten by the verifier, no need for BPF
|
||||
* program to set it.
|
||||
* Returns
|
||||
* A pointer to an object of the type corresponding to the passed in
|
||||
* 'local_type_id', or NULL on failure.
|
||||
@@ -28,7 +29,8 @@ extern void *bpf_obj_new_impl(__u64 local_type_id, void *meta) __ksym;
|
||||
* Free an allocated object. All fields of the object that require
|
||||
* destruction will be destructed before the storage is freed.
|
||||
*
|
||||
* The 'meta' parameter is a hidden argument that is ignored.
|
||||
* The 'meta' parameter is rewritten by the verifier, no need for BPF
|
||||
* program to set it.
|
||||
* Returns
|
||||
* Void.
|
||||
*/
|
||||
@@ -38,18 +40,50 @@ extern void bpf_obj_drop_impl(void *kptr, void *meta) __ksym;
|
||||
#define bpf_obj_drop(kptr) bpf_obj_drop_impl(kptr, NULL)
|
||||
|
||||
/* Description
|
||||
* Add a new entry to the beginning of the BPF linked list.
|
||||
* Increment the refcount on a refcounted local kptr, turning the
|
||||
* non-owning reference input into an owning reference in the process.
|
||||
*
|
||||
* The 'meta' parameter is rewritten by the verifier, no need for BPF
|
||||
* program to set it.
|
||||
* Returns
|
||||
* Void.
|
||||
* An owning reference to the object pointed to by 'kptr'
|
||||
*/
|
||||
extern void bpf_list_push_front(struct bpf_list_head *head, struct bpf_list_node *node) __ksym;
|
||||
extern void *bpf_refcount_acquire_impl(void *kptr, void *meta) __ksym;
|
||||
|
||||
/* Convenience macro to wrap over bpf_refcount_acquire_impl */
|
||||
#define bpf_refcount_acquire(kptr) bpf_refcount_acquire_impl(kptr, NULL)
|
||||
|
||||
/* Description
|
||||
* Add a new entry to the beginning of the BPF linked list.
|
||||
*
|
||||
* The 'meta' and 'off' parameters are rewritten by the verifier, no need
|
||||
* for BPF programs to set them
|
||||
* Returns
|
||||
* 0 if the node was successfully added
|
||||
* -EINVAL if the node wasn't added because it's already in a list
|
||||
*/
|
||||
extern int bpf_list_push_front_impl(struct bpf_list_head *head,
|
||||
struct bpf_list_node *node,
|
||||
void *meta, __u64 off) __ksym;
|
||||
|
||||
/* Convenience macro to wrap over bpf_list_push_front_impl */
|
||||
#define bpf_list_push_front(head, node) bpf_list_push_front_impl(head, node, NULL, 0)
|
||||
|
||||
/* Description
|
||||
* Add a new entry to the end of the BPF linked list.
|
||||
*
|
||||
* The 'meta' and 'off' parameters are rewritten by the verifier, no need
|
||||
* for BPF programs to set them
|
||||
* Returns
|
||||
* Void.
|
||||
* 0 if the node was successfully added
|
||||
* -EINVAL if the node wasn't added because it's already in a list
|
||||
*/
|
||||
extern void bpf_list_push_back(struct bpf_list_head *head, struct bpf_list_node *node) __ksym;
|
||||
extern int bpf_list_push_back_impl(struct bpf_list_head *head,
|
||||
struct bpf_list_node *node,
|
||||
void *meta, __u64 off) __ksym;
|
||||
|
||||
/* Convenience macro to wrap over bpf_list_push_back_impl */
|
||||
#define bpf_list_push_back(head, node) bpf_list_push_back_impl(head, node, NULL, 0)
|
||||
|
||||
/* Description
|
||||
* Remove the entry at the beginning of the BPF linked list.
|
||||
@@ -75,11 +109,19 @@ extern struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
|
||||
|
||||
/* Description
|
||||
* Add 'node' to rbtree with root 'root' using comparator 'less'
|
||||
*
|
||||
* The 'meta' and 'off' parameters are rewritten by the verifier, no need
|
||||
* for BPF programs to set them
|
||||
* Returns
|
||||
* Nothing
|
||||
* 0 if the node was successfully added
|
||||
* -EINVAL if the node wasn't added because it's already in a tree
|
||||
*/
|
||||
extern void bpf_rbtree_add(struct bpf_rb_root *root, struct bpf_rb_node *node,
|
||||
bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b)) __ksym;
|
||||
extern int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
|
||||
bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b),
|
||||
void *meta, __u64 off) __ksym;
|
||||
|
||||
/* Convenience macro to wrap over bpf_rbtree_add_impl */
|
||||
#define bpf_rbtree_add(head, node, less) bpf_rbtree_add_impl(head, node, less, NULL, 0)
|
||||
|
||||
/* Description
|
||||
* Return the first (leftmost) node in input tree
|
||||
|
||||
38
tools/testing/selftests/bpf/bpf_kfuncs.h
Normal file
38
tools/testing/selftests/bpf/bpf_kfuncs.h
Normal file
@@ -0,0 +1,38 @@
|
||||
#ifndef __BPF_KFUNCS__
|
||||
#define __BPF_KFUNCS__
|
||||
|
||||
/* Description
|
||||
* Initializes an skb-type dynptr
|
||||
* Returns
|
||||
* Error code
|
||||
*/
|
||||
extern int bpf_dynptr_from_skb(struct __sk_buff *skb, __u64 flags,
|
||||
struct bpf_dynptr *ptr__uninit) __ksym;
|
||||
|
||||
/* Description
|
||||
* Initializes an xdp-type dynptr
|
||||
* Returns
|
||||
* Error code
|
||||
*/
|
||||
extern int bpf_dynptr_from_xdp(struct xdp_md *xdp, __u64 flags,
|
||||
struct bpf_dynptr *ptr__uninit) __ksym;
|
||||
|
||||
/* Description
|
||||
* Obtain a read-only pointer to the dynptr's data
|
||||
* Returns
|
||||
* Either a direct pointer to the dynptr data or a pointer to the user-provided
|
||||
* buffer if unable to obtain a direct pointer
|
||||
*/
|
||||
extern void *bpf_dynptr_slice(const struct bpf_dynptr *ptr, __u32 offset,
|
||||
void *buffer, __u32 buffer__szk) __ksym;
|
||||
|
||||
/* Description
|
||||
* Obtain a read-write pointer to the dynptr's data
|
||||
* Returns
|
||||
* Either a direct pointer to the dynptr data or a pointer to the user-provided
|
||||
* buffer if unable to obtain a direct pointer
|
||||
*/
|
||||
extern void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *ptr, __u32 offset,
|
||||
void *buffer, __u32 buffer__szk) __ksym;
|
||||
|
||||
#endif
|
||||
@@ -28,6 +28,15 @@ struct bpf_testmod_struct_arg_2 {
|
||||
long b;
|
||||
};
|
||||
|
||||
struct bpf_testmod_struct_arg_3 {
|
||||
int a;
|
||||
int b[];
|
||||
};
|
||||
|
||||
__diag_push();
|
||||
__diag_ignore_all("-Wmissing-prototypes",
|
||||
"Global functions as their definitions will be in bpf_testmod.ko BTF");
|
||||
|
||||
noinline int
|
||||
bpf_testmod_test_struct_arg_1(struct bpf_testmod_struct_arg_2 a, int b, int c) {
|
||||
bpf_testmod_test_struct_arg_result = a.a + a.b + b + c;
|
||||
@@ -59,12 +68,46 @@ bpf_testmod_test_struct_arg_5(void) {
|
||||
return bpf_testmod_test_struct_arg_result;
|
||||
}
|
||||
|
||||
noinline int
|
||||
bpf_testmod_test_struct_arg_6(struct bpf_testmod_struct_arg_3 *a) {
|
||||
bpf_testmod_test_struct_arg_result = a->b[0];
|
||||
return bpf_testmod_test_struct_arg_result;
|
||||
}
|
||||
|
||||
__bpf_kfunc void
|
||||
bpf_testmod_test_mod_kfunc(int i)
|
||||
{
|
||||
*(int *)this_cpu_ptr(&bpf_testmod_ksym_percpu) = i;
|
||||
}
|
||||
|
||||
__bpf_kfunc int bpf_iter_testmod_seq_new(struct bpf_iter_testmod_seq *it, s64 value, int cnt)
|
||||
{
|
||||
if (cnt < 0) {
|
||||
it->cnt = 0;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
it->value = value;
|
||||
it->cnt = cnt;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
__bpf_kfunc s64 *bpf_iter_testmod_seq_next(struct bpf_iter_testmod_seq* it)
|
||||
{
|
||||
if (it->cnt <= 0)
|
||||
return NULL;
|
||||
|
||||
it->cnt--;
|
||||
|
||||
return &it->value;
|
||||
}
|
||||
|
||||
__bpf_kfunc void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it)
|
||||
{
|
||||
it->cnt = 0;
|
||||
}
|
||||
|
||||
struct bpf_testmod_btf_type_tag_1 {
|
||||
int a;
|
||||
};
|
||||
@@ -102,7 +145,11 @@ bpf_testmod_test_btf_type_tag_percpu_2(struct bpf_testmod_btf_type_tag_3 *arg) {
|
||||
|
||||
noinline int bpf_testmod_loop_test(int n)
|
||||
{
|
||||
int i, sum = 0;
|
||||
/* Make sum volatile, so smart compilers, such as clang, will not
|
||||
* optimize the code by removing the loop.
|
||||
*/
|
||||
volatile int sum = 0;
|
||||
int i;
|
||||
|
||||
/* the primary goal of this test is to test LBR. Create a lot of
|
||||
* branches in the function, so we can catch it easily.
|
||||
@@ -143,6 +190,8 @@ noinline int bpf_testmod_fentry_test3(char a, int b, u64 c)
|
||||
return a + b + c;
|
||||
}
|
||||
|
||||
__diag_pop();
|
||||
|
||||
int bpf_testmod_fentry_ok;
|
||||
|
||||
noinline ssize_t
|
||||
@@ -157,6 +206,7 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj,
|
||||
};
|
||||
struct bpf_testmod_struct_arg_1 struct_arg1 = {10};
|
||||
struct bpf_testmod_struct_arg_2 struct_arg2 = {2, 3};
|
||||
struct bpf_testmod_struct_arg_3 *struct_arg3;
|
||||
int i = 1;
|
||||
|
||||
while (bpf_testmod_return_ptr(i))
|
||||
@@ -168,6 +218,14 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj,
|
||||
(void)bpf_testmod_test_struct_arg_4(struct_arg1, 1, 2, 3, struct_arg2);
|
||||
(void)bpf_testmod_test_struct_arg_5();
|
||||
|
||||
struct_arg3 = kmalloc((sizeof(struct bpf_testmod_struct_arg_3) +
|
||||
sizeof(int)), GFP_KERNEL);
|
||||
if (struct_arg3 != NULL) {
|
||||
struct_arg3->b[0] = 1;
|
||||
(void)bpf_testmod_test_struct_arg_6(struct_arg3);
|
||||
kfree(struct_arg3);
|
||||
}
|
||||
|
||||
/* This is always true. Use the check to make sure the compiler
|
||||
* doesn't remove bpf_testmod_loop_test.
|
||||
*/
|
||||
@@ -220,6 +278,17 @@ static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
|
||||
.write = bpf_testmod_test_write,
|
||||
};
|
||||
|
||||
BTF_SET8_START(bpf_testmod_common_kfunc_ids)
|
||||
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW)
|
||||
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL)
|
||||
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY)
|
||||
BTF_SET8_END(bpf_testmod_common_kfunc_ids)
|
||||
|
||||
static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = {
|
||||
.owner = THIS_MODULE,
|
||||
.set = &bpf_testmod_common_kfunc_ids,
|
||||
};
|
||||
|
||||
BTF_SET8_START(bpf_testmod_check_kfunc_ids)
|
||||
BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc)
|
||||
BTF_SET8_END(bpf_testmod_check_kfunc_ids)
|
||||
@@ -229,13 +298,20 @@ static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = {
|
||||
.set = &bpf_testmod_check_kfunc_ids,
|
||||
};
|
||||
|
||||
noinline int bpf_fentry_shadow_test(int a)
|
||||
{
|
||||
return a + 2;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(bpf_fentry_shadow_test);
|
||||
|
||||
extern int bpf_fentry_test1(int a);
|
||||
|
||||
static int bpf_testmod_init(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set);
|
||||
ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &bpf_testmod_common_kfunc_set);
|
||||
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
if (bpf_fentry_test1(0) < 0)
|
||||
|
||||
@@ -22,4 +22,10 @@ struct bpf_testmod_test_writable_ctx {
|
||||
int val;
|
||||
};
|
||||
|
||||
/* BPF iter that returns *value* *n* times in a row */
|
||||
struct bpf_iter_testmod_seq {
|
||||
s64 value;
|
||||
int cnt;
|
||||
};
|
||||
|
||||
#endif /* _BPF_TESTMOD_H */
|
||||
|
||||
@@ -176,6 +176,8 @@ CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
|
||||
CONFIG_VIRTIO_MMIO=y
|
||||
CONFIG_VIRTIO_NET=y
|
||||
CONFIG_VIRTIO_PCI=y
|
||||
CONFIG_VIRTIO_VSOCKETS_COMMON=y
|
||||
CONFIG_VLAN_8021Q=y
|
||||
CONFIG_VSOCKETS=y
|
||||
CONFIG_VSOCKETS_LOOPBACK=y
|
||||
CONFIG_XFRM_USER=y
|
||||
|
||||
@@ -140,5 +140,8 @@ CONFIG_VIRTIO_BALLOON=y
|
||||
CONFIG_VIRTIO_BLK=y
|
||||
CONFIG_VIRTIO_NET=y
|
||||
CONFIG_VIRTIO_PCI=y
|
||||
CONFIG_VIRTIO_VSOCKETS_COMMON=y
|
||||
CONFIG_VLAN_8021Q=y
|
||||
CONFIG_VSOCKETS=y
|
||||
CONFIG_VSOCKETS_LOOPBACK=y
|
||||
CONFIG_XFRM_USER=y
|
||||
|
||||
@@ -234,7 +234,10 @@ CONFIG_VIRTIO_BLK=y
|
||||
CONFIG_VIRTIO_CONSOLE=y
|
||||
CONFIG_VIRTIO_NET=y
|
||||
CONFIG_VIRTIO_PCI=y
|
||||
CONFIG_VIRTIO_VSOCKETS_COMMON=y
|
||||
CONFIG_VLAN_8021Q=y
|
||||
CONFIG_VSOCKETS=y
|
||||
CONFIG_VSOCKETS_LOOPBACK=y
|
||||
CONFIG_X86_ACPI_CPUFREQ=y
|
||||
CONFIG_X86_CPUID=y
|
||||
CONFIG_X86_MSR=y
|
||||
|
||||
1
tools/testing/selftests/bpf/disasm.c
Symbolic link
1
tools/testing/selftests/bpf/disasm.c
Symbolic link
@@ -0,0 +1 @@
|
||||
../../../../kernel/bpf/disasm.c
|
||||
1
tools/testing/selftests/bpf/disasm.h
Symbolic link
1
tools/testing/selftests/bpf/disasm.h
Symbolic link
@@ -0,0 +1 @@
|
||||
../../../../kernel/bpf/disasm.h
|
||||
@@ -86,8 +86,13 @@ int main(int argc, char **argv)
|
||||
pid = getpid();
|
||||
bpf_map_update_elem(pidmap_fd, &key, &pid, 0);
|
||||
|
||||
snprintf(buf, sizeof(buf),
|
||||
"/sys/kernel/debug/tracing/events/%s/id", probe_name);
|
||||
if (access("/sys/kernel/tracing/trace", F_OK) == 0) {
|
||||
snprintf(buf, sizeof(buf),
|
||||
"/sys/kernel/tracing/events/%s/id", probe_name);
|
||||
} else {
|
||||
snprintf(buf, sizeof(buf),
|
||||
"/sys/kernel/debug/tracing/events/%s/id", probe_name);
|
||||
}
|
||||
efd = open(buf, O_RDONLY, 0);
|
||||
if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
|
||||
goto close_prog;
|
||||
|
||||
1
tools/testing/selftests/bpf/json_writer.c
Symbolic link
1
tools/testing/selftests/bpf/json_writer.c
Symbolic link
@@ -0,0 +1 @@
|
||||
../../../bpf/bpftool/json_writer.c
|
||||
1
tools/testing/selftests/bpf/json_writer.h
Symbolic link
1
tools/testing/selftests/bpf/json_writer.h
Symbolic link
@@ -0,0 +1 @@
|
||||
../../../bpf/bpftool/json_writer.h
|
||||
@@ -95,7 +95,7 @@ static int __start_server(int type, int protocol, const struct sockaddr *addr,
|
||||
if (reuseport &&
|
||||
setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &on, sizeof(on))) {
|
||||
log_err("Failed to set SO_REUSEPORT");
|
||||
return -1;
|
||||
goto error_close;
|
||||
}
|
||||
|
||||
if (bind(fd, addr, addrlen) < 0) {
|
||||
|
||||
@@ -0,0 +1,16 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* Copyright (c) 2022 Bytedance */
|
||||
|
||||
#include <test_progs.h>
|
||||
#include "test_access_variable_array.skel.h"
|
||||
|
||||
void test_access_variable_array(void)
|
||||
{
|
||||
struct test_access_variable_array *skel;
|
||||
|
||||
skel = test_access_variable_array__open_and_load();
|
||||
if (!ASSERT_OK_PTR(skel, "test_access_variable_array__open_and_load"))
|
||||
return;
|
||||
|
||||
test_access_variable_array__destroy(skel);
|
||||
}
|
||||
@@ -575,14 +575,14 @@ static struct bpf_align_test tests[] = {
|
||||
/* New unknown value in R7 is (4n), >= 76 */
|
||||
{14, "R7_w=scalar(umin=76,umax=1096,var_off=(0x0; 0x7fc))"},
|
||||
/* Adding it to packet pointer gives nice bounds again */
|
||||
{16, "R5_w=pkt(id=3,off=0,r=0,umin=2,umax=1082,var_off=(0x2; 0xfffffffc)"},
|
||||
{16, "R5_w=pkt(id=3,off=0,r=0,umin=2,umax=1082,var_off=(0x2; 0x7fc)"},
|
||||
/* At the time the word size load is performed from R5,
|
||||
* its total fixed offset is NET_IP_ALIGN + reg->off (0)
|
||||
* which is 2. Then the variable offset is (4n+2), so
|
||||
* the total offset is 4-byte aligned and meets the
|
||||
* load's requirements.
|
||||
*/
|
||||
{20, "R5=pkt(id=3,off=0,r=4,umin=2,umax=1082,var_off=(0x2; 0xfffffffc)"},
|
||||
{20, "R5=pkt(id=3,off=0,r=4,umin=2,umax=1082,var_off=(0x2; 0x7fc)"},
|
||||
},
|
||||
},
|
||||
};
|
||||
@@ -660,16 +660,22 @@ static int do_test_single(struct bpf_align_test *test)
|
||||
* func#0 @0
|
||||
* 0: R1=ctx(off=0,imm=0) R10=fp0
|
||||
* 0: (b7) r3 = 2 ; R3_w=2
|
||||
*
|
||||
* Sometimes it's actually two lines below, e.g. when
|
||||
* searching for "6: R3_w=scalar(umax=255,var_off=(0x0; 0xff))":
|
||||
* from 4 to 6: R0_w=pkt(off=8,r=8,imm=0) R1=ctx(off=0,imm=0) R2_w=pkt(off=0,r=8,imm=0) R3_w=pkt_end(off=0,imm=0) R10=fp0
|
||||
* 6: R0_w=pkt(off=8,r=8,imm=0) R1=ctx(off=0,imm=0) R2_w=pkt(off=0,r=8,imm=0) R3_w=pkt_end(off=0,imm=0) R10=fp0
|
||||
* 6: (71) r3 = *(u8 *)(r2 +0) ; R2_w=pkt(off=0,r=8,imm=0) R3_w=scalar(umax=255,var_off=(0x0; 0xff))
|
||||
*/
|
||||
if (!strstr(line_ptr, m.match)) {
|
||||
while (!strstr(line_ptr, m.match)) {
|
||||
cur_line = -1;
|
||||
line_ptr = strtok(NULL, "\n");
|
||||
sscanf(line_ptr, "%u: ", &cur_line);
|
||||
sscanf(line_ptr ?: "", "%u: ", &cur_line);
|
||||
if (!line_ptr || cur_line != m.line)
|
||||
break;
|
||||
}
|
||||
if (cur_line != m.line || !line_ptr ||
|
||||
!strstr(line_ptr, m.match)) {
|
||||
printf("Failed to find match %u: %s\n",
|
||||
m.line, m.match);
|
||||
if (cur_line != m.line || !line_ptr || !strstr(line_ptr, m.match)) {
|
||||
printf("Failed to find match %u: %s\n", m.line, m.match);
|
||||
ret = 1;
|
||||
printf("%s", bpf_vlog);
|
||||
break;
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <test_progs.h>
|
||||
#include "test_attach_kprobe_sleepable.skel.h"
|
||||
#include "test_attach_probe_manual.skel.h"
|
||||
#include "test_attach_probe.skel.h"
|
||||
|
||||
/* this is how USDT semaphore is actually defined, except volatile modifier */
|
||||
@@ -23,81 +25,54 @@ static noinline void trigger_func3(void)
|
||||
asm volatile ("");
|
||||
}
|
||||
|
||||
/* attach point for ref_ctr */
|
||||
static noinline void trigger_func4(void)
|
||||
{
|
||||
asm volatile ("");
|
||||
}
|
||||
|
||||
static char test_data[] = "test_data";
|
||||
|
||||
void test_attach_probe(void)
|
||||
/* manual attach kprobe/kretprobe/uprobe/uretprobe testings */
|
||||
static void test_attach_probe_manual(enum probe_attach_mode attach_mode)
|
||||
{
|
||||
DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
|
||||
DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
|
||||
struct bpf_link *kprobe_link, *kretprobe_link;
|
||||
struct bpf_link *uprobe_link, *uretprobe_link;
|
||||
struct test_attach_probe* skel;
|
||||
ssize_t uprobe_offset, ref_ctr_offset;
|
||||
struct bpf_link *uprobe_err_link;
|
||||
FILE *devnull;
|
||||
bool legacy;
|
||||
struct test_attach_probe_manual *skel;
|
||||
ssize_t uprobe_offset;
|
||||
|
||||
/* Check if new-style kprobe/uprobe API is supported.
|
||||
* Kernels that support new FD-based kprobe and uprobe BPF attachment
|
||||
* through perf_event_open() syscall expose
|
||||
* /sys/bus/event_source/devices/kprobe/type and
|
||||
* /sys/bus/event_source/devices/uprobe/type files, respectively. They
|
||||
* contain magic numbers that are passed as "type" field of
|
||||
* perf_event_attr. Lack of such file in the system indicates legacy
|
||||
* kernel with old-style kprobe/uprobe attach interface through
|
||||
* creating per-probe event through tracefs. For such cases
|
||||
* ref_ctr_offset feature is not supported, so we don't test it.
|
||||
*/
|
||||
legacy = access("/sys/bus/event_source/devices/kprobe/type", F_OK) != 0;
|
||||
skel = test_attach_probe_manual__open_and_load();
|
||||
if (!ASSERT_OK_PTR(skel, "skel_kprobe_manual_open_and_load"))
|
||||
return;
|
||||
|
||||
uprobe_offset = get_uprobe_offset(&trigger_func);
|
||||
if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset"))
|
||||
return;
|
||||
|
||||
ref_ctr_offset = get_rel_offset((uintptr_t)&uprobe_ref_ctr);
|
||||
if (!ASSERT_GE(ref_ctr_offset, 0, "ref_ctr_offset"))
|
||||
return;
|
||||
|
||||
skel = test_attach_probe__open();
|
||||
if (!ASSERT_OK_PTR(skel, "skel_open"))
|
||||
return;
|
||||
|
||||
/* sleepable kprobe test case needs flags set before loading */
|
||||
if (!ASSERT_OK(bpf_program__set_flags(skel->progs.handle_kprobe_sleepable,
|
||||
BPF_F_SLEEPABLE), "kprobe_sleepable_flags"))
|
||||
goto cleanup;
|
||||
|
||||
if (!ASSERT_OK(test_attach_probe__load(skel), "skel_load"))
|
||||
goto cleanup;
|
||||
if (!ASSERT_OK_PTR(skel->bss, "check_bss"))
|
||||
goto cleanup;
|
||||
|
||||
/* manual-attach kprobe/kretprobe */
|
||||
kprobe_link = bpf_program__attach_kprobe(skel->progs.handle_kprobe,
|
||||
false /* retprobe */,
|
||||
SYS_NANOSLEEP_KPROBE_NAME);
|
||||
kprobe_opts.attach_mode = attach_mode;
|
||||
kprobe_opts.retprobe = false;
|
||||
kprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
|
||||
SYS_NANOSLEEP_KPROBE_NAME,
|
||||
&kprobe_opts);
|
||||
if (!ASSERT_OK_PTR(kprobe_link, "attach_kprobe"))
|
||||
goto cleanup;
|
||||
skel->links.handle_kprobe = kprobe_link;
|
||||
|
||||
kretprobe_link = bpf_program__attach_kprobe(skel->progs.handle_kretprobe,
|
||||
true /* retprobe */,
|
||||
SYS_NANOSLEEP_KPROBE_NAME);
|
||||
kprobe_opts.retprobe = true;
|
||||
kretprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
|
||||
SYS_NANOSLEEP_KPROBE_NAME,
|
||||
&kprobe_opts);
|
||||
if (!ASSERT_OK_PTR(kretprobe_link, "attach_kretprobe"))
|
||||
goto cleanup;
|
||||
skel->links.handle_kretprobe = kretprobe_link;
|
||||
|
||||
/* auto-attachable kprobe and kretprobe */
|
||||
skel->links.handle_kprobe_auto = bpf_program__attach(skel->progs.handle_kprobe_auto);
|
||||
ASSERT_OK_PTR(skel->links.handle_kprobe_auto, "attach_kprobe_auto");
|
||||
|
||||
skel->links.handle_kretprobe_auto = bpf_program__attach(skel->progs.handle_kretprobe_auto);
|
||||
ASSERT_OK_PTR(skel->links.handle_kretprobe_auto, "attach_kretprobe_auto");
|
||||
|
||||
if (!legacy)
|
||||
ASSERT_EQ(uprobe_ref_ctr, 0, "uprobe_ref_ctr_before");
|
||||
|
||||
/* manual-attach uprobe/uretprobe */
|
||||
uprobe_opts.attach_mode = attach_mode;
|
||||
uprobe_opts.ref_ctr_offset = 0;
|
||||
uprobe_opts.retprobe = false;
|
||||
uprobe_opts.ref_ctr_offset = legacy ? 0 : ref_ctr_offset;
|
||||
uprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe,
|
||||
0 /* self pid */,
|
||||
"/proc/self/exe",
|
||||
@@ -107,12 +82,7 @@ void test_attach_probe(void)
|
||||
goto cleanup;
|
||||
skel->links.handle_uprobe = uprobe_link;
|
||||
|
||||
if (!legacy)
|
||||
ASSERT_GT(uprobe_ref_ctr, 0, "uprobe_ref_ctr_after");
|
||||
|
||||
/* if uprobe uses ref_ctr, uretprobe has to use ref_ctr as well */
|
||||
uprobe_opts.retprobe = true;
|
||||
uprobe_opts.ref_ctr_offset = legacy ? 0 : ref_ctr_offset;
|
||||
uretprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe,
|
||||
-1 /* any pid */,
|
||||
"/proc/self/exe",
|
||||
@@ -121,12 +91,7 @@ void test_attach_probe(void)
|
||||
goto cleanup;
|
||||
skel->links.handle_uretprobe = uretprobe_link;
|
||||
|
||||
/* verify auto-attach fails for old-style uprobe definition */
|
||||
uprobe_err_link = bpf_program__attach(skel->progs.handle_uprobe_byname);
|
||||
if (!ASSERT_EQ(libbpf_get_error(uprobe_err_link), -EOPNOTSUPP,
|
||||
"auto-attach should fail for old-style name"))
|
||||
goto cleanup;
|
||||
|
||||
/* attach uprobe by function name manually */
|
||||
uprobe_opts.func_name = "trigger_func2";
|
||||
uprobe_opts.retprobe = false;
|
||||
uprobe_opts.ref_ctr_offset = 0;
|
||||
@@ -138,11 +103,63 @@ void test_attach_probe(void)
|
||||
if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname, "attach_uprobe_byname"))
|
||||
goto cleanup;
|
||||
|
||||
/* trigger & validate kprobe && kretprobe */
|
||||
usleep(1);
|
||||
|
||||
/* trigger & validate uprobe & uretprobe */
|
||||
trigger_func();
|
||||
|
||||
/* trigger & validate uprobe attached by name */
|
||||
trigger_func2();
|
||||
|
||||
ASSERT_EQ(skel->bss->kprobe_res, 1, "check_kprobe_res");
|
||||
ASSERT_EQ(skel->bss->kretprobe_res, 2, "check_kretprobe_res");
|
||||
ASSERT_EQ(skel->bss->uprobe_res, 3, "check_uprobe_res");
|
||||
ASSERT_EQ(skel->bss->uretprobe_res, 4, "check_uretprobe_res");
|
||||
ASSERT_EQ(skel->bss->uprobe_byname_res, 5, "check_uprobe_byname_res");
|
||||
|
||||
cleanup:
|
||||
test_attach_probe_manual__destroy(skel);
|
||||
}
|
||||
|
||||
static void test_attach_probe_auto(struct test_attach_probe *skel)
|
||||
{
|
||||
struct bpf_link *uprobe_err_link;
|
||||
|
||||
/* auto-attachable kprobe and kretprobe */
|
||||
skel->links.handle_kprobe_auto = bpf_program__attach(skel->progs.handle_kprobe_auto);
|
||||
ASSERT_OK_PTR(skel->links.handle_kprobe_auto, "attach_kprobe_auto");
|
||||
|
||||
skel->links.handle_kretprobe_auto = bpf_program__attach(skel->progs.handle_kretprobe_auto);
|
||||
ASSERT_OK_PTR(skel->links.handle_kretprobe_auto, "attach_kretprobe_auto");
|
||||
|
||||
/* verify auto-attach fails for old-style uprobe definition */
|
||||
uprobe_err_link = bpf_program__attach(skel->progs.handle_uprobe_byname);
|
||||
if (!ASSERT_EQ(libbpf_get_error(uprobe_err_link), -EOPNOTSUPP,
|
||||
"auto-attach should fail for old-style name"))
|
||||
return;
|
||||
|
||||
/* verify auto-attach works */
|
||||
skel->links.handle_uretprobe_byname =
|
||||
bpf_program__attach(skel->progs.handle_uretprobe_byname);
|
||||
if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname, "attach_uretprobe_byname"))
|
||||
goto cleanup;
|
||||
return;
|
||||
|
||||
/* trigger & validate kprobe && kretprobe */
|
||||
usleep(1);
|
||||
|
||||
/* trigger & validate uprobe attached by name */
|
||||
trigger_func2();
|
||||
|
||||
ASSERT_EQ(skel->bss->kprobe2_res, 11, "check_kprobe_auto_res");
|
||||
ASSERT_EQ(skel->bss->kretprobe2_res, 22, "check_kretprobe_auto_res");
|
||||
ASSERT_EQ(skel->bss->uretprobe_byname_res, 6, "check_uretprobe_byname_res");
|
||||
}
|
||||
|
||||
static void test_uprobe_lib(struct test_attach_probe *skel)
|
||||
{
|
||||
DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
|
||||
FILE *devnull;
|
||||
|
||||
/* test attach by name for a library function, using the library
|
||||
* as the binary argument. libc.so.6 will be resolved via dlopen()/dlinfo().
|
||||
@@ -155,7 +172,7 @@ void test_attach_probe(void)
|
||||
"libc.so.6",
|
||||
0, &uprobe_opts);
|
||||
if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname2, "attach_uprobe_byname2"))
|
||||
goto cleanup;
|
||||
return;
|
||||
|
||||
uprobe_opts.func_name = "fclose";
|
||||
uprobe_opts.retprobe = true;
|
||||
@@ -165,62 +182,144 @@ void test_attach_probe(void)
|
||||
"libc.so.6",
|
||||
0, &uprobe_opts);
|
||||
if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname2, "attach_uretprobe_byname2"))
|
||||
goto cleanup;
|
||||
|
||||
/* sleepable kprobes should not attach successfully */
|
||||
skel->links.handle_kprobe_sleepable = bpf_program__attach(skel->progs.handle_kprobe_sleepable);
|
||||
if (!ASSERT_ERR_PTR(skel->links.handle_kprobe_sleepable, "attach_kprobe_sleepable"))
|
||||
goto cleanup;
|
||||
|
||||
/* test sleepable uprobe and uretprobe variants */
|
||||
skel->links.handle_uprobe_byname3_sleepable = bpf_program__attach(skel->progs.handle_uprobe_byname3_sleepable);
|
||||
if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname3_sleepable, "attach_uprobe_byname3_sleepable"))
|
||||
goto cleanup;
|
||||
|
||||
skel->links.handle_uprobe_byname3 = bpf_program__attach(skel->progs.handle_uprobe_byname3);
|
||||
if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname3, "attach_uprobe_byname3"))
|
||||
goto cleanup;
|
||||
|
||||
skel->links.handle_uretprobe_byname3_sleepable = bpf_program__attach(skel->progs.handle_uretprobe_byname3_sleepable);
|
||||
if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname3_sleepable, "attach_uretprobe_byname3_sleepable"))
|
||||
goto cleanup;
|
||||
|
||||
skel->links.handle_uretprobe_byname3 = bpf_program__attach(skel->progs.handle_uretprobe_byname3);
|
||||
if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname3, "attach_uretprobe_byname3"))
|
||||
goto cleanup;
|
||||
|
||||
skel->bss->user_ptr = test_data;
|
||||
|
||||
/* trigger & validate kprobe && kretprobe */
|
||||
usleep(1);
|
||||
return;
|
||||
|
||||
/* trigger & validate shared library u[ret]probes attached by name */
|
||||
devnull = fopen("/dev/null", "r");
|
||||
fclose(devnull);
|
||||
|
||||
/* trigger & validate uprobe & uretprobe */
|
||||
trigger_func();
|
||||
ASSERT_EQ(skel->bss->uprobe_byname2_res, 7, "check_uprobe_byname2_res");
|
||||
ASSERT_EQ(skel->bss->uretprobe_byname2_res, 8, "check_uretprobe_byname2_res");
|
||||
}
|
||||
|
||||
/* trigger & validate uprobe attached by name */
|
||||
trigger_func2();
|
||||
static void test_uprobe_ref_ctr(struct test_attach_probe *skel)
|
||||
{
|
||||
DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
|
||||
struct bpf_link *uprobe_link, *uretprobe_link;
|
||||
ssize_t uprobe_offset, ref_ctr_offset;
|
||||
|
||||
uprobe_offset = get_uprobe_offset(&trigger_func4);
|
||||
if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset_ref_ctr"))
|
||||
return;
|
||||
|
||||
ref_ctr_offset = get_rel_offset((uintptr_t)&uprobe_ref_ctr);
|
||||
if (!ASSERT_GE(ref_ctr_offset, 0, "ref_ctr_offset"))
|
||||
return;
|
||||
|
||||
ASSERT_EQ(uprobe_ref_ctr, 0, "uprobe_ref_ctr_before");
|
||||
|
||||
uprobe_opts.retprobe = false;
|
||||
uprobe_opts.ref_ctr_offset = ref_ctr_offset;
|
||||
uprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe_ref_ctr,
|
||||
0 /* self pid */,
|
||||
"/proc/self/exe",
|
||||
uprobe_offset,
|
||||
&uprobe_opts);
|
||||
if (!ASSERT_OK_PTR(uprobe_link, "attach_uprobe_ref_ctr"))
|
||||
return;
|
||||
skel->links.handle_uprobe_ref_ctr = uprobe_link;
|
||||
|
||||
ASSERT_GT(uprobe_ref_ctr, 0, "uprobe_ref_ctr_after");
|
||||
|
||||
/* if uprobe uses ref_ctr, uretprobe has to use ref_ctr as well */
|
||||
uprobe_opts.retprobe = true;
|
||||
uprobe_opts.ref_ctr_offset = ref_ctr_offset;
|
||||
uretprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe_ref_ctr,
|
||||
-1 /* any pid */,
|
||||
"/proc/self/exe",
|
||||
uprobe_offset, &uprobe_opts);
|
||||
if (!ASSERT_OK_PTR(uretprobe_link, "attach_uretprobe_ref_ctr"))
|
||||
return;
|
||||
skel->links.handle_uretprobe_ref_ctr = uretprobe_link;
|
||||
}
|
||||
|
||||
static void test_kprobe_sleepable(void)
|
||||
{
|
||||
struct test_attach_kprobe_sleepable *skel;
|
||||
|
||||
skel = test_attach_kprobe_sleepable__open();
|
||||
if (!ASSERT_OK_PTR(skel, "skel_kprobe_sleepable_open"))
|
||||
return;
|
||||
|
||||
/* sleepable kprobe test case needs flags set before loading */
|
||||
if (!ASSERT_OK(bpf_program__set_flags(skel->progs.handle_kprobe_sleepable,
|
||||
BPF_F_SLEEPABLE), "kprobe_sleepable_flags"))
|
||||
goto cleanup;
|
||||
|
||||
if (!ASSERT_OK(test_attach_kprobe_sleepable__load(skel),
|
||||
"skel_kprobe_sleepable_load"))
|
||||
goto cleanup;
|
||||
|
||||
/* sleepable kprobes should not attach successfully */
|
||||
skel->links.handle_kprobe_sleepable = bpf_program__attach(skel->progs.handle_kprobe_sleepable);
|
||||
ASSERT_ERR_PTR(skel->links.handle_kprobe_sleepable, "attach_kprobe_sleepable");
|
||||
|
||||
cleanup:
|
||||
test_attach_kprobe_sleepable__destroy(skel);
|
||||
}
|
||||
|
||||
static void test_uprobe_sleepable(struct test_attach_probe *skel)
|
||||
{
|
||||
/* test sleepable uprobe and uretprobe variants */
|
||||
skel->links.handle_uprobe_byname3_sleepable = bpf_program__attach(skel->progs.handle_uprobe_byname3_sleepable);
|
||||
if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname3_sleepable, "attach_uprobe_byname3_sleepable"))
|
||||
return;
|
||||
|
||||
skel->links.handle_uprobe_byname3 = bpf_program__attach(skel->progs.handle_uprobe_byname3);
|
||||
if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname3, "attach_uprobe_byname3"))
|
||||
return;
|
||||
|
||||
skel->links.handle_uretprobe_byname3_sleepable = bpf_program__attach(skel->progs.handle_uretprobe_byname3_sleepable);
|
||||
if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname3_sleepable, "attach_uretprobe_byname3_sleepable"))
|
||||
return;
|
||||
|
||||
skel->links.handle_uretprobe_byname3 = bpf_program__attach(skel->progs.handle_uretprobe_byname3);
|
||||
if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname3, "attach_uretprobe_byname3"))
|
||||
return;
|
||||
|
||||
skel->bss->user_ptr = test_data;
|
||||
|
||||
/* trigger & validate sleepable uprobe attached by name */
|
||||
trigger_func3();
|
||||
|
||||
ASSERT_EQ(skel->bss->kprobe_res, 1, "check_kprobe_res");
|
||||
ASSERT_EQ(skel->bss->kprobe2_res, 11, "check_kprobe_auto_res");
|
||||
ASSERT_EQ(skel->bss->kretprobe_res, 2, "check_kretprobe_res");
|
||||
ASSERT_EQ(skel->bss->kretprobe2_res, 22, "check_kretprobe_auto_res");
|
||||
ASSERT_EQ(skel->bss->uprobe_res, 3, "check_uprobe_res");
|
||||
ASSERT_EQ(skel->bss->uretprobe_res, 4, "check_uretprobe_res");
|
||||
ASSERT_EQ(skel->bss->uprobe_byname_res, 5, "check_uprobe_byname_res");
|
||||
ASSERT_EQ(skel->bss->uretprobe_byname_res, 6, "check_uretprobe_byname_res");
|
||||
ASSERT_EQ(skel->bss->uprobe_byname2_res, 7, "check_uprobe_byname2_res");
|
||||
ASSERT_EQ(skel->bss->uretprobe_byname2_res, 8, "check_uretprobe_byname2_res");
|
||||
ASSERT_EQ(skel->bss->uprobe_byname3_sleepable_res, 9, "check_uprobe_byname3_sleepable_res");
|
||||
ASSERT_EQ(skel->bss->uprobe_byname3_res, 10, "check_uprobe_byname3_res");
|
||||
ASSERT_EQ(skel->bss->uretprobe_byname3_sleepable_res, 11, "check_uretprobe_byname3_sleepable_res");
|
||||
ASSERT_EQ(skel->bss->uretprobe_byname3_res, 12, "check_uretprobe_byname3_res");
|
||||
}
|
||||
|
||||
void test_attach_probe(void)
|
||||
{
|
||||
struct test_attach_probe *skel;
|
||||
|
||||
skel = test_attach_probe__open();
|
||||
if (!ASSERT_OK_PTR(skel, "skel_open"))
|
||||
return;
|
||||
|
||||
if (!ASSERT_OK(test_attach_probe__load(skel), "skel_load"))
|
||||
goto cleanup;
|
||||
if (!ASSERT_OK_PTR(skel->bss, "check_bss"))
|
||||
goto cleanup;
|
||||
|
||||
if (test__start_subtest("manual-default"))
|
||||
test_attach_probe_manual(PROBE_ATTACH_MODE_DEFAULT);
|
||||
if (test__start_subtest("manual-legacy"))
|
||||
test_attach_probe_manual(PROBE_ATTACH_MODE_LEGACY);
|
||||
if (test__start_subtest("manual-perf"))
|
||||
test_attach_probe_manual(PROBE_ATTACH_MODE_PERF);
|
||||
if (test__start_subtest("manual-link"))
|
||||
test_attach_probe_manual(PROBE_ATTACH_MODE_LINK);
|
||||
|
||||
if (test__start_subtest("auto"))
|
||||
test_attach_probe_auto(skel);
|
||||
if (test__start_subtest("kprobe-sleepable"))
|
||||
test_kprobe_sleepable();
|
||||
if (test__start_subtest("uprobe-lib"))
|
||||
test_uprobe_lib(skel);
|
||||
if (test__start_subtest("uprobe-sleepable"))
|
||||
test_uprobe_sleepable(skel);
|
||||
if (test__start_subtest("uprobe-ref_ctr"))
|
||||
test_uprobe_ref_ctr(skel);
|
||||
|
||||
cleanup:
|
||||
test_attach_probe__destroy(skel);
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
#include "bpf_dctcp.skel.h"
|
||||
#include "bpf_cubic.skel.h"
|
||||
#include "bpf_tcp_nogpl.skel.h"
|
||||
#include "tcp_ca_update.skel.h"
|
||||
#include "bpf_dctcp_release.skel.h"
|
||||
#include "tcp_ca_write_sk_pacing.skel.h"
|
||||
#include "tcp_ca_incompl_cong_ops.skel.h"
|
||||
@@ -381,6 +382,155 @@ static void test_unsupp_cong_op(void)
|
||||
libbpf_set_print(old_print_fn);
|
||||
}
|
||||
|
||||
static void test_update_ca(void)
|
||||
{
|
||||
struct tcp_ca_update *skel;
|
||||
struct bpf_link *link;
|
||||
int saved_ca1_cnt;
|
||||
int err;
|
||||
|
||||
skel = tcp_ca_update__open_and_load();
|
||||
if (!ASSERT_OK_PTR(skel, "open"))
|
||||
return;
|
||||
|
||||
link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
|
||||
ASSERT_OK_PTR(link, "attach_struct_ops");
|
||||
|
||||
do_test("tcp_ca_update", NULL);
|
||||
saved_ca1_cnt = skel->bss->ca1_cnt;
|
||||
ASSERT_GT(saved_ca1_cnt, 0, "ca1_ca1_cnt");
|
||||
|
||||
err = bpf_link__update_map(link, skel->maps.ca_update_2);
|
||||
ASSERT_OK(err, "update_map");
|
||||
|
||||
do_test("tcp_ca_update", NULL);
|
||||
ASSERT_EQ(skel->bss->ca1_cnt, saved_ca1_cnt, "ca2_ca1_cnt");
|
||||
ASSERT_GT(skel->bss->ca2_cnt, 0, "ca2_ca2_cnt");
|
||||
|
||||
bpf_link__destroy(link);
|
||||
tcp_ca_update__destroy(skel);
|
||||
}
|
||||
|
||||
static void test_update_wrong(void)
|
||||
{
|
||||
struct tcp_ca_update *skel;
|
||||
struct bpf_link *link;
|
||||
int saved_ca1_cnt;
|
||||
int err;
|
||||
|
||||
skel = tcp_ca_update__open_and_load();
|
||||
if (!ASSERT_OK_PTR(skel, "open"))
|
||||
return;
|
||||
|
||||
link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
|
||||
ASSERT_OK_PTR(link, "attach_struct_ops");
|
||||
|
||||
do_test("tcp_ca_update", NULL);
|
||||
saved_ca1_cnt = skel->bss->ca1_cnt;
|
||||
ASSERT_GT(saved_ca1_cnt, 0, "ca1_ca1_cnt");
|
||||
|
||||
err = bpf_link__update_map(link, skel->maps.ca_wrong);
|
||||
ASSERT_ERR(err, "update_map");
|
||||
|
||||
do_test("tcp_ca_update", NULL);
|
||||
ASSERT_GT(skel->bss->ca1_cnt, saved_ca1_cnt, "ca2_ca1_cnt");
|
||||
|
||||
bpf_link__destroy(link);
|
||||
tcp_ca_update__destroy(skel);
|
||||
}
|
||||
|
||||
static void test_mixed_links(void)
|
||||
{
|
||||
struct tcp_ca_update *skel;
|
||||
struct bpf_link *link, *link_nl;
|
||||
int err;
|
||||
|
||||
skel = tcp_ca_update__open_and_load();
|
||||
if (!ASSERT_OK_PTR(skel, "open"))
|
||||
return;
|
||||
|
||||
link_nl = bpf_map__attach_struct_ops(skel->maps.ca_no_link);
|
||||
ASSERT_OK_PTR(link_nl, "attach_struct_ops_nl");
|
||||
|
||||
link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
|
||||
ASSERT_OK_PTR(link, "attach_struct_ops");
|
||||
|
||||
do_test("tcp_ca_update", NULL);
|
||||
ASSERT_GT(skel->bss->ca1_cnt, 0, "ca1_ca1_cnt");
|
||||
|
||||
err = bpf_link__update_map(link, skel->maps.ca_no_link);
|
||||
ASSERT_ERR(err, "update_map");
|
||||
|
||||
bpf_link__destroy(link);
|
||||
bpf_link__destroy(link_nl);
|
||||
tcp_ca_update__destroy(skel);
|
||||
}
|
||||
|
||||
static void test_multi_links(void)
|
||||
{
|
||||
struct tcp_ca_update *skel;
|
||||
struct bpf_link *link;
|
||||
|
||||
skel = tcp_ca_update__open_and_load();
|
||||
if (!ASSERT_OK_PTR(skel, "open"))
|
||||
return;
|
||||
|
||||
link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
|
||||
ASSERT_OK_PTR(link, "attach_struct_ops_1st");
|
||||
bpf_link__destroy(link);
|
||||
|
||||
/* A map should be able to be used to create links multiple
|
||||
* times.
|
||||
*/
|
||||
link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
|
||||
ASSERT_OK_PTR(link, "attach_struct_ops_2nd");
|
||||
bpf_link__destroy(link);
|
||||
|
||||
tcp_ca_update__destroy(skel);
|
||||
}
|
||||
|
||||
static void test_link_replace(void)
|
||||
{
|
||||
DECLARE_LIBBPF_OPTS(bpf_link_update_opts, opts);
|
||||
struct tcp_ca_update *skel;
|
||||
struct bpf_link *link;
|
||||
int err;
|
||||
|
||||
skel = tcp_ca_update__open_and_load();
|
||||
if (!ASSERT_OK_PTR(skel, "open"))
|
||||
return;
|
||||
|
||||
link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
|
||||
ASSERT_OK_PTR(link, "attach_struct_ops_1st");
|
||||
bpf_link__destroy(link);
|
||||
|
||||
link = bpf_map__attach_struct_ops(skel->maps.ca_update_2);
|
||||
ASSERT_OK_PTR(link, "attach_struct_ops_2nd");
|
||||
|
||||
/* BPF_F_REPLACE with a wrong old map Fd. It should fail!
|
||||
*
|
||||
* With BPF_F_REPLACE, the link should be updated only if the
|
||||
* old map fd given here matches the map backing the link.
|
||||
*/
|
||||
opts.old_map_fd = bpf_map__fd(skel->maps.ca_update_1);
|
||||
opts.flags = BPF_F_REPLACE;
|
||||
err = bpf_link_update(bpf_link__fd(link),
|
||||
bpf_map__fd(skel->maps.ca_update_1),
|
||||
&opts);
|
||||
ASSERT_ERR(err, "bpf_link_update_fail");
|
||||
|
||||
/* BPF_F_REPLACE with a correct old map Fd. It should success! */
|
||||
opts.old_map_fd = bpf_map__fd(skel->maps.ca_update_2);
|
||||
err = bpf_link_update(bpf_link__fd(link),
|
||||
bpf_map__fd(skel->maps.ca_update_1),
|
||||
&opts);
|
||||
ASSERT_OK(err, "bpf_link_update_success");
|
||||
|
||||
bpf_link__destroy(link);
|
||||
|
||||
tcp_ca_update__destroy(skel);
|
||||
}
|
||||
|
||||
void test_bpf_tcp_ca(void)
|
||||
{
|
||||
if (test__start_subtest("dctcp"))
|
||||
@@ -399,4 +549,14 @@ void test_bpf_tcp_ca(void)
|
||||
test_incompl_cong_ops();
|
||||
if (test__start_subtest("unsupp_cong_op"))
|
||||
test_unsupp_cong_op();
|
||||
if (test__start_subtest("update_ca"))
|
||||
test_update_ca();
|
||||
if (test__start_subtest("update_wrong"))
|
||||
test_update_wrong();
|
||||
if (test__start_subtest("mixed_links"))
|
||||
test_mixed_links();
|
||||
if (test__start_subtest("multi_links"))
|
||||
test_multi_links();
|
||||
if (test__start_subtest("link_replace"))
|
||||
test_link_replace();
|
||||
}
|
||||
|
||||
@@ -144,6 +144,12 @@ void test_verif_scale_pyperf600_nounroll()
|
||||
scale_test("pyperf600_nounroll.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
|
||||
}
|
||||
|
||||
void test_verif_scale_pyperf600_iter()
|
||||
{
|
||||
/* open-coded BPF iterator version */
|
||||
scale_test("pyperf600_iter.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
|
||||
}
|
||||
|
||||
void test_verif_scale_loop1()
|
||||
{
|
||||
scale_test("loop1.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
|
||||
|
||||
@@ -56,8 +56,9 @@ static bool assert_storage_noexist(struct bpf_map *map, const void *key)
|
||||
|
||||
static bool connect_send(const char *cgroup_path)
|
||||
{
|
||||
bool res = true;
|
||||
int server_fd = -1, client_fd = -1;
|
||||
char message[] = "message";
|
||||
bool res = true;
|
||||
|
||||
if (join_cgroup(cgroup_path))
|
||||
goto out_clean;
|
||||
@@ -70,7 +71,10 @@ static bool connect_send(const char *cgroup_path)
|
||||
if (client_fd < 0)
|
||||
goto out_clean;
|
||||
|
||||
if (send(client_fd, "message", strlen("message"), 0) < 0)
|
||||
if (send(client_fd, &message, sizeof(message), 0) < 0)
|
||||
goto out_clean;
|
||||
|
||||
if (read(server_fd, &message, sizeof(message)) < 0)
|
||||
goto out_clean;
|
||||
|
||||
res = false;
|
||||
|
||||
@@ -84,6 +84,7 @@ static const char * const success_tests[] = {
|
||||
"test_cgrp_xchg_release",
|
||||
"test_cgrp_get_release",
|
||||
"test_cgrp_get_ancestors",
|
||||
"test_cgrp_from_id",
|
||||
};
|
||||
|
||||
void test_cgrp_kfunc(void)
|
||||
|
||||
@@ -193,7 +193,7 @@ static void test_cgroup_iter_sleepable(int cgroup_fd, __u64 cgroup_id)
|
||||
cgrp_ls_sleepable__destroy(skel);
|
||||
}
|
||||
|
||||
static void test_no_rcu_lock(__u64 cgroup_id)
|
||||
static void test_yes_rcu_lock(__u64 cgroup_id)
|
||||
{
|
||||
struct cgrp_ls_sleepable *skel;
|
||||
int err;
|
||||
@@ -204,7 +204,7 @@ static void test_no_rcu_lock(__u64 cgroup_id)
|
||||
|
||||
skel->bss->target_pid = syscall(SYS_gettid);
|
||||
|
||||
bpf_program__set_autoload(skel->progs.no_rcu_lock, true);
|
||||
bpf_program__set_autoload(skel->progs.yes_rcu_lock, true);
|
||||
err = cgrp_ls_sleepable__load(skel);
|
||||
if (!ASSERT_OK(err, "skel_load"))
|
||||
goto out;
|
||||
@@ -220,7 +220,7 @@ static void test_no_rcu_lock(__u64 cgroup_id)
|
||||
cgrp_ls_sleepable__destroy(skel);
|
||||
}
|
||||
|
||||
static void test_rcu_lock(void)
|
||||
static void test_no_rcu_lock(void)
|
||||
{
|
||||
struct cgrp_ls_sleepable *skel;
|
||||
int err;
|
||||
@@ -229,7 +229,7 @@ static void test_rcu_lock(void)
|
||||
if (!ASSERT_OK_PTR(skel, "skel_open"))
|
||||
return;
|
||||
|
||||
bpf_program__set_autoload(skel->progs.yes_rcu_lock, true);
|
||||
bpf_program__set_autoload(skel->progs.no_rcu_lock, true);
|
||||
err = cgrp_ls_sleepable__load(skel);
|
||||
ASSERT_ERR(err, "skel_load");
|
||||
|
||||
@@ -256,10 +256,10 @@ void test_cgrp_local_storage(void)
|
||||
test_negative();
|
||||
if (test__start_subtest("cgroup_iter_sleepable"))
|
||||
test_cgroup_iter_sleepable(cgroup_fd, cgroup_id);
|
||||
if (test__start_subtest("yes_rcu_lock"))
|
||||
test_yes_rcu_lock(cgroup_id);
|
||||
if (test__start_subtest("no_rcu_lock"))
|
||||
test_no_rcu_lock(cgroup_id);
|
||||
if (test__start_subtest("rcu_lock"))
|
||||
test_rcu_lock();
|
||||
test_no_rcu_lock();
|
||||
|
||||
close(cgroup_fd);
|
||||
}
|
||||
|
||||
@@ -13,6 +13,7 @@
|
||||
|
||||
#include "progs/test_cls_redirect.h"
|
||||
#include "test_cls_redirect.skel.h"
|
||||
#include "test_cls_redirect_dynptr.skel.h"
|
||||
#include "test_cls_redirect_subprogs.skel.h"
|
||||
|
||||
#define ENCAP_IP INADDR_LOOPBACK
|
||||
@@ -446,6 +447,28 @@ static void test_cls_redirect_common(struct bpf_program *prog)
|
||||
close_fds((int *)conns, sizeof(conns) / sizeof(conns[0][0]));
|
||||
}
|
||||
|
||||
static void test_cls_redirect_dynptr(void)
|
||||
{
|
||||
struct test_cls_redirect_dynptr *skel;
|
||||
int err;
|
||||
|
||||
skel = test_cls_redirect_dynptr__open();
|
||||
if (!ASSERT_OK_PTR(skel, "skel_open"))
|
||||
return;
|
||||
|
||||
skel->rodata->ENCAPSULATION_IP = htonl(ENCAP_IP);
|
||||
skel->rodata->ENCAPSULATION_PORT = htons(ENCAP_PORT);
|
||||
|
||||
err = test_cls_redirect_dynptr__load(skel);
|
||||
if (!ASSERT_OK(err, "skel_load"))
|
||||
goto cleanup;
|
||||
|
||||
test_cls_redirect_common(skel->progs.cls_redirect);
|
||||
|
||||
cleanup:
|
||||
test_cls_redirect_dynptr__destroy(skel);
|
||||
}
|
||||
|
||||
static void test_cls_redirect_inlined(void)
|
||||
{
|
||||
struct test_cls_redirect *skel;
|
||||
@@ -496,4 +519,6 @@ void test_cls_redirect(void)
|
||||
test_cls_redirect_inlined();
|
||||
if (test__start_subtest("cls_redirect_subprogs"))
|
||||
test_cls_redirect_subprogs();
|
||||
if (test__start_subtest("cls_redirect_dynptr"))
|
||||
test_cls_redirect_dynptr();
|
||||
}
|
||||
|
||||
@@ -16,7 +16,7 @@ static const char * const cpumask_success_testcases[] = {
|
||||
"test_copy_any_anyand",
|
||||
"test_insert_leave",
|
||||
"test_insert_remove_release",
|
||||
"test_insert_kptr_get_release",
|
||||
"test_global_mask_rcu",
|
||||
};
|
||||
|
||||
static void verify_success(const char *prog_name)
|
||||
|
||||
917
tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c
Normal file
917
tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c
Normal file
@@ -0,0 +1,917 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
#include <limits.h>
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <ctype.h>
|
||||
#include <regex.h>
|
||||
#include <test_progs.h>
|
||||
|
||||
#include "bpf/btf.h"
|
||||
#include "bpf_util.h"
|
||||
#include "linux/filter.h"
|
||||
#include "disasm.h"
|
||||
|
||||
#define MAX_PROG_TEXT_SZ (32 * 1024)
|
||||
|
||||
/* The code in this file serves the sole purpose of executing test cases
|
||||
* specified in the test_cases array. Each test case specifies a program
|
||||
* type, context field offset, and disassembly patterns that correspond
|
||||
* to read and write instructions generated by
|
||||
* verifier.c:convert_ctx_access() for accessing that field.
|
||||
*
|
||||
* For each test case, up to three programs are created:
|
||||
* - One that uses BPF_LDX_MEM to read the context field.
|
||||
* - One that uses BPF_STX_MEM to write to the context field.
|
||||
* - One that uses BPF_ST_MEM to write to the context field.
|
||||
*
|
||||
* The disassembly of each program is then compared with the pattern
|
||||
* specified in the test case.
|
||||
*/
|
||||
struct test_case {
|
||||
char *name;
|
||||
enum bpf_prog_type prog_type;
|
||||
enum bpf_attach_type expected_attach_type;
|
||||
int field_offset;
|
||||
int field_sz;
|
||||
/* Program generated for BPF_ST_MEM uses value 42 by default,
|
||||
* this field allows to specify custom value.
|
||||
*/
|
||||
struct {
|
||||
bool use;
|
||||
int value;
|
||||
} st_value;
|
||||
/* Pattern for BPF_LDX_MEM(field_sz, dst, ctx, field_offset) */
|
||||
char *read;
|
||||
/* Pattern for BPF_STX_MEM(field_sz, ctx, src, field_offset) and
|
||||
* BPF_ST_MEM (field_sz, ctx, src, field_offset)
|
||||
*/
|
||||
char *write;
|
||||
/* Pattern for BPF_ST_MEM(field_sz, ctx, src, field_offset),
|
||||
* takes priority over `write`.
|
||||
*/
|
||||
char *write_st;
|
||||
/* Pattern for BPF_STX_MEM (field_sz, ctx, src, field_offset),
|
||||
* takes priority over `write`.
|
||||
*/
|
||||
char *write_stx;
|
||||
};
|
||||
|
||||
#define N(_prog_type, type, field, name_extra...) \
|
||||
.name = #_prog_type "." #field name_extra, \
|
||||
.prog_type = BPF_PROG_TYPE_##_prog_type, \
|
||||
.field_offset = offsetof(type, field), \
|
||||
.field_sz = sizeof(typeof(((type *)NULL)->field))
|
||||
|
||||
static struct test_case test_cases[] = {
|
||||
/* Sign extension on s390 changes the pattern */
|
||||
#if defined(__x86_64__) || defined(__aarch64__)
|
||||
{
|
||||
N(SCHED_CLS, struct __sk_buff, tstamp),
|
||||
.read = "r11 = *(u8 *)($ctx + sk_buff::__mono_tc_offset);"
|
||||
"w11 &= 3;"
|
||||
"if w11 != 0x3 goto pc+2;"
|
||||
"$dst = 0;"
|
||||
"goto pc+1;"
|
||||
"$dst = *(u64 *)($ctx + sk_buff::tstamp);",
|
||||
.write = "r11 = *(u8 *)($ctx + sk_buff::__mono_tc_offset);"
|
||||
"if w11 & 0x2 goto pc+1;"
|
||||
"goto pc+2;"
|
||||
"w11 &= -2;"
|
||||
"*(u8 *)($ctx + sk_buff::__mono_tc_offset) = r11;"
|
||||
"*(u64 *)($ctx + sk_buff::tstamp) = $src;",
|
||||
},
|
||||
#endif
|
||||
{
|
||||
N(SCHED_CLS, struct __sk_buff, priority),
|
||||
.read = "$dst = *(u32 *)($ctx + sk_buff::priority);",
|
||||
.write = "*(u32 *)($ctx + sk_buff::priority) = $src;",
|
||||
},
|
||||
{
|
||||
N(SCHED_CLS, struct __sk_buff, mark),
|
||||
.read = "$dst = *(u32 *)($ctx + sk_buff::mark);",
|
||||
.write = "*(u32 *)($ctx + sk_buff::mark) = $src;",
|
||||
},
|
||||
{
|
||||
N(SCHED_CLS, struct __sk_buff, cb[0]),
|
||||
.read = "$dst = *(u32 *)($ctx + $(sk_buff::cb + qdisc_skb_cb::data));",
|
||||
.write = "*(u32 *)($ctx + $(sk_buff::cb + qdisc_skb_cb::data)) = $src;",
|
||||
},
|
||||
{
|
||||
N(SCHED_CLS, struct __sk_buff, tc_classid),
|
||||
.read = "$dst = *(u16 *)($ctx + $(sk_buff::cb + qdisc_skb_cb::tc_classid));",
|
||||
.write = "*(u16 *)($ctx + $(sk_buff::cb + qdisc_skb_cb::tc_classid)) = $src;",
|
||||
},
|
||||
{
|
||||
N(SCHED_CLS, struct __sk_buff, tc_index),
|
||||
.read = "$dst = *(u16 *)($ctx + sk_buff::tc_index);",
|
||||
.write = "*(u16 *)($ctx + sk_buff::tc_index) = $src;",
|
||||
},
|
||||
{
|
||||
N(SCHED_CLS, struct __sk_buff, queue_mapping),
|
||||
.read = "$dst = *(u16 *)($ctx + sk_buff::queue_mapping);",
|
||||
.write_stx = "if $src >= 0xffff goto pc+1;"
|
||||
"*(u16 *)($ctx + sk_buff::queue_mapping) = $src;",
|
||||
.write_st = "*(u16 *)($ctx + sk_buff::queue_mapping) = $src;",
|
||||
},
|
||||
{
|
||||
/* This is a corner case in filter.c:bpf_convert_ctx_access() */
|
||||
N(SCHED_CLS, struct __sk_buff, queue_mapping, ".ushrt_max"),
|
||||
.st_value = { true, USHRT_MAX },
|
||||
.write_st = "goto pc+0;",
|
||||
},
|
||||
{
|
||||
N(CGROUP_SOCK, struct bpf_sock, bound_dev_if),
|
||||
.read = "$dst = *(u32 *)($ctx + sock_common::skc_bound_dev_if);",
|
||||
.write = "*(u32 *)($ctx + sock_common::skc_bound_dev_if) = $src;",
|
||||
},
|
||||
{
|
||||
N(CGROUP_SOCK, struct bpf_sock, mark),
|
||||
.read = "$dst = *(u32 *)($ctx + sock::sk_mark);",
|
||||
.write = "*(u32 *)($ctx + sock::sk_mark) = $src;",
|
||||
},
|
||||
{
|
||||
N(CGROUP_SOCK, struct bpf_sock, priority),
|
||||
.read = "$dst = *(u32 *)($ctx + sock::sk_priority);",
|
||||
.write = "*(u32 *)($ctx + sock::sk_priority) = $src;",
|
||||
},
|
||||
{
|
||||
N(SOCK_OPS, struct bpf_sock_ops, replylong[0]),
|
||||
.read = "$dst = *(u32 *)($ctx + bpf_sock_ops_kern::replylong);",
|
||||
.write = "*(u32 *)($ctx + bpf_sock_ops_kern::replylong) = $src;",
|
||||
},
|
||||
{
|
||||
N(CGROUP_SYSCTL, struct bpf_sysctl, file_pos),
|
||||
#if __BYTE_ORDER == __LITTLE_ENDIAN
|
||||
.read = "$dst = *(u64 *)($ctx + bpf_sysctl_kern::ppos);"
|
||||
"$dst = *(u32 *)($dst +0);",
|
||||
.write = "*(u64 *)($ctx + bpf_sysctl_kern::tmp_reg) = r9;"
|
||||
"r9 = *(u64 *)($ctx + bpf_sysctl_kern::ppos);"
|
||||
"*(u32 *)(r9 +0) = $src;"
|
||||
"r9 = *(u64 *)($ctx + bpf_sysctl_kern::tmp_reg);",
|
||||
#else
|
||||
.read = "$dst = *(u64 *)($ctx + bpf_sysctl_kern::ppos);"
|
||||
"$dst = *(u32 *)($dst +4);",
|
||||
.write = "*(u64 *)($ctx + bpf_sysctl_kern::tmp_reg) = r9;"
|
||||
"r9 = *(u64 *)($ctx + bpf_sysctl_kern::ppos);"
|
||||
"*(u32 *)(r9 +4) = $src;"
|
||||
"r9 = *(u64 *)($ctx + bpf_sysctl_kern::tmp_reg);",
|
||||
#endif
|
||||
},
|
||||
{
|
||||
N(CGROUP_SOCKOPT, struct bpf_sockopt, sk),
|
||||
.read = "$dst = *(u64 *)($ctx + bpf_sockopt_kern::sk);",
|
||||
.expected_attach_type = BPF_CGROUP_GETSOCKOPT,
|
||||
},
|
||||
{
|
||||
N(CGROUP_SOCKOPT, struct bpf_sockopt, level),
|
||||
.read = "$dst = *(u32 *)($ctx + bpf_sockopt_kern::level);",
|
||||
.write = "*(u32 *)($ctx + bpf_sockopt_kern::level) = $src;",
|
||||
.expected_attach_type = BPF_CGROUP_SETSOCKOPT,
|
||||
},
|
||||
{
|
||||
N(CGROUP_SOCKOPT, struct bpf_sockopt, optname),
|
||||
.read = "$dst = *(u32 *)($ctx + bpf_sockopt_kern::optname);",
|
||||
.write = "*(u32 *)($ctx + bpf_sockopt_kern::optname) = $src;",
|
||||
.expected_attach_type = BPF_CGROUP_SETSOCKOPT,
|
||||
},
|
||||
{
|
||||
N(CGROUP_SOCKOPT, struct bpf_sockopt, optlen),
|
||||
.read = "$dst = *(u32 *)($ctx + bpf_sockopt_kern::optlen);",
|
||||
.write = "*(u32 *)($ctx + bpf_sockopt_kern::optlen) = $src;",
|
||||
.expected_attach_type = BPF_CGROUP_SETSOCKOPT,
|
||||
},
|
||||
{
|
||||
N(CGROUP_SOCKOPT, struct bpf_sockopt, retval),
|
||||
.read = "$dst = *(u64 *)($ctx + bpf_sockopt_kern::current_task);"
|
||||
"$dst = *(u64 *)($dst + task_struct::bpf_ctx);"
|
||||
"$dst = *(u32 *)($dst + bpf_cg_run_ctx::retval);",
|
||||
.write = "*(u64 *)($ctx + bpf_sockopt_kern::tmp_reg) = r9;"
|
||||
"r9 = *(u64 *)($ctx + bpf_sockopt_kern::current_task);"
|
||||
"r9 = *(u64 *)(r9 + task_struct::bpf_ctx);"
|
||||
"*(u32 *)(r9 + bpf_cg_run_ctx::retval) = $src;"
|
||||
"r9 = *(u64 *)($ctx + bpf_sockopt_kern::tmp_reg);",
|
||||
.expected_attach_type = BPF_CGROUP_GETSOCKOPT,
|
||||
},
|
||||
{
|
||||
N(CGROUP_SOCKOPT, struct bpf_sockopt, optval),
|
||||
.read = "$dst = *(u64 *)($ctx + bpf_sockopt_kern::optval);",
|
||||
.expected_attach_type = BPF_CGROUP_GETSOCKOPT,
|
||||
},
|
||||
{
|
||||
N(CGROUP_SOCKOPT, struct bpf_sockopt, optval_end),
|
||||
.read = "$dst = *(u64 *)($ctx + bpf_sockopt_kern::optval_end);",
|
||||
.expected_attach_type = BPF_CGROUP_GETSOCKOPT,
|
||||
},
|
||||
};
|
||||
|
||||
#undef N
|
||||
|
||||
static regex_t *ident_regex;
|
||||
static regex_t *field_regex;
|
||||
|
||||
/* Advance `str` past leading whitespace; returns a pointer to the first
 * non-space character (possibly the terminating NUL).
 */
static char *skip_space(char *str)
{
	/* Cast to unsigned char: passing a negative plain char to
	 * isspace() is undefined behavior (CERT STR37-C).
	 */
	while (*str && isspace((unsigned char)*str))
		++str;
	return str;
}
|
||||
|
||||
/* Advance `str` past whitespace and ';' characters (';' terminates a
 * pattern line, see match_pattern()).
 */
static char *skip_space_and_semi(char *str)
{
	/* unsigned char cast avoids isspace() UB on negative chars
	 * (CERT STR37-C).
	 */
	while (*str && (isspace((unsigned char)*str) || *str == ';'))
		++str;
	return str;
}
|
||||
|
||||
static char *match_str(char *str, char *prefix)
|
||||
{
|
||||
while (*str && *prefix && *str == *prefix) {
|
||||
++str;
|
||||
++prefix;
|
||||
}
|
||||
if (*prefix)
|
||||
return NULL;
|
||||
return str;
|
||||
}
|
||||
|
||||
/* If `str` begins with the decimal representation of `num`, return a
 * pointer just past the digits; otherwise return NULL.
 */
static char *match_number(char *str, int num)
{
	char *end = str;
	int parsed;

	parsed = (int)strtol(str, &end, 10);
	if (end == str || parsed != num)
		return NULL;

	return end;
}
|
||||
|
||||
/* Recursively search the struct/union identified by BTF id `btf_id` for a
 * member named `field_name`. `off` carries the accumulated *bit* offset of
 * the enclosing aggregate(s). Returns the member's byte offset on success,
 * -1 if the member is not found or the id is not a struct/union.
 * Anonymous members (empty name) are descended into so that fields of
 * unnamed inner structs/unions are found as well.
 */
static int find_field_offset_aux(struct btf *btf, int btf_id, char *field_name, int off)
{
	const struct btf_type *type = btf__type_by_id(btf, btf_id);
	const struct btf_member *m;
	__u16 mnum;
	int i;

	if (!type) {
		PRINT_FAIL("Can't find btf_type for id %d\n", btf_id);
		return -1;
	}

	if (!btf_is_struct(type) && !btf_is_union(type)) {
		PRINT_FAIL("BTF id %d is not struct or union\n", btf_id);
		return -1;
	}

	m = btf_members(type);
	mnum = btf_vlen(type);

	for (i = 0; i < mnum; ++i, ++m) {
		const char *mname = btf__name_by_offset(btf, m->name_off);

		/* Anonymous member: recurse, carrying its bit offset. */
		if (strcmp(mname, "") == 0) {
			int msize = find_field_offset_aux(btf, m->type, field_name,
							  off + m->offset);
			if (msize >= 0)
				return msize;
		}

		if (strcmp(mname, field_name))
			continue;

		/* m->offset is in bits; convert to bytes.
		 * NOTE(review): assumes the looked-up member is not a
		 * bitfield (kind_flag encoding would change m->offset's
		 * meaning) — true for the fields used in this test.
		 */
		return (off + m->offset) / 8;
	}

	return -1;
}
|
||||
|
||||
/* Resolve a "<type>::<field>" reference matched by `field_regex` within
 * `pattern` to a byte offset using BTF. `matches[1]` delimits the type
 * name, `matches[2]` the field name. Returns the byte offset, or -1 on
 * any lookup/size failure.
 */
static int find_field_offset(struct btf *btf, char *pattern, regmatch_t *matches)
{
	int type_sz = matches[1].rm_eo - matches[1].rm_so;
	int field_sz = matches[2].rm_eo - matches[2].rm_so;
	char *type = pattern + matches[1].rm_so;
	char *field = pattern + matches[2].rm_so;
	char field_str[128] = {};
	char type_str[128] = {};
	int btf_id, field_offset;

	/* Length checks below plus the zero-initialized buffers guarantee
	 * the strncpy() results are NUL-terminated.
	 */
	if (type_sz >= sizeof(type_str)) {
		PRINT_FAIL("Malformed pattern: type ident is too long: %d\n", type_sz);
		return -1;
	}

	if (field_sz >= sizeof(field_str)) {
		PRINT_FAIL("Malformed pattern: field ident is too long: %d\n", field_sz);
		return -1;
	}

	strncpy(type_str, type, type_sz);
	strncpy(field_str, field, field_sz);
	btf_id = btf__find_by_name(btf, type_str);
	if (btf_id < 0) {
		PRINT_FAIL("No BTF info for type %s\n", type_str);
		return -1;
	}

	field_offset = find_field_offset_aux(btf, btf_id, field_str, 0);
	if (field_offset < 0) {
		PRINT_FAIL("No BTF info for field %s::%s\n", type_str, field_str);
		return -1;
	}

	return field_offset;
}
|
||||
|
||||
/* Allocate and compile an extended POSIX regex from `pat`.
 * Returns NULL (after reporting the failure) on allocation or
 * compilation error; the caller frees the result via free_regex().
 */
static regex_t *compile_regex(char *pat)
{
	regex_t *re = malloc(sizeof(*re));
	int rc;

	if (!re) {
		PRINT_FAIL("Can't alloc regex\n");
		return NULL;
	}

	rc = regcomp(re, pat, REG_EXTENDED);
	if (rc != 0) {
		char msg[512];

		regerror(rc, re, msg, sizeof(msg));
		PRINT_FAIL("Can't compile regex: %s\n", msg);
		free(re);
		return NULL;
	}

	return re;
}
|
||||
|
||||
/* Release a regex allocated by compile_regex(); NULL is a no-op. */
static void free_regex(regex_t *re)
{
	if (re) {
		regfree(re);
		free(re);
	}
}
|
||||
|
||||
static u32 max_line_len(char *str)
|
||||
{
|
||||
u32 max_line = 0;
|
||||
char *next = str;
|
||||
|
||||
while (next) {
|
||||
next = strchr(str, '\n');
|
||||
if (next) {
|
||||
max_line = max_t(u32, max_line, (next - str));
|
||||
str = next + 1;
|
||||
} else {
|
||||
max_line = max_t(u32, max_line, strlen(str));
|
||||
}
|
||||
}
|
||||
|
||||
return min(max_line, 60u);
|
||||
}
|
||||
|
||||
/* Print strings `pattern_origin` and `text_origin` side by side,
|
||||
* assume `pattern_pos` and `text_pos` designate location within
|
||||
* corresponding origin string where match diverges.
|
||||
* The output should look like:
|
||||
*
|
||||
* Can't match disassembly(left) with pattern(right):
|
||||
* r2 = *(u64 *)(r1 +0) ; $dst = *(u64 *)($ctx + bpf_sockopt_kern::sk1)
|
||||
* ^ ^
|
||||
* r0 = 0 ;
|
||||
* exit ;
|
||||
*/
|
||||
static void print_match_error(FILE *out,
			      char *pattern_origin, char *text_origin,
			      char *pattern_pos, char *text_pos)
{
	char *pattern = pattern_origin;
	char *text = text_origin;
	/* Column where the pattern side starts: widest text line + 2. */
	int middle = max_line_len(text) + 2;

	fprintf(out, "Can't match disassembly(left) with pattern(right):\n");
	while (*pattern || *text) {
		int column = 0;
		int mark1 = -1;	/* column of divergence in text, if on this line */
		int mark2 = -1;	/* column of divergence in pattern, if on this line */

		/* Print one line from text */
		while (*text && *text != '\n') {
			if (text == text_pos)
				mark1 = column;
			fputc(*text, out);
			++text;
			++column;
		}
		/* Divergence may sit exactly at end of line. */
		if (text == text_pos)
			mark1 = column;

		/* Pad to the middle */
		while (column < middle) {
			fputc(' ', out);
			++column;
		}
		fputs("; ", out);
		/* NOTE(review): "; " emits 2 characters but column advances
		 * by 3 — presumably intentional slack before the pattern
		 * column; confirm marker alignment if output looks off.
		 */
		column += 3;

		/* Print one line from pattern, pattern lines are terminated by ';' */
		while (*pattern && *pattern != ';') {
			if (pattern == pattern_pos)
				mark2 = column;
			fputc(*pattern, out);
			++pattern;
			++column;
		}
		if (pattern == pattern_pos)
			mark2 = column;

		fputc('\n', out);
		/* Consume the line terminators ('\n' / ';'). */
		if (*pattern)
			++pattern;
		if (*text)
			++text;

		/* If pattern and text diverge at this line, print an
		 * additional line with '^' marks, highlighting
		 * positions where match fails.
		 */
		if (mark1 > 0 || mark2 > 0) {
			for (column = 0; column <= max(mark1, mark2); ++column) {
				if (column == mark1 || column == mark2)
					fputc('^', out);
				else
					fputc(' ', out);
			}
			fputc('\n', out);
		}
	}
}
|
||||
|
||||
/* Test if `text` matches `pattern`. Pattern consists of the following elements:
|
||||
*
|
||||
* - Field offset references:
|
||||
*
|
||||
* <type>::<field>
|
||||
*
|
||||
* When such reference is encountered BTF is used to compute numerical
|
||||
* value for the offset of <field> in <type>. The `text` is expected to
|
||||
* contain matching numerical value.
|
||||
*
|
||||
* - Field groups:
|
||||
*
|
||||
* $(<type>::<field> [+ <type>::<field>]*)
|
||||
*
|
||||
* Allows to specify an offset that is a sum of multiple field offsets.
|
||||
* The `text` is expected to contain matching numerical value.
|
||||
*
|
||||
* - Variable references, e.g. `$src`, `$dst`, `$ctx`.
|
||||
* These are substitutions specified in `reg_map` array.
|
||||
* If a substring of pattern is equal to `reg_map[i][0]` the `text` is
|
||||
* expected to contain `reg_map[i][1]` in the matching position.
|
||||
*
|
||||
* - Whitespace is ignored, ';' counts as whitespace for `pattern`.
|
||||
*
|
||||
* - Any other characters, `pattern` and `text` should match one-to-one.
|
||||
*
|
||||
* Example of a pattern:
|
||||
*
|
||||
* __________ fields group ________________
|
||||
* ' '
|
||||
* *(u16 *)($ctx + $(sk_buff::cb + qdisc_skb_cb::tc_classid)) = $src;
|
||||
* ^^^^ '______________________'
|
||||
* variable reference field offset reference
|
||||
*/
|
||||
static bool match_pattern(struct btf *btf, char *pattern, char *text, char *reg_map[][2])
|
||||
{
|
||||
char *pattern_origin = pattern;
|
||||
char *text_origin = text;
|
||||
regmatch_t matches[3];
|
||||
|
||||
_continue:
|
||||
while (*pattern) {
|
||||
if (!*text)
|
||||
goto err;
|
||||
|
||||
/* Skip whitespace */
|
||||
if (isspace(*pattern) || *pattern == ';') {
|
||||
if (!isspace(*text) && text != text_origin && isalnum(text[-1]))
|
||||
goto err;
|
||||
pattern = skip_space_and_semi(pattern);
|
||||
text = skip_space(text);
|
||||
continue;
|
||||
}
|
||||
|
||||
/* Check for variable references */
|
||||
for (int i = 0; reg_map[i][0]; ++i) {
|
||||
char *pattern_next, *text_next;
|
||||
|
||||
pattern_next = match_str(pattern, reg_map[i][0]);
|
||||
if (!pattern_next)
|
||||
continue;
|
||||
|
||||
text_next = match_str(text, reg_map[i][1]);
|
||||
if (!text_next)
|
||||
goto err;
|
||||
|
||||
pattern = pattern_next;
|
||||
text = text_next;
|
||||
goto _continue;
|
||||
}
|
||||
|
||||
/* Match field group:
|
||||
* $(sk_buff::cb + qdisc_skb_cb::tc_classid)
|
||||
*/
|
||||
if (strncmp(pattern, "$(", 2) == 0) {
|
||||
char *group_start = pattern, *text_next;
|
||||
int acc_offset = 0;
|
||||
|
||||
pattern += 2;
|
||||
|
||||
for (;;) {
|
||||
int field_offset;
|
||||
|
||||
pattern = skip_space(pattern);
|
||||
if (!*pattern) {
|
||||
PRINT_FAIL("Unexpected end of pattern\n");
|
||||
goto err;
|
||||
}
|
||||
|
||||
if (*pattern == ')') {
|
||||
++pattern;
|
||||
break;
|
||||
}
|
||||
|
||||
if (*pattern == '+') {
|
||||
++pattern;
|
||||
continue;
|
||||
}
|
||||
|
||||
printf("pattern: %s\n", pattern);
|
||||
if (regexec(field_regex, pattern, 3, matches, 0) != 0) {
|
||||
PRINT_FAIL("Field reference expected\n");
|
||||
goto err;
|
||||
}
|
||||
|
||||
field_offset = find_field_offset(btf, pattern, matches);
|
||||
if (field_offset < 0)
|
||||
goto err;
|
||||
|
||||
pattern += matches[0].rm_eo;
|
||||
acc_offset += field_offset;
|
||||
}
|
||||
|
||||
text_next = match_number(text, acc_offset);
|
||||
if (!text_next) {
|
||||
PRINT_FAIL("No match for group offset %.*s (%d)\n",
|
||||
(int)(pattern - group_start),
|
||||
group_start,
|
||||
acc_offset);
|
||||
goto err;
|
||||
}
|
||||
text = text_next;
|
||||
}
|
||||
|
||||
/* Match field reference:
|
||||
* sk_buff::cb
|
||||
*/
|
||||
if (regexec(field_regex, pattern, 3, matches, 0) == 0) {
|
||||
int field_offset;
|
||||
char *text_next;
|
||||
|
||||
field_offset = find_field_offset(btf, pattern, matches);
|
||||
if (field_offset < 0)
|
||||
goto err;
|
||||
|
||||
text_next = match_number(text, field_offset);
|
||||
if (!text_next) {
|
||||
PRINT_FAIL("No match for field offset %.*s (%d)\n",
|
||||
(int)matches[0].rm_eo, pattern, field_offset);
|
||||
goto err;
|
||||
}
|
||||
|
||||
pattern += matches[0].rm_eo;
|
||||
text = text_next;
|
||||
continue;
|
||||
}
|
||||
|
||||
/* If pattern points to identifier not followed by '::'
|
||||
* skip the identifier to avoid n^2 application of the
|
||||
* field reference rule.
|
||||
*/
|
||||
if (regexec(ident_regex, pattern, 1, matches, 0) == 0) {
|
||||
if (strncmp(pattern, text, matches[0].rm_eo) != 0)
|
||||
goto err;
|
||||
|
||||
pattern += matches[0].rm_eo;
|
||||
text += matches[0].rm_eo;
|
||||
continue;
|
||||
}
|
||||
|
||||
/* Match literally */
|
||||
if (*pattern != *text)
|
||||
goto err;
|
||||
|
||||
++pattern;
|
||||
++text;
|
||||
}
|
||||
|
||||
return true;
|
||||
|
||||
err:
|
||||
test__fail();
|
||||
print_match_error(stdout, pattern_origin, text_origin, pattern, text);
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Request BPF program instructions after all rewrites are applied,
|
||||
* e.g. verifier.c:convert_ctx_access() is done.
|
||||
*/
|
||||
static int get_xlated_program(int fd_prog, struct bpf_insn **buf, __u32 *cnt)
{
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	__u32 xlated_prog_len;
	__u32 buf_element_size = sizeof(struct bpf_insn);

	/* First query: learn how big the xlated program is. */
	if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) {
		perror("bpf_prog_get_info_by_fd failed");
		return -1;
	}

	xlated_prog_len = info.xlated_prog_len;
	if (xlated_prog_len % buf_element_size) {
		printf("Program length %d is not multiple of %d\n",
		       xlated_prog_len, buf_element_size);
		return -1;
	}

	*cnt = xlated_prog_len / buf_element_size;
	*buf = calloc(*cnt, buf_element_size);
	/* Fix: check the allocation (*buf), not the out-parameter pointer
	 * `buf`, which is always non-NULL — the old check made the OOM
	 * path unreachable.
	 */
	if (!*buf) {
		perror("can't allocate xlated program buffer");
		return -ENOMEM;
	}

	/* Second query: fetch the instructions into the buffer. */
	bzero(&info, sizeof(info));
	info.xlated_prog_len = xlated_prog_len;
	info.xlated_prog_insns = (__u64)(unsigned long)*buf;
	if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) {
		perror("second bpf_prog_get_info_by_fd failed");
		goto out_free_buf;
	}

	return 0;

out_free_buf:
	free(*buf);
	return -1;
}
|
||||
|
||||
/* Print callback for the BPF disassembler: forwards the formatted text
 * to the FILE* smuggled through `private_data`.
 */
static void print_insn(void *private_data, const char *fmt, ...)
{
	FILE *out = private_data;
	va_list ap;

	va_start(ap, fmt);
	vfprintf(out, fmt, ap);
	va_end(ap);
}
|
||||
|
||||
/* Disassemble instructions to a stream using the shared kernel BPF
 * disassembler (print_bpf_insn), routing its output through print_insn().
 */
static void print_xlated(FILE *out, struct bpf_insn *insn, __u32 len)
{
	const struct bpf_insn_cbs cbs = {
		.cb_print = print_insn,
		.cb_call = NULL,
		.cb_imm = NULL,
		.private_data = out,
	};
	bool double_insn = false;
	int i;

	for (i = 0; i < len; i++) {
		/* ld_imm64 occupies two insn slots; skip the second one. */
		if (double_insn) {
			double_insn = false;
			continue;
		}

		double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW);
		print_bpf_insn(&cbs, insn + i, true);
	}
}
|
||||
|
||||
/* We share code with kernel BPF disassembler, it adds '(FF) ' prefix
|
||||
* for each instruction (FF stands for instruction `code` byte).
|
||||
* This function removes the prefix inplace for each line in `str`.
|
||||
*/
|
||||
static void remove_insn_prefix(char *str, int size)
{
	const int skip = 5;	/* strlen("(FF) ") */
	int dst = 0;
	int src = skip;
	int limit = strlen(str);
	char ch;

	if (size < limit)
		limit = size;

	while (src < limit) {
		ch = str[src++];
		if (ch == 0)
			break;
		str[dst++] = ch;
		/* Next line starts with another "(FF) " prefix. */
		if (ch == '\n')
			src += skip;
	}
	str[dst] = 0;
}
|
||||
|
||||
/* Everything needed to load one hand-crafted test program. */
struct prog_info {
	char *prog_kind;		/* "LDX"/"STX"/"ST" — label used in assert messages */
	enum bpf_prog_type prog_type;
	enum bpf_attach_type expected_attach_type;
	struct bpf_insn *prog;		/* raw instructions to load */
	u32 prog_len;			/* number of instructions */
};
|
||||
|
||||
static void match_program(struct btf *btf,
|
||||
struct prog_info *pinfo,
|
||||
char *pattern,
|
||||
char *reg_map[][2],
|
||||
bool skip_first_insn)
|
||||
{
|
||||
struct bpf_insn *buf = NULL;
|
||||
int err = 0, prog_fd = 0;
|
||||
FILE *prog_out = NULL;
|
||||
char *text = NULL;
|
||||
__u32 cnt = 0;
|
||||
|
||||
text = calloc(MAX_PROG_TEXT_SZ, 1);
|
||||
if (!text) {
|
||||
PRINT_FAIL("Can't allocate %d bytes\n", MAX_PROG_TEXT_SZ);
|
||||
goto out;
|
||||
}
|
||||
|
||||
// TODO: log level
|
||||
LIBBPF_OPTS(bpf_prog_load_opts, opts);
|
||||
opts.log_buf = text;
|
||||
opts.log_size = MAX_PROG_TEXT_SZ;
|
||||
opts.log_level = 1 | 2 | 4;
|
||||
opts.expected_attach_type = pinfo->expected_attach_type;
|
||||
|
||||
prog_fd = bpf_prog_load(pinfo->prog_type, NULL, "GPL",
|
||||
pinfo->prog, pinfo->prog_len, &opts);
|
||||
if (prog_fd < 0) {
|
||||
PRINT_FAIL("Can't load program, errno %d (%s), verifier log:\n%s\n",
|
||||
errno, strerror(errno), text);
|
||||
goto out;
|
||||
}
|
||||
|
||||
memset(text, 0, MAX_PROG_TEXT_SZ);
|
||||
|
||||
err = get_xlated_program(prog_fd, &buf, &cnt);
|
||||
if (err) {
|
||||
PRINT_FAIL("Can't load back BPF program\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
prog_out = fmemopen(text, MAX_PROG_TEXT_SZ - 1, "w");
|
||||
if (!prog_out) {
|
||||
PRINT_FAIL("Can't open memory stream\n");
|
||||
goto out;
|
||||
}
|
||||
if (skip_first_insn)
|
||||
print_xlated(prog_out, buf + 1, cnt - 1);
|
||||
else
|
||||
print_xlated(prog_out, buf, cnt);
|
||||
fclose(prog_out);
|
||||
remove_insn_prefix(text, MAX_PROG_TEXT_SZ);
|
||||
|
||||
ASSERT_TRUE(match_pattern(btf, pattern, text, reg_map),
|
||||
pinfo->prog_kind);
|
||||
|
||||
out:
|
||||
if (prog_fd)
|
||||
close(prog_fd);
|
||||
free(buf);
|
||||
free(text);
|
||||
}
|
||||
|
||||
/* Execute one `test_case`: build minimal LDX/STX/ST programs that access
 * test->field_offset with the field's size and check that the
 * verifier-rewritten ("xlated") instructions match the expected patterns.
 */
static void run_one_testcase(struct btf *btf, struct test_case *test)
{
	struct prog_info pinfo = {};
	int bpf_sz;

	if (!test__start_subtest(test->name))
		return;

	/* Map field size in bytes to the BPF access-size modifier. */
	switch (test->field_sz) {
	case 8:
		bpf_sz = BPF_DW;
		break;
	case 4:
		bpf_sz = BPF_W;
		break;
	case 2:
		bpf_sz = BPF_H;
		break;
	case 1:
		bpf_sz = BPF_B;
		break;
	default:
		PRINT_FAIL("Unexpected field size: %d, want 8,4,2 or 1\n", test->field_sz);
		return;
	}

	pinfo.prog_type = test->prog_type;
	pinfo.expected_attach_type = test->expected_attach_type;

	if (test->read) {
		/* r2 = *(size *)(r1 + off) — exercises the read rewrite. */
		struct bpf_insn ldx_prog[] = {
			BPF_LDX_MEM(bpf_sz, BPF_REG_2, BPF_REG_1, test->field_offset),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		};
		char *reg_map[][2] = {
			{ "$ctx", "r1" },
			{ "$dst", "r2" },
			{}
		};

		pinfo.prog_kind = "LDX";
		pinfo.prog = ldx_prog;
		pinfo.prog_len = ARRAY_SIZE(ldx_prog);
		match_program(btf, &pinfo, test->read, reg_map, false);
	}

	if (test->write || test->write_st || test->write_stx) {
		/* *(size *)(r1 + off) = r2 — register-source write. */
		struct bpf_insn stx_prog[] = {
			BPF_MOV64_IMM(BPF_REG_2, 0),
			BPF_STX_MEM(bpf_sz, BPF_REG_1, BPF_REG_2, test->field_offset),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		};
		char *stx_reg_map[][2] = {
			{ "$ctx", "r1" },
			{ "$src", "r2" },
			{}
		};
		/* *(size *)(r1 + off) = imm — immediate-source write;
		 * the immediate defaults to 42 unless the test pins one.
		 */
		struct bpf_insn st_prog[] = {
			BPF_ST_MEM(bpf_sz, BPF_REG_1, test->field_offset,
				   test->st_value.use ? test->st_value.value : 42),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		};
		char *st_reg_map[][2] = {
			{ "$ctx", "r1" },
			{ "$src", "42" },
			{}
		};

		if (test->write || test->write_stx) {
			char *pattern = test->write_stx ? test->write_stx : test->write;

			pinfo.prog_kind = "STX";
			pinfo.prog = stx_prog;
			pinfo.prog_len = ARRAY_SIZE(stx_prog);
			/* skip_first_insn=true: don't match the r2 setup insn */
			match_program(btf, &pinfo, pattern, stx_reg_map, true);
		}

		if (test->write || test->write_st) {
			char *pattern = test->write_st ? test->write_st : test->write;

			pinfo.prog_kind = "ST";
			pinfo.prog = st_prog;
			pinfo.prog_len = ARRAY_SIZE(st_prog);
			match_program(btf, &pinfo, pattern, st_reg_map, false);
		}
	}

	test__end_subtest();
}
|
||||
|
||||
void test_ctx_rewrite(void)
|
||||
{
|
||||
struct btf *btf;
|
||||
int i;
|
||||
|
||||
field_regex = compile_regex("^([[:alpha:]_][[:alnum:]_]+)::([[:alpha:]_][[:alnum:]_]+)");
|
||||
ident_regex = compile_regex("^[[:alpha:]_][[:alnum:]_]+");
|
||||
if (!field_regex || !ident_regex)
|
||||
return;
|
||||
|
||||
btf = btf__load_vmlinux_btf();
|
||||
if (!btf) {
|
||||
PRINT_FAIL("Can't load vmlinux BTF, errno %d (%s)\n", errno, strerror(errno));
|
||||
goto out;
|
||||
}
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(test_cases); ++i)
|
||||
run_one_testcase(btf, &test_cases[i]);
|
||||
|
||||
out:
|
||||
btf__free(btf);
|
||||
free_regex(field_regex);
|
||||
free_regex(ident_regex);
|
||||
}
|
||||
@@ -10,14 +10,6 @@
|
||||
#include "network_helpers.h"
|
||||
#include "decap_sanity.skel.h"
|
||||
|
||||
#define SYS(fmt, ...) \
|
||||
({ \
|
||||
char cmd[1024]; \
|
||||
snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__); \
|
||||
if (!ASSERT_OK(system(cmd), cmd)) \
|
||||
goto fail; \
|
||||
})
|
||||
|
||||
#define NS_TEST "decap_sanity_ns"
|
||||
#define IPV6_IFACE_ADDR "face::1"
|
||||
#define UDP_TEST_PORT 7777
|
||||
@@ -37,9 +29,9 @@ void test_decap_sanity(void)
|
||||
if (!ASSERT_OK_PTR(skel, "skel open_and_load"))
|
||||
return;
|
||||
|
||||
SYS("ip netns add %s", NS_TEST);
|
||||
SYS("ip -net %s -6 addr add %s/128 dev lo nodad", NS_TEST, IPV6_IFACE_ADDR);
|
||||
SYS("ip -net %s link set dev lo up", NS_TEST);
|
||||
SYS(fail, "ip netns add %s", NS_TEST);
|
||||
SYS(fail, "ip -net %s -6 addr add %s/128 dev lo nodad", NS_TEST, IPV6_IFACE_ADDR);
|
||||
SYS(fail, "ip -net %s link set dev lo up", NS_TEST);
|
||||
|
||||
nstoken = open_netns(NS_TEST);
|
||||
if (!ASSERT_OK_PTR(nstoken, "open_netns"))
|
||||
@@ -80,6 +72,6 @@ void test_decap_sanity(void)
|
||||
bpf_tc_hook_destroy(&qdisc_hook);
|
||||
close_netns(nstoken);
|
||||
}
|
||||
system("ip netns del " NS_TEST " &> /dev/null");
|
||||
SYS_NOFAIL("ip netns del " NS_TEST " &> /dev/null");
|
||||
decap_sanity__destroy(skel);
|
||||
}
|
||||
|
||||
@@ -2,20 +2,32 @@
|
||||
/* Copyright (c) 2022 Facebook */
|
||||
|
||||
#include <test_progs.h>
|
||||
#include <network_helpers.h>
|
||||
#include "dynptr_fail.skel.h"
|
||||
#include "dynptr_success.skel.h"
|
||||
|
||||
static const char * const success_tests[] = {
|
||||
"test_read_write",
|
||||
"test_data_slice",
|
||||
"test_ringbuf",
|
||||
enum test_setup_type {
|
||||
SETUP_SYSCALL_SLEEP,
|
||||
SETUP_SKB_PROG,
|
||||
};
|
||||
|
||||
static void verify_success(const char *prog_name)
|
||||
static struct {
|
||||
const char *prog_name;
|
||||
enum test_setup_type type;
|
||||
} success_tests[] = {
|
||||
{"test_read_write", SETUP_SYSCALL_SLEEP},
|
||||
{"test_dynptr_data", SETUP_SYSCALL_SLEEP},
|
||||
{"test_ringbuf", SETUP_SYSCALL_SLEEP},
|
||||
{"test_skb_readonly", SETUP_SKB_PROG},
|
||||
{"test_dynptr_skb_data", SETUP_SKB_PROG},
|
||||
};
|
||||
|
||||
static void verify_success(const char *prog_name, enum test_setup_type setup_type)
|
||||
{
|
||||
struct dynptr_success *skel;
|
||||
struct bpf_program *prog;
|
||||
struct bpf_link *link;
|
||||
int err;
|
||||
|
||||
skel = dynptr_success__open();
|
||||
if (!ASSERT_OK_PTR(skel, "dynptr_success__open"))
|
||||
@@ -23,24 +35,54 @@ static void verify_success(const char *prog_name)
|
||||
|
||||
skel->bss->pid = getpid();
|
||||
|
||||
dynptr_success__load(skel);
|
||||
if (!ASSERT_OK_PTR(skel, "dynptr_success__load"))
|
||||
goto cleanup;
|
||||
|
||||
prog = bpf_object__find_program_by_name(skel->obj, prog_name);
|
||||
if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
|
||||
goto cleanup;
|
||||
|
||||
link = bpf_program__attach(prog);
|
||||
if (!ASSERT_OK_PTR(link, "bpf_program__attach"))
|
||||
bpf_program__set_autoload(prog, true);
|
||||
|
||||
err = dynptr_success__load(skel);
|
||||
if (!ASSERT_OK(err, "dynptr_success__load"))
|
||||
goto cleanup;
|
||||
|
||||
usleep(1);
|
||||
switch (setup_type) {
|
||||
case SETUP_SYSCALL_SLEEP:
|
||||
link = bpf_program__attach(prog);
|
||||
if (!ASSERT_OK_PTR(link, "bpf_program__attach"))
|
||||
goto cleanup;
|
||||
|
||||
usleep(1);
|
||||
|
||||
bpf_link__destroy(link);
|
||||
break;
|
||||
case SETUP_SKB_PROG:
|
||||
{
|
||||
int prog_fd;
|
||||
char buf[64];
|
||||
|
||||
LIBBPF_OPTS(bpf_test_run_opts, topts,
|
||||
.data_in = &pkt_v4,
|
||||
.data_size_in = sizeof(pkt_v4),
|
||||
.data_out = buf,
|
||||
.data_size_out = sizeof(buf),
|
||||
.repeat = 1,
|
||||
);
|
||||
|
||||
prog_fd = bpf_program__fd(prog);
|
||||
if (!ASSERT_GE(prog_fd, 0, "prog_fd"))
|
||||
goto cleanup;
|
||||
|
||||
err = bpf_prog_test_run_opts(prog_fd, &topts);
|
||||
|
||||
if (!ASSERT_OK(err, "test_run"))
|
||||
goto cleanup;
|
||||
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
ASSERT_EQ(skel->bss->err, 0, "err");
|
||||
|
||||
bpf_link__destroy(link);
|
||||
|
||||
cleanup:
|
||||
dynptr_success__destroy(skel);
|
||||
}
|
||||
@@ -50,10 +92,10 @@ void test_dynptr(void)
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(success_tests); i++) {
|
||||
if (!test__start_subtest(success_tests[i]))
|
||||
if (!test__start_subtest(success_tests[i].prog_name))
|
||||
continue;
|
||||
|
||||
verify_success(success_tests[i]);
|
||||
verify_success(success_tests[i].prog_name, success_tests[i].type);
|
||||
}
|
||||
|
||||
RUN_TESTS(dynptr_fail);
|
||||
|
||||
@@ -4,11 +4,6 @@
|
||||
#include <net/if.h>
|
||||
#include "empty_skb.skel.h"
|
||||
|
||||
#define SYS(cmd) ({ \
|
||||
if (!ASSERT_OK(system(cmd), (cmd))) \
|
||||
goto out; \
|
||||
})
|
||||
|
||||
void test_empty_skb(void)
|
||||
{
|
||||
LIBBPF_OPTS(bpf_test_run_opts, tattr);
|
||||
@@ -93,18 +88,18 @@ void test_empty_skb(void)
|
||||
},
|
||||
};
|
||||
|
||||
SYS("ip netns add empty_skb");
|
||||
SYS(out, "ip netns add empty_skb");
|
||||
tok = open_netns("empty_skb");
|
||||
SYS("ip link add veth0 type veth peer veth1");
|
||||
SYS("ip link set dev veth0 up");
|
||||
SYS("ip link set dev veth1 up");
|
||||
SYS("ip addr add 10.0.0.1/8 dev veth0");
|
||||
SYS("ip addr add 10.0.0.2/8 dev veth1");
|
||||
SYS(out, "ip link add veth0 type veth peer veth1");
|
||||
SYS(out, "ip link set dev veth0 up");
|
||||
SYS(out, "ip link set dev veth1 up");
|
||||
SYS(out, "ip addr add 10.0.0.1/8 dev veth0");
|
||||
SYS(out, "ip addr add 10.0.0.2/8 dev veth1");
|
||||
veth_ifindex = if_nametoindex("veth0");
|
||||
|
||||
SYS("ip link add ipip0 type ipip local 10.0.0.1 remote 10.0.0.2");
|
||||
SYS("ip link set ipip0 up");
|
||||
SYS("ip addr add 192.168.1.1/16 dev ipip0");
|
||||
SYS(out, "ip link add ipip0 type ipip local 10.0.0.1 remote 10.0.0.2");
|
||||
SYS(out, "ip link set ipip0 up");
|
||||
SYS(out, "ip addr add 192.168.1.1/16 dev ipip0");
|
||||
ipip_ifindex = if_nametoindex("ipip0");
|
||||
|
||||
bpf_obj = empty_skb__open_and_load();
|
||||
@@ -142,5 +137,5 @@ void test_empty_skb(void)
|
||||
empty_skb__destroy(bpf_obj);
|
||||
if (tok)
|
||||
close_netns(tok);
|
||||
system("ip netns del empty_skb");
|
||||
SYS_NOFAIL("ip netns del empty_skb");
|
||||
}
|
||||
|
||||
@@ -8,14 +8,6 @@
|
||||
#include "network_helpers.h"
|
||||
#include "fib_lookup.skel.h"
|
||||
|
||||
#define SYS(fmt, ...) \
|
||||
({ \
|
||||
char cmd[1024]; \
|
||||
snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__); \
|
||||
if (!ASSERT_OK(system(cmd), cmd)) \
|
||||
goto fail; \
|
||||
})
|
||||
|
||||
#define NS_TEST "fib_lookup_ns"
|
||||
#define IPV6_IFACE_ADDR "face::face"
|
||||
#define IPV6_NUD_FAILED_ADDR "face::1"
|
||||
@@ -59,16 +51,24 @@ static int setup_netns(void)
|
||||
{
|
||||
int err;
|
||||
|
||||
SYS("ip link add veth1 type veth peer name veth2");
|
||||
SYS("ip link set dev veth1 up");
|
||||
SYS(fail, "ip link add veth1 type veth peer name veth2");
|
||||
SYS(fail, "ip link set dev veth1 up");
|
||||
|
||||
SYS("ip addr add %s/64 dev veth1 nodad", IPV6_IFACE_ADDR);
|
||||
SYS("ip neigh add %s dev veth1 nud failed", IPV6_NUD_FAILED_ADDR);
|
||||
SYS("ip neigh add %s dev veth1 lladdr %s nud stale", IPV6_NUD_STALE_ADDR, DMAC);
|
||||
err = write_sysctl("/proc/sys/net/ipv4/neigh/veth1/gc_stale_time", "900");
|
||||
if (!ASSERT_OK(err, "write_sysctl(net.ipv4.neigh.veth1.gc_stale_time)"))
|
||||
goto fail;
|
||||
|
||||
SYS("ip addr add %s/24 dev veth1 nodad", IPV4_IFACE_ADDR);
|
||||
SYS("ip neigh add %s dev veth1 nud failed", IPV4_NUD_FAILED_ADDR);
|
||||
SYS("ip neigh add %s dev veth1 lladdr %s nud stale", IPV4_NUD_STALE_ADDR, DMAC);
|
||||
err = write_sysctl("/proc/sys/net/ipv6/neigh/veth1/gc_stale_time", "900");
|
||||
if (!ASSERT_OK(err, "write_sysctl(net.ipv6.neigh.veth1.gc_stale_time)"))
|
||||
goto fail;
|
||||
|
||||
SYS(fail, "ip addr add %s/64 dev veth1 nodad", IPV6_IFACE_ADDR);
|
||||
SYS(fail, "ip neigh add %s dev veth1 nud failed", IPV6_NUD_FAILED_ADDR);
|
||||
SYS(fail, "ip neigh add %s dev veth1 lladdr %s nud stale", IPV6_NUD_STALE_ADDR, DMAC);
|
||||
|
||||
SYS(fail, "ip addr add %s/24 dev veth1", IPV4_IFACE_ADDR);
|
||||
SYS(fail, "ip neigh add %s dev veth1 nud failed", IPV4_NUD_FAILED_ADDR);
|
||||
SYS(fail, "ip neigh add %s dev veth1 lladdr %s nud stale", IPV4_NUD_STALE_ADDR, DMAC);
|
||||
|
||||
err = write_sysctl("/proc/sys/net/ipv4/conf/veth1/forwarding", "1");
|
||||
if (!ASSERT_OK(err, "write_sysctl(net.ipv4.conf.veth1.forwarding)"))
|
||||
@@ -140,7 +140,7 @@ void test_fib_lookup(void)
|
||||
return;
|
||||
prog_fd = bpf_program__fd(skel->progs.fib_lookup);
|
||||
|
||||
SYS("ip netns add %s", NS_TEST);
|
||||
SYS(fail, "ip netns add %s", NS_TEST);
|
||||
|
||||
nstoken = open_netns(NS_TEST);
|
||||
if (!ASSERT_OK_PTR(nstoken, "open_netns"))
|
||||
@@ -166,7 +166,7 @@ void test_fib_lookup(void)
|
||||
if (!ASSERT_OK(err, "bpf_prog_test_run_opts"))
|
||||
continue;
|
||||
|
||||
ASSERT_EQ(tests[i].expected_ret, skel->bss->fib_lookup_ret,
|
||||
ASSERT_EQ(skel->bss->fib_lookup_ret, tests[i].expected_ret,
|
||||
"fib_lookup_ret");
|
||||
|
||||
ret = memcmp(tests[i].dmac, fib_params->dmac, sizeof(tests[i].dmac));
|
||||
@@ -182,6 +182,6 @@ void test_fib_lookup(void)
|
||||
fail:
|
||||
if (nstoken)
|
||||
close_netns(nstoken);
|
||||
system("ip netns del " NS_TEST " &> /dev/null");
|
||||
SYS_NOFAIL("ip netns del " NS_TEST " &> /dev/null");
|
||||
fib_lookup__destroy(skel);
|
||||
}
|
||||
|
||||
@@ -345,6 +345,30 @@ struct test tests[] = {
|
||||
.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
|
||||
.retval = BPF_OK,
|
||||
},
|
||||
{
|
||||
.name = "ipv6-empty-flow-label",
|
||||
.pkt.ipv6 = {
|
||||
.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
|
||||
.iph.nexthdr = IPPROTO_TCP,
|
||||
.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
|
||||
.iph.flow_lbl = { 0x00, 0x00, 0x00 },
|
||||
.tcp.doff = 5,
|
||||
.tcp.source = 80,
|
||||
.tcp.dest = 8080,
|
||||
},
|
||||
.keys = {
|
||||
.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
|
||||
.nhoff = ETH_HLEN,
|
||||
.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
|
||||
.addr_proto = ETH_P_IPV6,
|
||||
.ip_proto = IPPROTO_TCP,
|
||||
.n_proto = __bpf_constant_htons(ETH_P_IPV6),
|
||||
.sport = 80,
|
||||
.dport = 8080,
|
||||
},
|
||||
.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
|
||||
.retval = BPF_OK,
|
||||
},
|
||||
{
|
||||
.name = "ipip-encap",
|
||||
.pkt.ipip = {
|
||||
|
||||
@@ -37,8 +37,8 @@ static int create_perf_events(void)
|
||||
|
||||
/* create perf event */
|
||||
attr.size = sizeof(attr);
|
||||
attr.type = PERF_TYPE_RAW;
|
||||
attr.config = 0x1b00;
|
||||
attr.type = PERF_TYPE_HARDWARE;
|
||||
attr.config = PERF_COUNT_HW_CPU_CYCLES;
|
||||
attr.sample_type = PERF_SAMPLE_BRANCH_STACK;
|
||||
attr.branch_sample_type = PERF_SAMPLE_BRANCH_KERNEL |
|
||||
PERF_SAMPLE_BRANCH_USER | PERF_SAMPLE_BRANCH_ANY;
|
||||
|
||||
@@ -65,6 +65,7 @@ void test_get_stackid_cannot_attach(void)
|
||||
skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu,
|
||||
pmu_fd);
|
||||
ASSERT_OK_PTR(skel->links.oncpu, "attach_perf_event_callchain");
|
||||
bpf_link__destroy(skel->links.oncpu);
|
||||
close(pmu_fd);
|
||||
|
||||
/* add exclude_callchain_kernel, attach should fail */
|
||||
|
||||
106
tools/testing/selftests/bpf/prog_tests/iters.c
Normal file
106
tools/testing/selftests/bpf/prog_tests/iters.c
Normal file
@@ -0,0 +1,106 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
|
||||
|
||||
#include <test_progs.h>
|
||||
|
||||
#include "iters.skel.h"
|
||||
#include "iters_state_safety.skel.h"
|
||||
#include "iters_looping.skel.h"
|
||||
#include "iters_num.skel.h"
|
||||
#include "iters_testmod_seq.skel.h"
|
||||
|
||||
static void subtest_num_iters(void)
|
||||
{
|
||||
struct iters_num *skel;
|
||||
int err;
|
||||
|
||||
skel = iters_num__open_and_load();
|
||||
if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
|
||||
return;
|
||||
|
||||
err = iters_num__attach(skel);
|
||||
if (!ASSERT_OK(err, "skel_attach"))
|
||||
goto cleanup;
|
||||
|
||||
usleep(1);
|
||||
iters_num__detach(skel);
|
||||
|
||||
#define VALIDATE_CASE(case_name) \
|
||||
ASSERT_EQ(skel->bss->res_##case_name, \
|
||||
skel->rodata->exp_##case_name, \
|
||||
#case_name)
|
||||
|
||||
VALIDATE_CASE(empty_zero);
|
||||
VALIDATE_CASE(empty_int_min);
|
||||
VALIDATE_CASE(empty_int_max);
|
||||
VALIDATE_CASE(empty_minus_one);
|
||||
|
||||
VALIDATE_CASE(simple_sum);
|
||||
VALIDATE_CASE(neg_sum);
|
||||
VALIDATE_CASE(very_neg_sum);
|
||||
VALIDATE_CASE(neg_pos_sum);
|
||||
|
||||
VALIDATE_CASE(invalid_range);
|
||||
VALIDATE_CASE(max_range);
|
||||
VALIDATE_CASE(e2big_range);
|
||||
|
||||
VALIDATE_CASE(succ_elem_cnt);
|
||||
VALIDATE_CASE(overfetched_elem_cnt);
|
||||
VALIDATE_CASE(fail_elem_cnt);
|
||||
|
||||
#undef VALIDATE_CASE
|
||||
|
||||
cleanup:
|
||||
iters_num__destroy(skel);
|
||||
}
|
||||
|
||||
static void subtest_testmod_seq_iters(void)
|
||||
{
|
||||
struct iters_testmod_seq *skel;
|
||||
int err;
|
||||
|
||||
if (!env.has_testmod) {
|
||||
test__skip();
|
||||
return;
|
||||
}
|
||||
|
||||
skel = iters_testmod_seq__open_and_load();
|
||||
if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
|
||||
return;
|
||||
|
||||
err = iters_testmod_seq__attach(skel);
|
||||
if (!ASSERT_OK(err, "skel_attach"))
|
||||
goto cleanup;
|
||||
|
||||
usleep(1);
|
||||
iters_testmod_seq__detach(skel);
|
||||
|
||||
#define VALIDATE_CASE(case_name) \
|
||||
ASSERT_EQ(skel->bss->res_##case_name, \
|
||||
skel->rodata->exp_##case_name, \
|
||||
#case_name)
|
||||
|
||||
VALIDATE_CASE(empty);
|
||||
VALIDATE_CASE(full);
|
||||
VALIDATE_CASE(truncated);
|
||||
|
||||
#undef VALIDATE_CASE
|
||||
|
||||
cleanup:
|
||||
iters_testmod_seq__destroy(skel);
|
||||
}
|
||||
|
||||
void test_iters(void)
|
||||
{
|
||||
RUN_TESTS(iters_state_safety);
|
||||
RUN_TESTS(iters_looping);
|
||||
RUN_TESTS(iters);
|
||||
|
||||
if (env.has_testmod)
|
||||
RUN_TESTS(iters_testmod_seq);
|
||||
|
||||
if (test__start_subtest("num"))
|
||||
subtest_num_iters();
|
||||
if (test__start_subtest("testmod_seq"))
|
||||
subtest_testmod_seq_iters();
|
||||
}
|
||||
@@ -338,7 +338,12 @@ static int get_syms(char ***symsp, size_t *cntp, bool kernel)
|
||||
* Filtering out duplicates by using hashmap__add, which won't
|
||||
* add existing entry.
|
||||
*/
|
||||
f = fopen("/sys/kernel/debug/tracing/available_filter_functions", "r");
|
||||
|
||||
if (access("/sys/kernel/tracing/trace", F_OK) == 0)
|
||||
f = fopen("/sys/kernel/tracing/available_filter_functions", "r");
|
||||
else
|
||||
f = fopen("/sys/kernel/debug/tracing/available_filter_functions", "r");
|
||||
|
||||
if (!f)
|
||||
return -EINVAL;
|
||||
|
||||
@@ -376,8 +381,10 @@ static int get_syms(char ***symsp, size_t *cntp, bool kernel)
|
||||
continue;
|
||||
|
||||
err = hashmap__add(map, name, 0);
|
||||
if (err == -EEXIST)
|
||||
if (err == -EEXIST) {
|
||||
err = 0;
|
||||
continue;
|
||||
}
|
||||
if (err)
|
||||
goto error;
|
||||
|
||||
|
||||
@@ -93,4 +93,6 @@ void test_l4lb_all(void)
|
||||
test_l4lb("test_l4lb.bpf.o");
|
||||
if (test__start_subtest("l4lb_noinline"))
|
||||
test_l4lb("test_l4lb_noinline.bpf.o");
|
||||
if (test__start_subtest("l4lb_noinline_dynptr"))
|
||||
test_l4lb("test_l4lb_noinline_dynptr.bpf.o");
|
||||
}
|
||||
|
||||
@@ -84,11 +84,11 @@ static struct {
|
||||
{ "double_push_back", "arg#1 expected pointer to allocated object" },
|
||||
{ "no_node_value_type", "bpf_list_node not found at offset=0" },
|
||||
{ "incorrect_value_type",
|
||||
"operation on bpf_list_head expects arg#1 bpf_list_node at offset=0 in struct foo, "
|
||||
"operation on bpf_list_head expects arg#1 bpf_list_node at offset=40 in struct foo, "
|
||||
"but arg is at offset=0 in struct bar" },
|
||||
{ "incorrect_node_var_off", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" },
|
||||
{ "incorrect_node_off1", "bpf_list_node not found at offset=1" },
|
||||
{ "incorrect_node_off2", "arg#1 offset=40, but expected bpf_list_node at offset=0 in struct foo" },
|
||||
{ "incorrect_node_off1", "bpf_list_node not found at offset=41" },
|
||||
{ "incorrect_node_off2", "arg#1 offset=0, but expected bpf_list_node at offset=40 in struct foo" },
|
||||
{ "no_head_type", "bpf_list_head not found at offset=0" },
|
||||
{ "incorrect_head_var_off1", "R1 doesn't have constant offset" },
|
||||
{ "incorrect_head_var_off2", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" },
|
||||
@@ -266,6 +266,59 @@ static struct btf *init_btf(void)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void list_and_rb_node_same_struct(bool refcount_field)
|
||||
{
|
||||
int bpf_rb_node_btf_id, bpf_refcount_btf_id, foo_btf_id;
|
||||
struct btf *btf;
|
||||
int id, err;
|
||||
|
||||
btf = init_btf();
|
||||
if (!ASSERT_OK_PTR(btf, "init_btf"))
|
||||
return;
|
||||
|
||||
bpf_rb_node_btf_id = btf__add_struct(btf, "bpf_rb_node", 24);
|
||||
if (!ASSERT_GT(bpf_rb_node_btf_id, 0, "btf__add_struct bpf_rb_node"))
|
||||
return;
|
||||
|
||||
if (refcount_field) {
|
||||
bpf_refcount_btf_id = btf__add_struct(btf, "bpf_refcount", 4);
|
||||
if (!ASSERT_GT(bpf_refcount_btf_id, 0, "btf__add_struct bpf_refcount"))
|
||||
return;
|
||||
}
|
||||
|
||||
id = btf__add_struct(btf, "bar", refcount_field ? 44 : 40);
|
||||
if (!ASSERT_GT(id, 0, "btf__add_struct bar"))
|
||||
return;
|
||||
err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
|
||||
if (!ASSERT_OK(err, "btf__add_field bar::a"))
|
||||
return;
|
||||
err = btf__add_field(btf, "c", bpf_rb_node_btf_id, 128, 0);
|
||||
if (!ASSERT_OK(err, "btf__add_field bar::c"))
|
||||
return;
|
||||
if (refcount_field) {
|
||||
err = btf__add_field(btf, "ref", bpf_refcount_btf_id, 320, 0);
|
||||
if (!ASSERT_OK(err, "btf__add_field bar::ref"))
|
||||
return;
|
||||
}
|
||||
|
||||
foo_btf_id = btf__add_struct(btf, "foo", 20);
|
||||
if (!ASSERT_GT(foo_btf_id, 0, "btf__add_struct foo"))
|
||||
return;
|
||||
err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
|
||||
if (!ASSERT_OK(err, "btf__add_field foo::a"))
|
||||
return;
|
||||
err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0);
|
||||
if (!ASSERT_OK(err, "btf__add_field foo::b"))
|
||||
return;
|
||||
id = btf__add_decl_tag(btf, "contains:bar:a", foo_btf_id, 0);
|
||||
if (!ASSERT_GT(id, 0, "btf__add_decl_tag contains:bar:a"))
|
||||
return;
|
||||
|
||||
err = btf__load_into_kernel(btf);
|
||||
ASSERT_EQ(err, refcount_field ? 0 : -EINVAL, "check btf");
|
||||
btf__free(btf);
|
||||
}
|
||||
|
||||
static void test_btf(void)
|
||||
{
|
||||
struct btf *btf = NULL;
|
||||
@@ -717,39 +770,12 @@ static void test_btf(void)
|
||||
}
|
||||
|
||||
while (test__start_subtest("btf: list_node and rb_node in same struct")) {
|
||||
btf = init_btf();
|
||||
if (!ASSERT_OK_PTR(btf, "init_btf"))
|
||||
break;
|
||||
list_and_rb_node_same_struct(true);
|
||||
break;
|
||||
}
|
||||
|
||||
id = btf__add_struct(btf, "bpf_rb_node", 24);
|
||||
if (!ASSERT_EQ(id, 5, "btf__add_struct bpf_rb_node"))
|
||||
break;
|
||||
id = btf__add_struct(btf, "bar", 40);
|
||||
if (!ASSERT_EQ(id, 6, "btf__add_struct bar"))
|
||||
break;
|
||||
err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
|
||||
if (!ASSERT_OK(err, "btf__add_field bar::a"))
|
||||
break;
|
||||
err = btf__add_field(btf, "c", 5, 128, 0);
|
||||
if (!ASSERT_OK(err, "btf__add_field bar::c"))
|
||||
break;
|
||||
|
||||
id = btf__add_struct(btf, "foo", 20);
|
||||
if (!ASSERT_EQ(id, 7, "btf__add_struct foo"))
|
||||
break;
|
||||
err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
|
||||
if (!ASSERT_OK(err, "btf__add_field foo::a"))
|
||||
break;
|
||||
err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0);
|
||||
if (!ASSERT_OK(err, "btf__add_field foo::b"))
|
||||
break;
|
||||
id = btf__add_decl_tag(btf, "contains:bar:a", 7, 0);
|
||||
if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:bar:a"))
|
||||
break;
|
||||
|
||||
err = btf__load_into_kernel(btf);
|
||||
ASSERT_EQ(err, -EINVAL, "check btf");
|
||||
btf__free(btf);
|
||||
while (test__start_subtest("btf: list_node and rb_node in same struct, no bpf_refcount")) {
|
||||
list_and_rb_node_same_struct(false);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
60
tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c
Normal file
60
tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c
Normal file
@@ -0,0 +1,60 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
|
||||
|
||||
#include <test_progs.h>
|
||||
#include <network_helpers.h>
|
||||
|
||||
#include "local_kptr_stash.skel.h"
|
||||
static void test_local_kptr_stash_simple(void)
|
||||
{
|
||||
LIBBPF_OPTS(bpf_test_run_opts, opts,
|
||||
.data_in = &pkt_v4,
|
||||
.data_size_in = sizeof(pkt_v4),
|
||||
.repeat = 1,
|
||||
);
|
||||
struct local_kptr_stash *skel;
|
||||
int ret;
|
||||
|
||||
skel = local_kptr_stash__open_and_load();
|
||||
if (!ASSERT_OK_PTR(skel, "local_kptr_stash__open_and_load"))
|
||||
return;
|
||||
|
||||
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.stash_rb_nodes), &opts);
|
||||
ASSERT_OK(ret, "local_kptr_stash_add_nodes run");
|
||||
ASSERT_OK(opts.retval, "local_kptr_stash_add_nodes retval");
|
||||
|
||||
local_kptr_stash__destroy(skel);
|
||||
}
|
||||
|
||||
static void test_local_kptr_stash_unstash(void)
|
||||
{
|
||||
LIBBPF_OPTS(bpf_test_run_opts, opts,
|
||||
.data_in = &pkt_v4,
|
||||
.data_size_in = sizeof(pkt_v4),
|
||||
.repeat = 1,
|
||||
);
|
||||
struct local_kptr_stash *skel;
|
||||
int ret;
|
||||
|
||||
skel = local_kptr_stash__open_and_load();
|
||||
if (!ASSERT_OK_PTR(skel, "local_kptr_stash__open_and_load"))
|
||||
return;
|
||||
|
||||
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.stash_rb_nodes), &opts);
|
||||
ASSERT_OK(ret, "local_kptr_stash_add_nodes run");
|
||||
ASSERT_OK(opts.retval, "local_kptr_stash_add_nodes retval");
|
||||
|
||||
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.unstash_rb_node), &opts);
|
||||
ASSERT_OK(ret, "local_kptr_stash_add_nodes run");
|
||||
ASSERT_EQ(opts.retval, 42, "local_kptr_stash_add_nodes retval");
|
||||
|
||||
local_kptr_stash__destroy(skel);
|
||||
}
|
||||
|
||||
void test_local_kptr_stash_success(void)
|
||||
{
|
||||
if (test__start_subtest("local_kptr_stash_simple"))
|
||||
test_local_kptr_stash_simple();
|
||||
if (test__start_subtest("local_kptr_stash_unstash"))
|
||||
test_local_kptr_stash_unstash();
|
||||
}
|
||||
@@ -24,6 +24,7 @@ static void bad_core_relo(size_t log_buf_size, enum trunc_type trunc_type)
|
||||
bpf_program__set_autoload(skel->progs.bad_relo, true);
|
||||
memset(log_buf, 0, sizeof(log_buf));
|
||||
bpf_program__set_log_buf(skel->progs.bad_relo, log_buf, log_buf_size ?: sizeof(log_buf));
|
||||
bpf_program__set_log_level(skel->progs.bad_relo, 1 | 8); /* BPF_LOG_FIXED to force truncation */
|
||||
|
||||
err = test_log_fixup__load(skel);
|
||||
if (!ASSERT_ERR(err, "load_fail"))
|
||||
@@ -134,6 +135,35 @@ static void missing_map(void)
|
||||
test_log_fixup__destroy(skel);
|
||||
}
|
||||
|
||||
static void missing_kfunc(void)
|
||||
{
|
||||
char log_buf[8 * 1024];
|
||||
struct test_log_fixup* skel;
|
||||
int err;
|
||||
|
||||
skel = test_log_fixup__open();
|
||||
if (!ASSERT_OK_PTR(skel, "skel_open"))
|
||||
return;
|
||||
|
||||
bpf_program__set_autoload(skel->progs.use_missing_kfunc, true);
|
||||
bpf_program__set_log_buf(skel->progs.use_missing_kfunc, log_buf, sizeof(log_buf));
|
||||
|
||||
err = test_log_fixup__load(skel);
|
||||
if (!ASSERT_ERR(err, "load_fail"))
|
||||
goto cleanup;
|
||||
|
||||
ASSERT_HAS_SUBSTR(log_buf,
|
||||
"0: <invalid kfunc call>\n"
|
||||
"kfunc 'bpf_nonexistent_kfunc' is referenced but wasn't resolved\n",
|
||||
"log_buf");
|
||||
|
||||
if (env.verbosity > VERBOSE_NONE)
|
||||
printf("LOG: \n=================\n%s=================\n", log_buf);
|
||||
|
||||
cleanup:
|
||||
test_log_fixup__destroy(skel);
|
||||
}
|
||||
|
||||
void test_log_fixup(void)
|
||||
{
|
||||
if (test__start_subtest("bad_core_relo_trunc_none"))
|
||||
@@ -141,9 +171,11 @@ void test_log_fixup(void)
|
||||
if (test__start_subtest("bad_core_relo_trunc_partial"))
|
||||
bad_core_relo(300, TRUNC_PARTIAL /* truncate original log a bit */);
|
||||
if (test__start_subtest("bad_core_relo_trunc_full"))
|
||||
bad_core_relo(250, TRUNC_FULL /* truncate also libbpf's message patch */);
|
||||
bad_core_relo(210, TRUNC_FULL /* truncate also libbpf's message patch */);
|
||||
if (test__start_subtest("bad_core_relo_subprog"))
|
||||
bad_core_relo_subprog();
|
||||
if (test__start_subtest("missing_map"))
|
||||
missing_map();
|
||||
if (test__start_subtest("missing_kfunc"))
|
||||
missing_kfunc();
|
||||
}
|
||||
|
||||
@@ -4,70 +4,160 @@
|
||||
|
||||
#include "map_kptr.skel.h"
|
||||
#include "map_kptr_fail.skel.h"
|
||||
#include "rcu_tasks_trace_gp.skel.h"
|
||||
|
||||
static void test_map_kptr_success(bool test_run)
|
||||
{
|
||||
LIBBPF_OPTS(bpf_test_run_opts, lopts);
|
||||
LIBBPF_OPTS(bpf_test_run_opts, opts,
|
||||
.data_in = &pkt_v4,
|
||||
.data_size_in = sizeof(pkt_v4),
|
||||
.repeat = 1,
|
||||
);
|
||||
int key = 0, ret, cpu;
|
||||
struct map_kptr *skel;
|
||||
int key = 0, ret;
|
||||
char buf[16];
|
||||
char buf[16], *pbuf;
|
||||
|
||||
skel = map_kptr__open_and_load();
|
||||
if (!ASSERT_OK_PTR(skel, "map_kptr__open_and_load"))
|
||||
return;
|
||||
|
||||
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref), &opts);
|
||||
ASSERT_OK(ret, "test_map_kptr_ref refcount");
|
||||
ASSERT_OK(opts.retval, "test_map_kptr_ref retval");
|
||||
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref1), &opts);
|
||||
ASSERT_OK(ret, "test_map_kptr_ref1 refcount");
|
||||
ASSERT_OK(opts.retval, "test_map_kptr_ref1 retval");
|
||||
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref2), &opts);
|
||||
ASSERT_OK(ret, "test_map_kptr_ref2 refcount");
|
||||
ASSERT_OK(opts.retval, "test_map_kptr_ref2 retval");
|
||||
|
||||
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_ls_map_kptr_ref1), &lopts);
|
||||
ASSERT_OK(ret, "test_ls_map_kptr_ref1 refcount");
|
||||
ASSERT_OK(lopts.retval, "test_ls_map_kptr_ref1 retval");
|
||||
|
||||
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_ls_map_kptr_ref2), &lopts);
|
||||
ASSERT_OK(ret, "test_ls_map_kptr_ref2 refcount");
|
||||
ASSERT_OK(lopts.retval, "test_ls_map_kptr_ref2 retval");
|
||||
|
||||
if (test_run)
|
||||
goto exit;
|
||||
|
||||
cpu = libbpf_num_possible_cpus();
|
||||
if (!ASSERT_GT(cpu, 0, "libbpf_num_possible_cpus"))
|
||||
goto exit;
|
||||
|
||||
pbuf = calloc(cpu, sizeof(buf));
|
||||
if (!ASSERT_OK_PTR(pbuf, "calloc(pbuf)"))
|
||||
goto exit;
|
||||
|
||||
ret = bpf_map__update_elem(skel->maps.array_map,
|
||||
&key, sizeof(key), buf, sizeof(buf), 0);
|
||||
ASSERT_OK(ret, "array_map update");
|
||||
ret = bpf_map__update_elem(skel->maps.array_map,
|
||||
&key, sizeof(key), buf, sizeof(buf), 0);
|
||||
ASSERT_OK(ret, "array_map update2");
|
||||
skel->data->ref--;
|
||||
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts);
|
||||
ASSERT_OK(ret, "test_map_kptr_ref3 refcount");
|
||||
ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");
|
||||
|
||||
ret = bpf_map__update_elem(skel->maps.pcpu_array_map,
|
||||
&key, sizeof(key), pbuf, cpu * sizeof(buf), 0);
|
||||
ASSERT_OK(ret, "pcpu_array_map update");
|
||||
skel->data->ref--;
|
||||
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts);
|
||||
ASSERT_OK(ret, "test_map_kptr_ref3 refcount");
|
||||
ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");
|
||||
|
||||
ret = bpf_map__update_elem(skel->maps.hash_map,
|
||||
&key, sizeof(key), buf, sizeof(buf), 0);
|
||||
ASSERT_OK(ret, "hash_map update");
|
||||
ret = bpf_map__delete_elem(skel->maps.hash_map, &key, sizeof(key), 0);
|
||||
ASSERT_OK(ret, "hash_map delete");
|
||||
skel->data->ref--;
|
||||
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts);
|
||||
ASSERT_OK(ret, "test_map_kptr_ref3 refcount");
|
||||
ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");
|
||||
|
||||
ret = bpf_map__delete_elem(skel->maps.pcpu_hash_map, &key, sizeof(key), 0);
|
||||
ASSERT_OK(ret, "pcpu_hash_map delete");
|
||||
skel->data->ref--;
|
||||
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts);
|
||||
ASSERT_OK(ret, "test_map_kptr_ref3 refcount");
|
||||
ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");
|
||||
|
||||
ret = bpf_map__update_elem(skel->maps.hash_malloc_map,
|
||||
&key, sizeof(key), buf, sizeof(buf), 0);
|
||||
ASSERT_OK(ret, "hash_malloc_map update");
|
||||
ret = bpf_map__delete_elem(skel->maps.hash_malloc_map, &key, sizeof(key), 0);
|
||||
ASSERT_OK(ret, "hash_malloc_map delete");
|
||||
skel->data->ref--;
|
||||
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts);
|
||||
ASSERT_OK(ret, "test_map_kptr_ref3 refcount");
|
||||
ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");
|
||||
|
||||
ret = bpf_map__delete_elem(skel->maps.pcpu_hash_malloc_map, &key, sizeof(key), 0);
|
||||
ASSERT_OK(ret, "pcpu_hash_malloc_map delete");
|
||||
skel->data->ref--;
|
||||
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts);
|
||||
ASSERT_OK(ret, "test_map_kptr_ref3 refcount");
|
||||
ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");
|
||||
|
||||
ret = bpf_map__update_elem(skel->maps.lru_hash_map,
|
||||
&key, sizeof(key), buf, sizeof(buf), 0);
|
||||
ASSERT_OK(ret, "lru_hash_map update");
|
||||
ret = bpf_map__delete_elem(skel->maps.lru_hash_map, &key, sizeof(key), 0);
|
||||
ASSERT_OK(ret, "lru_hash_map delete");
|
||||
skel->data->ref--;
|
||||
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts);
|
||||
ASSERT_OK(ret, "test_map_kptr_ref3 refcount");
|
||||
ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");
|
||||
|
||||
ret = bpf_map__delete_elem(skel->maps.lru_pcpu_hash_map, &key, sizeof(key), 0);
|
||||
ASSERT_OK(ret, "lru_pcpu_hash_map delete");
|
||||
skel->data->ref--;
|
||||
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts);
|
||||
ASSERT_OK(ret, "test_map_kptr_ref3 refcount");
|
||||
ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");
|
||||
|
||||
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_ls_map_kptr_ref_del), &lopts);
|
||||
ASSERT_OK(ret, "test_ls_map_kptr_ref_del delete");
|
||||
skel->data->ref--;
|
||||
ASSERT_OK(lopts.retval, "test_ls_map_kptr_ref_del retval");
|
||||
|
||||
free(pbuf);
|
||||
exit:
|
||||
map_kptr__destroy(skel);
|
||||
}
|
||||
|
||||
void test_map_kptr(void)
|
||||
static int kern_sync_rcu_tasks_trace(struct rcu_tasks_trace_gp *rcu)
|
||||
{
|
||||
if (test__start_subtest("success")) {
|
||||
long gp_seq = READ_ONCE(rcu->bss->gp_seq);
|
||||
LIBBPF_OPTS(bpf_test_run_opts, opts);
|
||||
|
||||
if (!ASSERT_OK(bpf_prog_test_run_opts(bpf_program__fd(rcu->progs.do_call_rcu_tasks_trace),
|
||||
&opts), "do_call_rcu_tasks_trace"))
|
||||
return -EFAULT;
|
||||
if (!ASSERT_OK(opts.retval, "opts.retval == 0"))
|
||||
return -EFAULT;
|
||||
while (gp_seq == READ_ONCE(rcu->bss->gp_seq))
|
||||
sched_yield();
|
||||
return 0;
|
||||
}
|
||||
|
||||
void serial_test_map_kptr(void)
|
||||
{
|
||||
struct rcu_tasks_trace_gp *skel;
|
||||
|
||||
RUN_TESTS(map_kptr_fail);
|
||||
|
||||
skel = rcu_tasks_trace_gp__open_and_load();
|
||||
if (!ASSERT_OK_PTR(skel, "rcu_tasks_trace_gp__open_and_load"))
|
||||
return;
|
||||
if (!ASSERT_OK(rcu_tasks_trace_gp__attach(skel), "rcu_tasks_trace_gp__attach"))
|
||||
goto end;
|
||||
|
||||
if (test__start_subtest("success-map")) {
|
||||
test_map_kptr_success(true);
|
||||
|
||||
ASSERT_OK(kern_sync_rcu_tasks_trace(skel), "sync rcu_tasks_trace");
|
||||
ASSERT_OK(kern_sync_rcu(), "sync rcu");
|
||||
/* Observe refcount dropping to 1 on bpf_map_free_deferred */
|
||||
test_map_kptr_success(false);
|
||||
/* Do test_run twice, so that we see refcount going back to 1
|
||||
* after we leave it in map from first iteration.
|
||||
*/
|
||||
|
||||
ASSERT_OK(kern_sync_rcu_tasks_trace(skel), "sync rcu_tasks_trace");
|
||||
ASSERT_OK(kern_sync_rcu(), "sync rcu");
|
||||
/* Observe refcount dropping to 1 on synchronous delete elem */
|
||||
test_map_kptr_success(true);
|
||||
}
|
||||
|
||||
RUN_TESTS(map_kptr_fail);
|
||||
end:
|
||||
rcu_tasks_trace_gp__destroy(skel);
|
||||
return;
|
||||
}
|
||||
|
||||
162
tools/testing/selftests/bpf/prog_tests/map_ops.c
Normal file
162
tools/testing/selftests/bpf/prog_tests/map_ops.c
Normal file
@@ -0,0 +1,162 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
|
||||
|
||||
#include <errno.h>
|
||||
#include <sys/syscall.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#include "test_map_ops.skel.h"
|
||||
#include "test_progs.h"
|
||||
|
||||
static void map_update(void)
|
||||
{
|
||||
(void)syscall(__NR_getpid);
|
||||
}
|
||||
|
||||
static void map_delete(void)
|
||||
{
|
||||
(void)syscall(__NR_getppid);
|
||||
}
|
||||
|
||||
static void map_push(void)
|
||||
{
|
||||
(void)syscall(__NR_getuid);
|
||||
}
|
||||
|
||||
static void map_pop(void)
|
||||
{
|
||||
(void)syscall(__NR_geteuid);
|
||||
}
|
||||
|
||||
static void map_peek(void)
|
||||
{
|
||||
(void)syscall(__NR_getgid);
|
||||
}
|
||||
|
||||
static void map_for_each_pass(void)
|
||||
{
|
||||
(void)syscall(__NR_gettid);
|
||||
}
|
||||
|
||||
static void map_for_each_fail(void)
|
||||
{
|
||||
(void)syscall(__NR_getpgid);
|
||||
}
|
||||
|
||||
static int setup(struct test_map_ops **skel)
|
||||
{
|
||||
int err = 0;
|
||||
|
||||
if (!skel)
|
||||
return -1;
|
||||
|
||||
*skel = test_map_ops__open();
|
||||
if (!ASSERT_OK_PTR(*skel, "test_map_ops__open"))
|
||||
return -1;
|
||||
|
||||
(*skel)->rodata->pid = getpid();
|
||||
|
||||
err = test_map_ops__load(*skel);
|
||||
if (!ASSERT_OK(err, "test_map_ops__load"))
|
||||
return err;
|
||||
|
||||
err = test_map_ops__attach(*skel);
|
||||
if (!ASSERT_OK(err, "test_map_ops__attach"))
|
||||
return err;
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static void teardown(struct test_map_ops **skel)
|
||||
{
|
||||
if (skel && *skel)
|
||||
test_map_ops__destroy(*skel);
|
||||
}
|
||||
|
||||
static void map_ops_update_delete_subtest(void)
|
||||
{
|
||||
struct test_map_ops *skel;
|
||||
|
||||
if (setup(&skel))
|
||||
goto teardown;
|
||||
|
||||
map_update();
|
||||
ASSERT_OK(skel->bss->err, "map_update_initial");
|
||||
|
||||
map_update();
|
||||
ASSERT_LT(skel->bss->err, 0, "map_update_existing");
|
||||
ASSERT_EQ(skel->bss->err, -EEXIST, "map_update_existing");
|
||||
|
||||
map_delete();
|
||||
ASSERT_OK(skel->bss->err, "map_delete_existing");
|
||||
|
||||
map_delete();
|
||||
ASSERT_LT(skel->bss->err, 0, "map_delete_non_existing");
|
||||
ASSERT_EQ(skel->bss->err, -ENOENT, "map_delete_non_existing");
|
||||
|
||||
teardown:
|
||||
teardown(&skel);
|
||||
}
|
||||
|
||||
static void map_ops_push_peek_pop_subtest(void)
|
||||
{
|
||||
struct test_map_ops *skel;
|
||||
|
||||
if (setup(&skel))
|
||||
goto teardown;
|
||||
|
||||
map_push();
|
||||
ASSERT_OK(skel->bss->err, "map_push_initial");
|
||||
|
||||
map_push();
|
||||
ASSERT_LT(skel->bss->err, 0, "map_push_when_full");
|
||||
ASSERT_EQ(skel->bss->err, -E2BIG, "map_push_when_full");
|
||||
|
||||
map_peek();
|
||||
ASSERT_OK(skel->bss->err, "map_peek");
|
||||
|
||||
map_pop();
|
||||
ASSERT_OK(skel->bss->err, "map_pop");
|
||||
|
||||
map_peek();
|
||||
ASSERT_LT(skel->bss->err, 0, "map_peek_when_empty");
|
||||
ASSERT_EQ(skel->bss->err, -ENOENT, "map_peek_when_empty");
|
||||
|
||||
map_pop();
|
||||
ASSERT_LT(skel->bss->err, 0, "map_pop_when_empty");
|
||||
ASSERT_EQ(skel->bss->err, -ENOENT, "map_pop_when_empty");
|
||||
|
||||
teardown:
|
||||
teardown(&skel);
|
||||
}
|
||||
|
||||
static void map_ops_for_each_subtest(void)
|
||||
{
|
||||
struct test_map_ops *skel;
|
||||
|
||||
if (setup(&skel))
|
||||
goto teardown;
|
||||
|
||||
map_for_each_pass();
|
||||
/* expect to iterate over 1 element */
|
||||
ASSERT_EQ(skel->bss->err, 1, "map_for_each_no_flags");
|
||||
|
||||
map_for_each_fail();
|
||||
ASSERT_LT(skel->bss->err, 0, "map_for_each_with_flags");
|
||||
ASSERT_EQ(skel->bss->err, -EINVAL, "map_for_each_with_flags");
|
||||
|
||||
teardown:
|
||||
teardown(&skel);
|
||||
}
|
||||
|
||||
void test_map_ops(void)
|
||||
{
|
||||
if (test__start_subtest("map_ops_update_delete"))
|
||||
map_ops_update_delete_subtest();
|
||||
|
||||
if (test__start_subtest("map_ops_push_peek_pop"))
|
||||
map_ops_push_peek_pop_subtest();
|
||||
|
||||
if (test__start_subtest("map_ops_for_each"))
|
||||
map_ops_for_each_subtest();
|
||||
}
|
||||
128
tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c
Normal file
128
tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c
Normal file
@@ -0,0 +1,128 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* Copyright (c) 2022 Red Hat */
|
||||
#include <test_progs.h>
|
||||
#include <bpf/btf.h>
|
||||
#include "bpf/libbpf_internal.h"
|
||||
#include "cgroup_helpers.h"
|
||||
|
||||
static const char *module_name = "bpf_testmod";
|
||||
static const char *symbol_name = "bpf_fentry_shadow_test";
|
||||
|
||||
static int get_bpf_testmod_btf_fd(void)
|
||||
{
|
||||
struct bpf_btf_info info;
|
||||
char name[64];
|
||||
__u32 id = 0, len;
|
||||
int err, fd;
|
||||
|
||||
while (true) {
|
||||
err = bpf_btf_get_next_id(id, &id);
|
||||
if (err) {
|
||||
log_err("failed to iterate BTF objects");
|
||||
return err;
|
||||
}
|
||||
|
||||
fd = bpf_btf_get_fd_by_id(id);
|
||||
if (fd < 0) {
|
||||
if (errno == ENOENT)
|
||||
continue; /* expected race: BTF was unloaded */
|
||||
err = -errno;
|
||||
log_err("failed to get FD for BTF object #%d", id);
|
||||
return err;
|
||||
}
|
||||
|
||||
len = sizeof(info);
|
||||
memset(&info, 0, sizeof(info));
|
||||
info.name = ptr_to_u64(name);
|
||||
info.name_len = sizeof(name);
|
||||
|
||||
err = bpf_obj_get_info_by_fd(fd, &info, &len);
|
||||
if (err) {
|
||||
err = -errno;
|
||||
log_err("failed to get info for BTF object #%d", id);
|
||||
close(fd);
|
||||
return err;
|
||||
}
|
||||
|
||||
if (strcmp(name, module_name) == 0)
|
||||
return fd;
|
||||
|
||||
close(fd);
|
||||
}
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
void test_module_fentry_shadow(void)
|
||||
{
|
||||
struct btf *vmlinux_btf = NULL, *mod_btf = NULL;
|
||||
int err, i;
|
||||
int btf_fd[2] = {};
|
||||
int prog_fd[2] = {};
|
||||
int link_fd[2] = {};
|
||||
__s32 btf_id[2] = {};
|
||||
|
||||
LIBBPF_OPTS(bpf_prog_load_opts, load_opts,
|
||||
.expected_attach_type = BPF_TRACE_FENTRY,
|
||||
);
|
||||
|
||||
const struct bpf_insn trace_program[] = {
|
||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
||||
BPF_EXIT_INSN(),
|
||||
};
|
||||
|
||||
vmlinux_btf = btf__load_vmlinux_btf();
|
||||
if (!ASSERT_OK_PTR(vmlinux_btf, "load_vmlinux_btf"))
|
||||
return;
|
||||
|
||||
btf_fd[1] = get_bpf_testmod_btf_fd();
|
||||
if (!ASSERT_GE(btf_fd[1], 0, "get_bpf_testmod_btf_fd"))
|
||||
goto out;
|
||||
|
||||
mod_btf = btf_get_from_fd(btf_fd[1], vmlinux_btf);
|
||||
if (!ASSERT_OK_PTR(mod_btf, "btf_get_from_fd"))
|
||||
goto out;
|
||||
|
||||
btf_id[0] = btf__find_by_name_kind(vmlinux_btf, symbol_name, BTF_KIND_FUNC);
|
||||
if (!ASSERT_GT(btf_id[0], 0, "btf_find_by_name"))
|
||||
goto out;
|
||||
|
||||
btf_id[1] = btf__find_by_name_kind(mod_btf, symbol_name, BTF_KIND_FUNC);
|
||||
if (!ASSERT_GT(btf_id[1], 0, "btf_find_by_name"))
|
||||
goto out;
|
||||
|
||||
for (i = 0; i < 2; i++) {
|
||||
load_opts.attach_btf_id = btf_id[i];
|
||||
load_opts.attach_btf_obj_fd = btf_fd[i];
|
||||
prog_fd[i] = bpf_prog_load(BPF_PROG_TYPE_TRACING, NULL, "GPL",
|
||||
trace_program,
|
||||
sizeof(trace_program) / sizeof(struct bpf_insn),
|
||||
&load_opts);
|
||||
if (!ASSERT_GE(prog_fd[i], 0, "bpf_prog_load"))
|
||||
goto out;
|
||||
|
||||
/* If the verifier incorrectly resolves addresses of the
|
||||
* shadowed functions and uses the same address for both the
|
||||
* vmlinux and the bpf_testmod functions, this will fail on
|
||||
* attempting to create two trampolines for the same address,
|
||||
* which is forbidden.
|
||||
*/
|
||||
link_fd[i] = bpf_link_create(prog_fd[i], 0, BPF_TRACE_FENTRY, NULL);
|
||||
if (!ASSERT_GE(link_fd[i], 0, "bpf_link_create"))
|
||||
goto out;
|
||||
}
|
||||
|
||||
err = bpf_prog_test_run_opts(prog_fd[0], NULL);
|
||||
ASSERT_OK(err, "running test");
|
||||
|
||||
out:
|
||||
btf__free(vmlinux_btf);
|
||||
btf__free(mod_btf);
|
||||
for (i = 0; i < 2; i++) {
|
||||
if (btf_fd[i])
|
||||
close(btf_fd[i]);
|
||||
if (prog_fd[i] > 0)
|
||||
close(prog_fd[i]);
|
||||
if (link_fd[i] > 0)
|
||||
close(link_fd[i]);
|
||||
}
|
||||
}
|
||||
@@ -7,6 +7,8 @@
|
||||
#include "network_helpers.h"
|
||||
#include "mptcp_sock.skel.h"
|
||||
|
||||
#define NS_TEST "mptcp_ns"
|
||||
|
||||
#ifndef TCP_CA_NAME_MAX
|
||||
#define TCP_CA_NAME_MAX 16
|
||||
#endif
|
||||
@@ -138,12 +140,20 @@ static int run_test(int cgroup_fd, int server_fd, bool is_mptcp)
|
||||
|
||||
static void test_base(void)
|
||||
{
|
||||
struct nstoken *nstoken = NULL;
|
||||
int server_fd, cgroup_fd;
|
||||
|
||||
cgroup_fd = test__join_cgroup("/mptcp");
|
||||
if (!ASSERT_GE(cgroup_fd, 0, "test__join_cgroup"))
|
||||
return;
|
||||
|
||||
SYS(fail, "ip netns add %s", NS_TEST);
|
||||
SYS(fail, "ip -net %s link set dev lo up", NS_TEST);
|
||||
|
||||
nstoken = open_netns(NS_TEST);
|
||||
if (!ASSERT_OK_PTR(nstoken, "open_netns"))
|
||||
goto fail;
|
||||
|
||||
/* without MPTCP */
|
||||
server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 0, 0);
|
||||
if (!ASSERT_GE(server_fd, 0, "start_server"))
|
||||
@@ -157,13 +167,18 @@ static void test_base(void)
|
||||
/* with MPTCP */
|
||||
server_fd = start_mptcp_server(AF_INET, NULL, 0, 0);
|
||||
if (!ASSERT_GE(server_fd, 0, "start_mptcp_server"))
|
||||
goto close_cgroup_fd;
|
||||
goto fail;
|
||||
|
||||
ASSERT_OK(run_test(cgroup_fd, server_fd, true), "run_test mptcp");
|
||||
|
||||
close(server_fd);
|
||||
|
||||
close_cgroup_fd:
|
||||
fail:
|
||||
if (nstoken)
|
||||
close_netns(nstoken);
|
||||
|
||||
SYS_NOFAIL("ip netns del " NS_TEST " &> /dev/null");
|
||||
|
||||
close(cgroup_fd);
|
||||
}
|
||||
|
||||
|
||||
93
tools/testing/selftests/bpf/prog_tests/parse_tcp_hdr_opt.c
Normal file
93
tools/testing/selftests/bpf/prog_tests/parse_tcp_hdr_opt.c
Normal file
@@ -0,0 +1,93 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
#include <test_progs.h>
|
||||
#include <network_helpers.h>
|
||||
#include "test_parse_tcp_hdr_opt.skel.h"
|
||||
#include "test_parse_tcp_hdr_opt_dynptr.skel.h"
|
||||
#include "test_tcp_hdr_options.h"
|
||||
|
||||
struct test_pkt {
|
||||
struct ipv6_packet pk6_v6;
|
||||
u8 options[16];
|
||||
} __packed;
|
||||
|
||||
struct test_pkt pkt = {
|
||||
.pk6_v6.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
|
||||
.pk6_v6.iph.nexthdr = IPPROTO_TCP,
|
||||
.pk6_v6.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
|
||||
.pk6_v6.tcp.urg_ptr = 123,
|
||||
.pk6_v6.tcp.doff = 9, /* 16 bytes of options */
|
||||
|
||||
.options = {
|
||||
TCPOPT_MSS, 4, 0x05, 0xB4, TCPOPT_NOP, TCPOPT_NOP,
|
||||
0, 6, 0xBB, 0xBB, 0xBB, 0xBB, TCPOPT_EOL
|
||||
},
|
||||
};
|
||||
|
||||
static void test_parse_opt(void)
|
||||
{
|
||||
struct test_parse_tcp_hdr_opt *skel;
|
||||
struct bpf_program *prog;
|
||||
char buf[128];
|
||||
int err;
|
||||
|
||||
LIBBPF_OPTS(bpf_test_run_opts, topts,
|
||||
.data_in = &pkt,
|
||||
.data_size_in = sizeof(pkt),
|
||||
.data_out = buf,
|
||||
.data_size_out = sizeof(buf),
|
||||
.repeat = 3,
|
||||
);
|
||||
|
||||
skel = test_parse_tcp_hdr_opt__open_and_load();
|
||||
if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
|
||||
return;
|
||||
|
||||
pkt.options[6] = skel->rodata->tcp_hdr_opt_kind_tpr;
|
||||
prog = skel->progs.xdp_ingress_v6;
|
||||
|
||||
err = bpf_prog_test_run_opts(bpf_program__fd(prog), &topts);
|
||||
ASSERT_OK(err, "ipv6 test_run");
|
||||
ASSERT_EQ(topts.retval, XDP_PASS, "ipv6 test_run retval");
|
||||
ASSERT_EQ(skel->bss->server_id, 0xBBBBBBBB, "server id");
|
||||
|
||||
test_parse_tcp_hdr_opt__destroy(skel);
|
||||
}
|
||||
|
||||
static void test_parse_opt_dynptr(void)
|
||||
{
|
||||
struct test_parse_tcp_hdr_opt_dynptr *skel;
|
||||
struct bpf_program *prog;
|
||||
char buf[128];
|
||||
int err;
|
||||
|
||||
LIBBPF_OPTS(bpf_test_run_opts, topts,
|
||||
.data_in = &pkt,
|
||||
.data_size_in = sizeof(pkt),
|
||||
.data_out = buf,
|
||||
.data_size_out = sizeof(buf),
|
||||
.repeat = 3,
|
||||
);
|
||||
|
||||
skel = test_parse_tcp_hdr_opt_dynptr__open_and_load();
|
||||
if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
|
||||
return;
|
||||
|
||||
pkt.options[6] = skel->rodata->tcp_hdr_opt_kind_tpr;
|
||||
prog = skel->progs.xdp_ingress_v6;
|
||||
|
||||
err = bpf_prog_test_run_opts(bpf_program__fd(prog), &topts);
|
||||
ASSERT_OK(err, "ipv6 test_run");
|
||||
ASSERT_EQ(topts.retval, XDP_PASS, "ipv6 test_run retval");
|
||||
ASSERT_EQ(skel->bss->server_id, 0xBBBBBBBB, "server id");
|
||||
|
||||
test_parse_tcp_hdr_opt_dynptr__destroy(skel);
|
||||
}
|
||||
|
||||
void test_parse_tcp_hdr_opt(void)
|
||||
{
|
||||
if (test__start_subtest("parse_tcp_hdr_opt"))
|
||||
test_parse_opt();
|
||||
if (test__start_subtest("parse_tcp_hdr_opt_dynptr"))
|
||||
test_parse_opt_dynptr();
|
||||
}
|
||||
@@ -63,7 +63,8 @@ void test_perf_event_stackmap(void)
|
||||
PERF_SAMPLE_BRANCH_NO_FLAGS |
|
||||
PERF_SAMPLE_BRANCH_NO_CYCLES |
|
||||
PERF_SAMPLE_BRANCH_CALL_STACK,
|
||||
.sample_period = 5000,
|
||||
.freq = 1,
|
||||
.sample_freq = read_perf_max_sample_freq(),
|
||||
.size = sizeof(struct perf_event_attr),
|
||||
};
|
||||
struct perf_event_stackmap *skel;
|
||||
|
||||
@@ -77,6 +77,29 @@ static void test_rbtree_first_and_remove(void)
|
||||
rbtree__destroy(skel);
|
||||
}
|
||||
|
||||
static void test_rbtree_api_release_aliasing(void)
|
||||
{
|
||||
LIBBPF_OPTS(bpf_test_run_opts, opts,
|
||||
.data_in = &pkt_v4,
|
||||
.data_size_in = sizeof(pkt_v4),
|
||||
.repeat = 1,
|
||||
);
|
||||
struct rbtree *skel;
|
||||
int ret;
|
||||
|
||||
skel = rbtree__open_and_load();
|
||||
if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load"))
|
||||
return;
|
||||
|
||||
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_api_release_aliasing), &opts);
|
||||
ASSERT_OK(ret, "rbtree_api_release_aliasing");
|
||||
ASSERT_OK(opts.retval, "rbtree_api_release_aliasing retval");
|
||||
ASSERT_EQ(skel->data->first_data[0], 42, "rbtree_api_release_aliasing first rbtree_remove()");
|
||||
ASSERT_EQ(skel->data->first_data[1], -1, "rbtree_api_release_aliasing second rbtree_remove()");
|
||||
|
||||
rbtree__destroy(skel);
|
||||
}
|
||||
|
||||
void test_rbtree_success(void)
|
||||
{
|
||||
if (test__start_subtest("rbtree_add_nodes"))
|
||||
@@ -85,6 +108,8 @@ void test_rbtree_success(void)
|
||||
test_rbtree_add_and_remove();
|
||||
if (test__start_subtest("rbtree_first_and_remove"))
|
||||
test_rbtree_first_and_remove();
|
||||
if (test__start_subtest("rbtree_api_release_aliasing"))
|
||||
test_rbtree_api_release_aliasing();
|
||||
}
|
||||
|
||||
#define BTF_FAIL_TEST(suffix) \
|
||||
|
||||
@@ -25,10 +25,10 @@ static void test_success(void)
|
||||
|
||||
bpf_program__set_autoload(skel->progs.get_cgroup_id, true);
|
||||
bpf_program__set_autoload(skel->progs.task_succ, true);
|
||||
bpf_program__set_autoload(skel->progs.no_lock, true);
|
||||
bpf_program__set_autoload(skel->progs.two_regions, true);
|
||||
bpf_program__set_autoload(skel->progs.non_sleepable_1, true);
|
||||
bpf_program__set_autoload(skel->progs.non_sleepable_2, true);
|
||||
bpf_program__set_autoload(skel->progs.task_trusted_non_rcuptr, true);
|
||||
err = rcu_read_lock__load(skel);
|
||||
if (!ASSERT_OK(err, "skel_load"))
|
||||
goto out;
|
||||
@@ -69,6 +69,7 @@ static void test_rcuptr_acquire(void)
|
||||
|
||||
static const char * const inproper_region_tests[] = {
|
||||
"miss_lock",
|
||||
"no_lock",
|
||||
"miss_unlock",
|
||||
"non_sleepable_rcu_mismatch",
|
||||
"inproper_sleepable_helper",
|
||||
@@ -99,7 +100,6 @@ static void test_inproper_region(void)
|
||||
}
|
||||
|
||||
static const char * const rcuptr_misuse_tests[] = {
|
||||
"task_untrusted_non_rcuptr",
|
||||
"task_untrusted_rcuptr",
|
||||
"cross_rcu_region",
|
||||
};
|
||||
@@ -128,17 +128,8 @@ static void test_rcuptr_misuse(void)
|
||||
|
||||
void test_rcu_read_lock(void)
|
||||
{
|
||||
struct btf *vmlinux_btf;
|
||||
int cgroup_fd;
|
||||
|
||||
vmlinux_btf = btf__load_vmlinux_btf();
|
||||
if (!ASSERT_OK_PTR(vmlinux_btf, "could not load vmlinux BTF"))
|
||||
return;
|
||||
if (btf__find_by_name_kind(vmlinux_btf, "rcu", BTF_KIND_TYPE_TAG) < 0) {
|
||||
test__skip();
|
||||
goto out;
|
||||
}
|
||||
|
||||
cgroup_fd = test__join_cgroup("/rcu_read_lock");
|
||||
if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup /rcu_read_lock"))
|
||||
goto out;
|
||||
@@ -153,6 +144,5 @@ void test_rcu_read_lock(void)
|
||||
if (test__start_subtest("negative_tests_rcuptr_misuse"))
|
||||
test_rcuptr_misuse();
|
||||
close(cgroup_fd);
|
||||
out:
|
||||
btf__free(vmlinux_btf);
|
||||
out:;
|
||||
}
|
||||
|
||||
16
tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c
Normal file
16
tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c
Normal file
@@ -0,0 +1,16 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
|
||||
|
||||
#include <test_progs.h>
|
||||
#include <network_helpers.h>
|
||||
|
||||
#include "refcounted_kptr.skel.h"
|
||||
#include "refcounted_kptr_fail.skel.h"
|
||||
|
||||
void test_refcounted_kptr(void)
|
||||
{
|
||||
}
|
||||
|
||||
void test_refcounted_kptr_fail(void)
|
||||
{
|
||||
}
|
||||
@@ -64,8 +64,12 @@ static void test_send_signal_common(struct perf_event_attr *attr,
|
||||
ASSERT_EQ(read(pipe_p2c[0], buf, 1), 1, "pipe_read");
|
||||
|
||||
/* wait a little for signal handler */
|
||||
for (int i = 0; i < 1000000000 && !sigusr1_received; i++)
|
||||
for (int i = 0; i < 1000000000 && !sigusr1_received; i++) {
|
||||
j /= i + j + 1;
|
||||
if (!attr)
|
||||
/* trigger the nanosleep tracepoint program. */
|
||||
usleep(1);
|
||||
}
|
||||
|
||||
buf[0] = sigusr1_received ? '2' : '0';
|
||||
ASSERT_EQ(sigusr1_received, 1, "sigusr1_received");
|
||||
|
||||
@@ -18,6 +18,12 @@
|
||||
#include <string.h>
|
||||
#include <sys/select.h>
|
||||
#include <unistd.h>
|
||||
#include <linux/vm_sockets.h>
|
||||
|
||||
/* workaround for older vm_sockets.h */
|
||||
#ifndef VMADDR_CID_LOCAL
|
||||
#define VMADDR_CID_LOCAL 1
|
||||
#endif
|
||||
|
||||
#include <bpf/bpf.h>
|
||||
#include <bpf/libbpf.h>
|
||||
@@ -251,6 +257,16 @@ static void init_addr_loopback6(struct sockaddr_storage *ss, socklen_t *len)
|
||||
*len = sizeof(*addr6);
|
||||
}
|
||||
|
||||
static void init_addr_loopback_vsock(struct sockaddr_storage *ss, socklen_t *len)
|
||||
{
|
||||
struct sockaddr_vm *addr = memset(ss, 0, sizeof(*ss));
|
||||
|
||||
addr->svm_family = AF_VSOCK;
|
||||
addr->svm_port = VMADDR_PORT_ANY;
|
||||
addr->svm_cid = VMADDR_CID_LOCAL;
|
||||
*len = sizeof(*addr);
|
||||
}
|
||||
|
||||
static void init_addr_loopback(int family, struct sockaddr_storage *ss,
|
||||
socklen_t *len)
|
||||
{
|
||||
@@ -261,6 +277,9 @@ static void init_addr_loopback(int family, struct sockaddr_storage *ss,
|
||||
case AF_INET6:
|
||||
init_addr_loopback6(ss, len);
|
||||
return;
|
||||
case AF_VSOCK:
|
||||
init_addr_loopback_vsock(ss, len);
|
||||
return;
|
||||
default:
|
||||
FAIL("unsupported address family %d", family);
|
||||
}
|
||||
@@ -1478,6 +1497,8 @@ static const char *family_str(sa_family_t family)
|
||||
return "IPv6";
|
||||
case AF_UNIX:
|
||||
return "Unix";
|
||||
case AF_VSOCK:
|
||||
return "VSOCK";
|
||||
default:
|
||||
return "unknown";
|
||||
}
|
||||
@@ -1689,6 +1710,151 @@ static void test_unix_redir(struct test_sockmap_listen *skel, struct bpf_map *ma
|
||||
unix_skb_redir_to_connected(skel, map, sotype);
|
||||
}
|
||||
|
||||
/* Returns two connected loopback vsock sockets */
|
||||
static int vsock_socketpair_connectible(int sotype, int *v0, int *v1)
|
||||
{
|
||||
struct sockaddr_storage addr;
|
||||
socklen_t len = sizeof(addr);
|
||||
int s, p, c;
|
||||
|
||||
s = socket_loopback(AF_VSOCK, sotype);
|
||||
if (s < 0)
|
||||
return -1;
|
||||
|
||||
c = xsocket(AF_VSOCK, sotype | SOCK_NONBLOCK, 0);
|
||||
if (c == -1)
|
||||
goto close_srv;
|
||||
|
||||
if (getsockname(s, sockaddr(&addr), &len) < 0)
|
||||
goto close_cli;
|
||||
|
||||
if (connect(c, sockaddr(&addr), len) < 0 && errno != EINPROGRESS) {
|
||||
FAIL_ERRNO("connect");
|
||||
goto close_cli;
|
||||
}
|
||||
|
||||
len = sizeof(addr);
|
||||
p = accept_timeout(s, sockaddr(&addr), &len, IO_TIMEOUT_SEC);
|
||||
if (p < 0)
|
||||
goto close_cli;
|
||||
|
||||
*v0 = p;
|
||||
*v1 = c;
|
||||
|
||||
return 0;
|
||||
|
||||
close_cli:
|
||||
close(c);
|
||||
close_srv:
|
||||
close(s);
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
static void vsock_unix_redir_connectible(int sock_mapfd, int verd_mapfd,
|
||||
enum redir_mode mode, int sotype)
|
||||
{
|
||||
const char *log_prefix = redir_mode_str(mode);
|
||||
char a = 'a', b = 'b';
|
||||
int u0, u1, v0, v1;
|
||||
int sfd[2];
|
||||
unsigned int pass;
|
||||
int err, n;
|
||||
u32 key;
|
||||
|
||||
zero_verdict_count(verd_mapfd);
|
||||
|
||||
if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_NONBLOCK, 0, sfd))
|
||||
return;
|
||||
|
||||
u0 = sfd[0];
|
||||
u1 = sfd[1];
|
||||
|
||||
err = vsock_socketpair_connectible(sotype, &v0, &v1);
|
||||
if (err) {
|
||||
FAIL("vsock_socketpair_connectible() failed");
|
||||
goto close_uds;
|
||||
}
|
||||
|
||||
err = add_to_sockmap(sock_mapfd, u0, v0);
|
||||
if (err) {
|
||||
FAIL("add_to_sockmap failed");
|
||||
goto close_vsock;
|
||||
}
|
||||
|
||||
n = write(v1, &a, sizeof(a));
|
||||
if (n < 0)
|
||||
FAIL_ERRNO("%s: write", log_prefix);
|
||||
if (n == 0)
|
||||
FAIL("%s: incomplete write", log_prefix);
|
||||
if (n < 1)
|
||||
goto out;
|
||||
|
||||
n = recv(mode == REDIR_INGRESS ? u0 : u1, &b, sizeof(b), MSG_DONTWAIT);
|
||||
if (n < 0)
|
||||
FAIL("%s: recv() err, errno=%d", log_prefix, errno);
|
||||
if (n == 0)
|
||||
FAIL("%s: incomplete recv", log_prefix);
|
||||
if (b != a)
|
||||
FAIL("%s: vsock socket map failed, %c != %c", log_prefix, a, b);
|
||||
|
||||
key = SK_PASS;
|
||||
err = xbpf_map_lookup_elem(verd_mapfd, &key, &pass);
|
||||
if (err)
|
||||
goto out;
|
||||
if (pass != 1)
|
||||
FAIL("%s: want pass count 1, have %d", log_prefix, pass);
|
||||
out:
|
||||
key = 0;
|
||||
bpf_map_delete_elem(sock_mapfd, &key);
|
||||
key = 1;
|
||||
bpf_map_delete_elem(sock_mapfd, &key);
|
||||
|
||||
close_vsock:
|
||||
close(v0);
|
||||
close(v1);
|
||||
|
||||
close_uds:
|
||||
close(u0);
|
||||
close(u1);
|
||||
}
|
||||
|
||||
static void vsock_unix_skb_redir_connectible(struct test_sockmap_listen *skel,
|
||||
struct bpf_map *inner_map,
|
||||
int sotype)
|
||||
{
|
||||
int verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
|
||||
int verdict_map = bpf_map__fd(skel->maps.verdict_map);
|
||||
int sock_map = bpf_map__fd(inner_map);
|
||||
int err;
|
||||
|
||||
err = xbpf_prog_attach(verdict, sock_map, BPF_SK_SKB_VERDICT, 0);
|
||||
if (err)
|
||||
return;
|
||||
|
||||
skel->bss->test_ingress = false;
|
||||
vsock_unix_redir_connectible(sock_map, verdict_map, REDIR_EGRESS, sotype);
|
||||
skel->bss->test_ingress = true;
|
||||
vsock_unix_redir_connectible(sock_map, verdict_map, REDIR_INGRESS, sotype);
|
||||
|
||||
xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_VERDICT);
|
||||
}
|
||||
|
||||
static void test_vsock_redir(struct test_sockmap_listen *skel, struct bpf_map *map)
|
||||
{
|
||||
const char *family_name, *map_name;
|
||||
char s[MAX_TEST_NAME];
|
||||
|
||||
family_name = family_str(AF_VSOCK);
|
||||
map_name = map_type_str(map);
|
||||
snprintf(s, sizeof(s), "%s %s %s", map_name, family_name, __func__);
|
||||
if (!test__start_subtest(s))
|
||||
return;
|
||||
|
||||
vsock_unix_skb_redir_connectible(skel, map, SOCK_STREAM);
|
||||
vsock_unix_skb_redir_connectible(skel, map, SOCK_SEQPACKET);
|
||||
}
|
||||
|
||||
static void test_reuseport(struct test_sockmap_listen *skel,
|
||||
struct bpf_map *map, int family, int sotype)
|
||||
{
|
||||
@@ -2060,12 +2226,14 @@ void serial_test_sockmap_listen(void)
|
||||
run_tests(skel, skel->maps.sock_map, AF_INET6);
|
||||
test_unix_redir(skel, skel->maps.sock_map, SOCK_DGRAM);
|
||||
test_unix_redir(skel, skel->maps.sock_map, SOCK_STREAM);
|
||||
test_vsock_redir(skel, skel->maps.sock_map);
|
||||
|
||||
skel->bss->test_sockmap = false;
|
||||
run_tests(skel, skel->maps.sock_hash, AF_INET);
|
||||
run_tests(skel, skel->maps.sock_hash, AF_INET6);
|
||||
test_unix_redir(skel, skel->maps.sock_hash, SOCK_DGRAM);
|
||||
test_unix_redir(skel, skel->maps.sock_hash, SOCK_STREAM);
|
||||
test_vsock_redir(skel, skel->maps.sock_hash);
|
||||
|
||||
test_sockmap_listen__destroy(skel);
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user