mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-04 10:56:06 -04:00
Merge branches 'arm/shmobile', 'arm/renesas', 'arm/msm', 'arm/smmu', 'arm/omap', 'x86/amd', 'x86/vt-d' and 'core' into next
@@ -265,33 +265,41 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
 * For now w.r.t page table cache, mark the range_size as PAGE_SIZE
 */

#ifndef pte_free_tlb
#define pte_free_tlb(tlb, ptep, address) \
	do { \
		__tlb_adjust_range(tlb, address, PAGE_SIZE); \
		__pte_free_tlb(tlb, ptep, address); \
	} while (0)
#endif

#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address) \
	do { \
		__tlb_adjust_range(tlb, address, PAGE_SIZE); \
		__pmd_free_tlb(tlb, pmdp, address); \
	} while (0)
#endif

#ifndef __ARCH_HAS_4LEVEL_HACK
#ifndef pud_free_tlb
#define pud_free_tlb(tlb, pudp, address) \
	do { \
		__tlb_adjust_range(tlb, address, PAGE_SIZE); \
		__pud_free_tlb(tlb, pudp, address); \
	} while (0)
#endif
#endif

#ifndef __ARCH_HAS_5LEVEL_HACK
#ifndef p4d_free_tlb
#define p4d_free_tlb(tlb, pudp, address) \
	do { \
		__tlb_adjust_range(tlb, address, PAGE_SIZE); \
		__p4d_free_tlb(tlb, pudp, address); \
	} while (0)
#endif
#endif

#define tlb_migrate_finish(mm) do {} while (0)

@@ -235,27 +235,25 @@
#define IMX6UL_CLK_CSI_PODF		222
#define IMX6UL_CLK_PLL3_120M		223
#define IMX6UL_CLK_KPP			224
#define IMX6UL_CLK_CKO1_SEL		225
#define IMX6UL_CLK_CKO1_PODF		226
#define IMX6UL_CLK_CKO1			227
#define IMX6UL_CLK_CKO2_SEL		228
#define IMX6UL_CLK_CKO2_PODF		229
#define IMX6UL_CLK_CKO2			230
#define IMX6UL_CLK_CKO			231

/* For i.MX6ULL */
#define IMX6ULL_CLK_ESAI_PRED		232
#define IMX6ULL_CLK_ESAI_PODF		233
#define IMX6ULL_CLK_ESAI_EXTAL		234
#define IMX6ULL_CLK_ESAI_MEM		235
#define IMX6ULL_CLK_ESAI_IPG		236
#define IMX6ULL_CLK_DCP_CLK		237
#define IMX6ULL_CLK_EPDC_PRE_SEL	238
#define IMX6ULL_CLK_EPDC_SEL		239
#define IMX6ULL_CLK_EPDC_PODF		240
#define IMX6ULL_CLK_EPDC_ACLK		241
#define IMX6ULL_CLK_EPDC_PIX		242
#define IMX6ULL_CLK_ESAI_SEL		243
#define IMX6ULL_CLK_ESAI_PRED		225
#define IMX6ULL_CLK_ESAI_PODF		226
#define IMX6ULL_CLK_ESAI_EXTAL		227
#define IMX6ULL_CLK_ESAI_MEM		228
#define IMX6ULL_CLK_ESAI_IPG		229
#define IMX6ULL_CLK_DCP_CLK		230
#define IMX6ULL_CLK_EPDC_PRE_SEL	231
#define IMX6ULL_CLK_EPDC_SEL		232
#define IMX6ULL_CLK_EPDC_PODF		233
#define IMX6ULL_CLK_EPDC_ACLK		234
#define IMX6ULL_CLK_EPDC_PIX		235
#define IMX6ULL_CLK_ESAI_SEL		236
#define IMX6UL_CLK_CKO1_SEL		237
#define IMX6UL_CLK_CKO1_PODF		238
#define IMX6UL_CLK_CKO1			239
#define IMX6UL_CLK_CKO2_SEL		240
#define IMX6UL_CLK_CKO2_PODF		241
#define IMX6UL_CLK_CKO2			242
#define IMX6UL_CLK_CKO			243
#define IMX6UL_CLK_END			244

#endif /* __DT_BINDINGS_CLOCK_IMX6UL_H */

@@ -287,6 +287,20 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);

void blk_mq_quiesce_queue_nowait(struct request_queue *q);

/**
 * blk_mq_mark_complete() - Set request state to complete
 * @rq: request to set to complete state
 *
 * Returns true if request state was successfully set to complete. If
 * successful, the caller is responsible for seeing this request is ended, as
 * blk_mq_complete_request will not work again.
 */
static inline bool blk_mq_mark_complete(struct request *rq)
{
	return cmpxchg(&rq->state, MQ_RQ_IN_FLIGHT, MQ_RQ_COMPLETE) ==
			MQ_RQ_IN_FLIGHT;
}
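A minimal usage sketch (not part of the commit): a driver's timeout handler
can use the helper to win the race against the normal completion path. The
example_abort_cmd() helper is hypothetical.

	static enum blk_eh_timer_return example_timeout(struct request *rq, bool reserved)
	{
		/* Only one path may move IN_FLIGHT -> COMPLETE; if we win,
		 * normal completion is locked out and we must end the request.
		 */
		if (blk_mq_mark_complete(rq)) {
			example_abort_cmd(rq);
			blk_mq_end_request(rq, BLK_STS_TIMEOUT);
		}
		return BLK_EH_DONE;
	}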

/*
 * Driver command data is immediately after the request. So subtract request
 * size to get back to the original request, add request size to get the PDU.

@@ -2,6 +2,7 @@
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/errno.h>
#include <linux/jump_label.h>
#include <uapi/linux/bpf.h>

@@ -188,12 +189,38 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
	\
	__ret; \
})
int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr);
#else

struct bpf_prog;
struct cgroup_bpf {};
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }

static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
					union bpf_attr __user *uattr)
{
	return -EINVAL;
}

#define cgroup_bpf_enabled (0)
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })

@@ -696,6 +696,8 @@ static inline void bpf_map_offload_map_free(struct bpf_map *map)
struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key);
struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key);
int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type);
int sockmap_get_from_fd(const union bpf_attr *attr, int type,
			struct bpf_prog *prog);
#else
static inline struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
{

@@ -714,6 +716,12 @@ static inline int sock_map_prog(struct bpf_map *map,
{
	return -EOPNOTSUPP;
}

static inline int sockmap_get_from_fd(const union bpf_attr *attr, int type,
				      struct bpf_prog *prog)
{
	return -EINVAL;
}
#endif

#if defined(CONFIG_XDP_SOCKETS)

@@ -5,11 +5,12 @@
#include <uapi/linux/bpf.h>

#ifdef CONFIG_BPF_LIRC_MODE2
int lirc_prog_attach(const union bpf_attr *attr);
int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int lirc_prog_detach(const union bpf_attr *attr);
int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr);
#else
static inline int lirc_prog_attach(const union bpf_attr *attr)
static inline int lirc_prog_attach(const union bpf_attr *attr,
				   struct bpf_prog *prog)
{
	return -EINVAL;
}

@@ -5,10 +5,10 @@
#include <uapi/linux/bpfilter.h>

struct sock;
int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char *optval,
int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval,
			    unsigned int optlen);
int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char *optval,
			    int *optlen);
int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval,
			    int __user *optlen);
extern int (*bpfilter_process_sockopt)(struct sock *sk, int optname,
				       char __user *optval,
				       unsigned int optlen, bool is_set);

@@ -65,6 +65,18 @@
#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
#endif

/*
 * Feature detection for gnu_inline (gnu89 extern inline semantics). Either
 * __GNUC_STDC_INLINE__ is defined (not using gnu89 extern inline semantics,
 * and we opt in to the gnu89 semantics), or __GNUC_STDC_INLINE__ is not
 * defined so the gnu89 semantics are the default.
 */
#ifdef __GNUC_STDC_INLINE__
# define __gnu_inline __attribute__((gnu_inline))
#else
# define __gnu_inline
#endif
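A minimal illustration of the gnu89 semantics being selected here (editor's
sketch, not part of the patch): with gnu_inline, an extern inline definition
is only an inlining hint and never emits an externally visible symbol, so a
single out-of-line definition elsewhere carries the symbol.

	/* header */
	extern inline __attribute__((gnu_inline)) int twice(int x)
	{
		return 2 * x;	/* may be inlined; emits no symbol itself */
	}

	/* exactly one .c file provides the out-of-line copy */
	int twice(int x) { return 2 * x; }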

/*
 * Force always-inline if the user requests it so via the .config,
 * or if gcc is too old.

@@ -72,19 +84,22 @@
 * -Wunused-function. This turns out to avoid the need for complex #ifdef
 * directives. Suppress the warning in clang as well by using "unused"
 * function attribute, which is redundant but not harmful for gcc.
 * Prefer gnu_inline, so that extern inline functions do not emit an
 * externally visible function. This makes extern inline behave as per gnu89
 * semantics rather than c99. This prevents multiple symbol definition errors
 * of extern inline functions at link time.
 * A lot of inline functions can cause havoc with function tracing.
 */
#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
    !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
#define inline inline __attribute__((always_inline,unused)) notrace
#define __inline__ __inline__ __attribute__((always_inline,unused)) notrace
#define __inline __inline __attribute__((always_inline,unused)) notrace
#define inline \
	inline __attribute__((always_inline, unused)) notrace __gnu_inline
#else
/* A lot of inline functions can cause havoc with function tracing */
#define inline inline __attribute__((unused)) notrace
#define __inline__ __inline__ __attribute__((unused)) notrace
#define __inline __inline __attribute__((unused)) notrace
#define inline inline __attribute__((unused)) notrace __gnu_inline
#endif

#define __inline__ inline
#define __inline inline
#define __always_inline inline __attribute__((always_inline))
#define noinline __attribute__((noinline))

@@ -124,7 +124,7 @@ static inline void delayacct_blkio_start(void)

static inline void delayacct_blkio_end(struct task_struct *p)
{
	if (current->delays)
	if (p->delays)
		__delayacct_blkio_end(p);
	delayacct_clear_flag(DELAYACCT_PF_BLKIO);
}

@@ -265,11 +265,6 @@ static inline void dmar_copy_shared_irte(struct irte *dst, struct irte *src)
#define PDA_LOW_BIT	26
#define PDA_HIGH_BIT	32

enum {
	IRQ_REMAP_XAPIC_MODE,
	IRQ_REMAP_X2APIC_MODE,
};

/* Can't use the common MSI interrupt functions
 * since DMAR is not a pci device
 */

@@ -11,6 +11,7 @@

#include <linux/fcntl.h>
#include <linux/wait.h>
#include <linux/err.h>

/*
 * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining

@@ -470,9 +470,7 @@ struct sock_fprog_kern {
};

struct bpf_binary_header {
	u16 pages;
	u16 locked:1;

	u32 pages;
	/* Some arches need word alignment for their instructions */
	u8 image[] __aligned(4);
};

@@ -481,7 +479,7 @@ struct bpf_prog {
	u16			pages;		/* Number of allocated pages */
	u16			jited:1,	/* Is our filter JIT'ed? */
				jit_requested:1,/* archs need to JIT the prog */
				locked:1,	/* Program image locked? */
				undo_set_mem:1,	/* Passed set_memory_ro() checkpoint */
				gpl_compatible:1, /* Is filter GPL compatible? */
				cb_access:1,	/* Is control block accessed? */
				dst_needed:1,	/* Do we need dst entry? */

@@ -677,46 +675,24 @@ bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)

static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
#ifdef CONFIG_ARCH_HAS_SET_MEMORY
	fp->locked = 1;
	if (set_memory_ro((unsigned long)fp, fp->pages))
		fp->locked = 0;
#endif
	fp->undo_set_mem = 1;
	set_memory_ro((unsigned long)fp, fp->pages);
}

static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
{
#ifdef CONFIG_ARCH_HAS_SET_MEMORY
	if (fp->locked) {
		WARN_ON_ONCE(set_memory_rw((unsigned long)fp, fp->pages));
		/* In case set_memory_rw() fails, we want to be the first
		 * to crash here instead of some random place later on.
		 */
		fp->locked = 0;
	}
#endif
	if (fp->undo_set_mem)
		set_memory_rw((unsigned long)fp, fp->pages);
}

static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
{
#ifdef CONFIG_ARCH_HAS_SET_MEMORY
	hdr->locked = 1;
	if (set_memory_ro((unsigned long)hdr, hdr->pages))
		hdr->locked = 0;
#endif
	set_memory_ro((unsigned long)hdr, hdr->pages);
}

static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
{
#ifdef CONFIG_ARCH_HAS_SET_MEMORY
	if (hdr->locked) {
		WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages));
		/* In case set_memory_rw() fails, we want to be the first
		 * to crash here instead of some random place later on.
		 */
		hdr->locked = 0;
	}
#endif
	set_memory_rw((unsigned long)hdr, hdr->pages);
}

static inline struct bpf_binary_header *

@@ -728,22 +704,6 @@ bpf_jit_binary_hdr(const struct bpf_prog *fp)
	return (void *)addr;
}

#ifdef CONFIG_ARCH_HAS_SET_MEMORY
static inline int bpf_prog_check_pages_ro_single(const struct bpf_prog *fp)
{
	if (!fp->locked)
		return -ENOLCK;
	if (fp->jited) {
		const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);

		if (!hdr->locked)
			return -ENOLCK;
	}

	return 0;
}
#endif

int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
{

@@ -805,8 +765,8 @@ static inline bool bpf_dump_raw_ok(void)
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len);

static inline int __xdp_generic_ok_fwd_dev(struct sk_buff *skb,
					   struct net_device *fwd)
static inline int xdp_ok_fwd_dev(const struct net_device *fwd,
				 unsigned int pktlen)
{
	unsigned int len;

@@ -814,7 +774,7 @@ static inline int __xdp_generic_ok_fwd_dev(struct sk_buff *skb,
		return -ENETDOWN;

	len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
	if (skb->len > len)
	if (pktlen > len)
		return -EMSGSIZE;

	return 0;
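A sketch of the renamed helper at a call site (editor's illustration of the
generic XDP forwarding path, not quoted from this merge):

	int err = xdp_ok_fwd_dev(fwd, skb->len);

	if (err)		/* -ENETDOWN or -EMSGSIZE */
		return err;
	skb->dev = fwd;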

@@ -2420,6 +2420,7 @@ extern struct file *filp_open(const char *, int, umode_t);
extern struct file *file_open_root(struct dentry *, struct vfsmount *,
				   const char *, int, umode_t);
extern struct file *dentry_open(const struct path *, int, const struct cred *);
extern struct file *filp_clone_open(struct file *);
extern int filp_close(struct file *, fl_owner_t id);

extern struct filename *getname_flags(const char __user *, int, int *);

@@ -16,6 +16,7 @@
#define __FSL_GUTS_H__

#include <linux/types.h>
#include <linux/io.h>

/**
 * Global Utility Registers.

@@ -223,7 +223,6 @@ extern enum ftrace_tracing_type_t ftrace_tracing_type;
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
void clear_ftrace_function(void);

extern void ftrace_stub(unsigned long a0, unsigned long a1,
			struct ftrace_ops *op, struct pt_regs *regs);

@@ -239,7 +238,6 @@ static inline int ftrace_nr_registered_ops(void)
{
	return 0;
}
static inline void clear_ftrace_function(void) { }
static inline void ftrace_kill(void) { }
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }

@@ -511,6 +511,7 @@ struct hid_output_fifo {
#define HID_STAT_ADDED		BIT(0)
#define HID_STAT_PARSED		BIT(1)
#define HID_STAT_DUP_DETECTED	BIT(2)
#define HID_STAT_REPROBED	BIT(3)

struct hid_input {
	struct list_head list;

@@ -579,7 +580,7 @@ struct hid_device { /* device report descriptor */
	bool battery_avoid_query;
#endif

	unsigned int status;	/* see STAT flags above */
	unsigned long status;	/* see STAT flags above */
	unsigned claimed;	/* Claimed by hidinput, hiddev? */
	unsigned quirks;	/* Various quirks the device can pull on us */
	bool io_started;	/* If IO has started */
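Editor's note: widening status from unsigned int to unsigned long matters
because the new HID_STAT_DUP_DETECTED / HID_STAT_REPROBED bits are presumably
manipulated with the atomic bitops (set_bit(), test_and_clear_bit(), ...),
which operate on an unsigned long.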

@@ -105,13 +105,13 @@ static inline bool br_vlan_enabled(const struct net_device *dev)

static inline int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
{
	return -1;
	return -EINVAL;
}

static inline int br_vlan_get_info(const struct net_device *dev, u16 vid,
				   struct bridge_vlan_info *p_vinfo)
{
	return -1;
	return -EINVAL;
}
#endif

@@ -109,6 +109,8 @@ struct ip_mc_list {
extern int ip_check_mc_rcu(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u8 proto);
extern int igmp_rcv(struct sk_buff *);
extern int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr);
extern int ip_mc_join_group_ssm(struct sock *sk, struct ip_mreqn *imr,
				unsigned int mode);
extern int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr);
extern void ip_mc_drop_socket(struct sock *sk);
extern int ip_mc_source(int add, int omode, struct sock *sk,

@@ -31,6 +31,7 @@
#include <linux/list.h>
#include <linux/iommu.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmar.h>

#include <asm/cacheflush.h>
#include <asm/iommu.h>

@@ -114,6 +115,7 @@
 * Extended Capability Register
 */

#define ecap_dit(e)		((e >> 41) & 0x1)
#define ecap_pasid(e)		((e >> 40) & 0x1)
#define ecap_pss(e)		((e >> 35) & 0x1f)
#define ecap_eafs(e)		((e >> 34) & 0x1)

@@ -121,6 +123,7 @@
#define ecap_srs(e)		((e >> 31) & 0x1)
#define ecap_ers(e)		((e >> 30) & 0x1)
#define ecap_prs(e)		((e >> 29) & 0x1)
#define ecap_broken_pasid(e)	((e >> 28) & 0x1)
#define ecap_dis(e)		((e >> 27) & 0x1)
#define ecap_nest(e)		((e >> 26) & 0x1)
#define ecap_mts(e)		((e >> 25) & 0x1)

@@ -283,6 +286,7 @@ enum {
#define QI_DEV_IOTLB_SID(sid)	((u64)((sid) & 0xffff) << 32)
#define QI_DEV_IOTLB_QDEP(qdep)	(((qdep) & 0x1f) << 16)
#define QI_DEV_IOTLB_ADDR(addr)	((u64)(addr) & VTD_PAGE_MASK)
#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
#define QI_DEV_IOTLB_SIZE	1
#define QI_DEV_IOTLB_MAX_INVS	32

@@ -307,6 +311,7 @@ enum {
#define QI_DEV_EIOTLB_PASID(p)	(((u64)p) << 32)
#define QI_DEV_EIOTLB_SID(sid)	((u64)((sid) & 0xffff) << 16)
#define QI_DEV_EIOTLB_QDEP(qd)	((u64)((qd) & 0x1f) << 4)
#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
#define QI_DEV_EIOTLB_MAX_INVS	32

#define QI_PGRP_IDX(idx)	(((u64)(idx)) << 55)

@@ -384,6 +389,42 @@ struct pasid_entry;
struct pasid_state_entry;
struct page_req_dsc;

struct dmar_domain {
	int	nid;			/* node id */

	unsigned iommu_refcnt[DMAR_UNITS_SUPPORTED];
					/* Refcount of devices per iommu */

	u16	iommu_did[DMAR_UNITS_SUPPORTED];
					/* Domain ids per IOMMU. Use u16 since
					 * domain ids are 16 bit wide according
					 * to VT-d spec, section 9.3 */

	bool	has_iotlb_device;
	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte *pgd;		/* virtual address */
	int	gaw;			/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int	agaw;

	int	flags;			/* flags to find out type of domain */

	int	iommu_coherency;	/* indicate coherency of iommu access */
	int	iommu_snooping;		/* indicate snooping control feature */
	int	iommu_count;		/* reference count of iommu */
	int	iommu_superpage;	/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	u64	max_addr;		/* maximum mapped address */

	struct iommu_domain domain;	/* generic domain data structure for
					   iommu core */
};

struct intel_iommu {
	void __iomem	*reg;	/* Pointer to hardware regs, virtual addr */
	u64		reg_phys; /* physical address of hw register set */

@@ -413,11 +454,9 @@ struct intel_iommu {
	 * devices away to userspace processes (e.g. for DPDK) and don't
	 * want to trust that userspace will use *only* the PASID it was
	 * told to. But while it's all driver-arbitrated, we're fine. */
	struct pasid_entry *pasid_table;
	struct pasid_state_entry *pasid_state_table;
	struct page_req_dsc *prq;
	unsigned char prq_name[16];	/* Name for PRQ interrupt */
	struct idr pasid_idr;
	u32 pasid_max;
#endif
	struct q_inval *qi;		/* Queued invalidation info */

@@ -433,6 +472,27 @@ struct intel_iommu {
	u32 flags;	/* Software defined flags */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;		/* link to domain siblings */
	struct list_head global;	/* link to global list */
	struct list_head table;		/* link to pasid table */
	u8 bus;				/* PCI bus number */
	u8 devfn;			/* PCI devfn number */
	u16 pfsid;			/* SRIOV physical function source ID */
	u8 pasid_supported:3;
	u8 pasid_enabled:1;
	u8 pri_supported:1;
	u8 pri_enabled:1;
	u8 ats_supported:1;
	u8 ats_enabled:1;
	u8 ats_qdep;
	struct device *dev;		/* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu;	/* IOMMU used by this device */
	struct dmar_domain *domain;	/* pointer to domain */
	struct pasid_table *pasid_table; /* pasid table */
};

static inline void __iommu_flush_cache(
	struct intel_iommu *iommu, void *addr, int size)
{

@@ -452,16 +512,22 @@ extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
			     u8 fm, u64 type);
extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
			   unsigned int size_order, u64 type);
extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			       u64 addr, unsigned mask);

extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
			       u16 qdep, u64 addr, unsigned mask);
extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);

extern int dmar_ir_support(void);

struct dmar_domain *get_valid_domain_for_dev(struct device *dev);
void *alloc_pgtable_page(int node);
void free_pgtable_page(void *vaddr);
struct intel_iommu *domain_get_iommu(struct dmar_domain *domain);
int for_each_device_domain(int (*fn)(struct device_domain_info *info,
				     void *data), void *data);

#ifdef CONFIG_INTEL_IOMMU_SVM
extern int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu);
extern int intel_svm_free_pasid_tables(struct intel_iommu *iommu);
int intel_svm_init(struct intel_iommu *iommu);
int intel_svm_exit(struct intel_iommu *iommu);
extern int intel_svm_enable_prq(struct intel_iommu *iommu);
extern int intel_svm_finish_prq(struct intel_iommu *iommu);

@@ -485,6 +551,7 @@ struct intel_svm {
	int flags;
	int pasid;
	struct list_head devs;
	struct list_head list;
};

extern int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev);

@@ -166,8 +166,6 @@ struct iommu_resv_region {
 * @detach_dev: detach device from an iommu domain
 * @map: map a physically contiguous memory region to an iommu domain
 * @unmap: unmap a physically contiguous memory region from an iommu domain
 * @map_sg: map a scatter-gather list of physically contiguous memory chunks
 *          to an iommu domain
 * @flush_tlb_all: Synchronously flush all hardware TLBs for this domain
 * @tlb_range_add: Add a given iova range to the flush queue for this domain
 * @tlb_sync: Flush all queued ranges from the hardware TLBs and empty flush

@@ -201,8 +199,6 @@ struct iommu_ops {
		   phys_addr_t paddr, size_t size, int prot);
	size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
			size_t size);
	size_t (*map_sg)(struct iommu_domain *domain, unsigned long iova,
			 struct scatterlist *sg, unsigned int nents, int prot);
	void (*flush_iotlb_all)(struct iommu_domain *domain);
	void (*iotlb_range_add)(struct iommu_domain *domain,
				unsigned long iova, size_t size);

@@ -303,9 +299,8 @@ extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			  size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
			       unsigned long iova, size_t size);
extern size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
				   struct scatterlist *sg, unsigned int nents,
				   int prot);
extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			   struct scatterlist *sg, unsigned int nents, int prot);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
				    iommu_fault_handler_t handler, void *token);

@@ -378,13 +373,6 @@ static inline void iommu_tlb_sync(struct iommu_domain *domain)
	domain->ops->iotlb_sync(domain);
}

static inline size_t iommu_map_sg(struct iommu_domain *domain,
				  unsigned long iova, struct scatterlist *sg,
				  unsigned int nents, int prot)
{
	return domain->ops->map_sg(domain, iova, sg, nents, prot);
}

/* PCI device grouping function */
extern struct iommu_group *pci_device_group(struct device *dev);
/* Generic device grouping function */

@@ -698,4 +686,11 @@ const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)

#endif /* CONFIG_IOMMU_API */

#ifdef CONFIG_IOMMU_DEBUGFS
extern struct dentry *iommu_debugfs_dir;
void iommu_debugfs_setup(void);
#else
static inline void iommu_debugfs_setup(void) {}
#endif

#endif /* __LINUX_IOMMU_H */

@@ -62,7 +62,6 @@ void *kthread_probe_data(struct task_struct *k);
int kthread_park(struct task_struct *k);
void kthread_unpark(struct task_struct *k);
void kthread_parkme(void);
void kthread_park_complete(struct task_struct *k);

int kthreadd(void *unused);
extern struct task_struct *kthreadd_task;

@@ -210,6 +210,7 @@ enum {
	ATA_FLAG_SLAVE_POSS	= (1 << 0), /* host supports slave dev */
					    /* (doesn't imply presence) */
	ATA_FLAG_SATA		= (1 << 1),
	ATA_FLAG_NO_LPM		= (1 << 2), /* host not happy with LPM */
	ATA_FLAG_NO_LOG_PAGE	= (1 << 5), /* do not issue log page read */
	ATA_FLAG_NO_ATAPI	= (1 << 6), /* No ATAPI support */
	ATA_FLAG_PIO_DMA	= (1 << 7), /* PIO cmds via DMA */

@@ -1495,6 +1496,29 @@ static inline bool ata_tag_valid(unsigned int tag)
	return tag < ATA_MAX_QUEUE || ata_tag_internal(tag);
}

#define __ata_qc_for_each(ap, qc, tag, max_tag, fn) \
	for ((tag) = 0; (tag) < (max_tag) && \
	     ({ qc = fn((ap), (tag)); 1; }); (tag)++) \

/*
 * Internal use only, iterate commands ignoring error handling and
 * status of 'qc'.
 */
#define ata_qc_for_each_raw(ap, qc, tag) \
	__ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE, __ata_qc_from_tag)

/*
 * Iterate all potential commands that can be queued
 */
#define ata_qc_for_each(ap, qc, tag) \
	__ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE, ata_qc_from_tag)

/*
 * Like ata_qc_for_each, but with the internal tag included
 */
#define ata_qc_for_each_with_internal(ap, qc, tag) \
	__ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE + 1, ata_qc_from_tag)
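A usage sketch for the new iterators (editor's illustration of a hypothetical
error-handling loop, not taken from this merge):

	struct ata_queued_cmd *qc;
	unsigned int tag;

	ata_qc_for_each(ap, qc, tag) {
		if (qc && (qc->flags & ATA_QCFLAG_FAILED))
			ata_qc_complete(qc);
	}

The NULL check matters: ata_qc_from_tag() returns NULL for tags that are not
active, while the _raw variant hands back the slot unconditionally.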

/*
 * device helpers
 */

@@ -27,6 +27,8 @@
 */
#define MARVELL_PHY_ID_88E6390		0x01410f90

#define MARVELL_PHY_FAMILY_ID(id)	((id) >> 4)

/* struct phy_device dev_flags definitions */
#define MARVELL_PHY_M1145_FLAGS_RESISTANCE	0x00000001
#define MARVELL_PHY_M1118_DNS323_LEDS		0x00000002

@@ -358,6 +358,7 @@ struct mlx5_frag_buf_ctrl {
	struct mlx5_frag_buf	frag_buf;
	u32			sz_m1;
	u32			frag_sz_m1;
	u32			strides_offset;
	u8			log_sz;
	u8			log_stride;
	u8			log_frag_strides;

@@ -983,14 +984,22 @@ static inline u32 mlx5_base_mkey(const u32 key)
	return key & 0xffffff00u;
}

static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz,
				 struct mlx5_frag_buf_ctrl *fbc)
static inline void mlx5_fill_fbc_offset(u8 log_stride, u8 log_sz,
					u32 strides_offset,
					struct mlx5_frag_buf_ctrl *fbc)
{
	fbc->log_stride = log_stride;
	fbc->log_sz     = log_sz;
	fbc->sz_m1	= (1 << fbc->log_sz) - 1;
	fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride;
	fbc->frag_sz_m1	= (1 << fbc->log_frag_strides) - 1;
	fbc->strides_offset = strides_offset;
}

static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz,
				 struct mlx5_frag_buf_ctrl *fbc)
{
	mlx5_fill_fbc_offset(log_stride, log_sz, 0, fbc);
}

static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,

@@ -1004,7 +1013,10 @@ static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
					  u32 ix)
{
	unsigned int frag = (ix >> fbc->log_frag_strides);
	unsigned int frag;

	ix += fbc->strides_offset;
	frag = ix >> fbc->log_frag_strides;

	return fbc->frag_buf.frags[frag].buf +
	       ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
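Worked example (editor's illustration): with log_stride = 6 (64-byte strides)
and PAGE_SHIFT = 12, log_frag_strides = 12 - 6 = 6, so each page-sized
fragment holds 64 strides and frag_sz_m1 = 63. A caller that filled the
control block with strides_offset = 10 and asks for ix = 70 lands on absolute
stride 80: fragment 80 >> 6 = 1, byte offset (80 & 63) << 6 = 1024 within
that fragment.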

@@ -8,6 +8,8 @@

#include <linux/mlx5/driver.h>

#define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_manager)

enum {
	SRIOV_NONE,
	SRIOV_LEGACY,

@@ -922,7 +922,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
	u8 vnic_env_queue_counters[0x1];
	u8 ets[0x1];
	u8 nic_flow_table[0x1];
	u8 eswitch_flow_table[0x1];
	u8 eswitch_manager[0x1];
	u8 device_memory[0x1];
	u8 mcam_reg[0x1];
	u8 pcam_reg[0x1];

@@ -155,7 +155,9 @@ extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
 * mmap() functions).
 */

extern struct kmem_cache *vm_area_cachep;
struct vm_area_struct *vm_area_alloc(struct mm_struct *);
struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
void vm_area_free(struct vm_area_struct *);

#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;

@@ -450,6 +452,23 @@ struct vm_operations_struct {
					unsigned long addr);
};

static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{
	static const struct vm_operations_struct dummy_vm_ops = {};

	vma->vm_mm = mm;
	vma->vm_ops = &dummy_vm_ops;
	INIT_LIST_HEAD(&vma->anon_vma_chain);
}

static inline void vma_set_anonymous(struct vm_area_struct *vma)
{
	vma->vm_ops = NULL;
}
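Usage sketch (editor's illustration of the new helpers; error handling
elided, and the pairing with vm_area_alloc() is an assumption of this sketch):

	struct vm_area_struct *vma = vm_area_alloc(mm);	/* runs vma_init() */

	if (vma)
		vma_set_anonymous(vma);	/* anonymous mapping: vm_ops == NULL */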

/* flush_tlb_range() takes a vma, not a mm, and can care about flags */
#define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }

struct mmu_gather;
struct inode;

@@ -2132,7 +2151,7 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn,
					struct mminit_pfnnid_cache *state);
#endif

#ifdef CONFIG_HAVE_MEMBLOCK
#if defined(CONFIG_HAVE_MEMBLOCK) && !defined(CONFIG_FLAT_NODE_MEM_MAP)
void zero_resv_unavail(void);
#else
static inline void zero_resv_unavail(void) {}

@@ -2789,11 +2789,31 @@ static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp,
	if (PTR_ERR(pp) != -EINPROGRESS)
		NAPI_GRO_CB(skb)->flush |= flush;
}
static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
					       struct sk_buff **pp,
					       int flush,
					       struct gro_remcsum *grc)
{
	if (PTR_ERR(pp) != -EINPROGRESS) {
		NAPI_GRO_CB(skb)->flush |= flush;
		skb_gro_remcsum_cleanup(skb, grc);
		skb->remcsum_offload = 0;
	}
}
#else
static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
{
	NAPI_GRO_CB(skb)->flush |= flush;
}
static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
					       struct sk_buff **pp,
					       int flush,
					       struct gro_remcsum *grc)
{
	NAPI_GRO_CB(skb)->flush |= flush;
	skb_gro_remcsum_cleanup(skb, grc);
	skb->remcsum_offload = 0;
}
#endif

static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,

@@ -368,7 +368,6 @@ struct pci_dev {
	unsigned int	transparent:1;		/* Subtractive decode bridge */
	unsigned int	multifunction:1;	/* Multi-function device */

	unsigned int	is_added:1;
	unsigned int	is_busmaster:1;		/* Is busmaster */
	unsigned int	no_msi:1;		/* May not use MSI */
	unsigned int	no_64bit_msi:1;		/* May only use 32-bit MSIs */

@@ -1240,6 +1239,8 @@ int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
unsigned long pci_address_to_pio(phys_addr_t addr);
phys_addr_t pci_pio_to_address(unsigned long pio);
int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
			   phys_addr_t phys_addr);
void pci_unmap_iospace(struct resource *res);
void __iomem *devm_pci_remap_cfgspace(struct device *dev,
				      resource_size_t offset,

@@ -1130,6 +1130,7 @@ extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct
extern struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
		   u32 max_stack, bool crosstask, bool add_mark);
extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs);
extern int get_callchain_buffers(int max_stack);
extern void put_callchain_buffers(void);

@@ -165,6 +165,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer);
void ring_buffer_record_off(struct ring_buffer *buffer);
void ring_buffer_record_on(struct ring_buffer *buffer);
int ring_buffer_record_is_on(struct ring_buffer *buffer);
int ring_buffer_record_is_set_on(struct ring_buffer *buffer);
void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);

@@ -106,7 +106,14 @@ static inline int rt_mutex_is_locked(struct rt_mutex *lock)
extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key);
extern void rt_mutex_destroy(struct rt_mutex *lock);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass);
#define rt_mutex_lock(lock) rt_mutex_lock_nested(lock, 0)
#else
extern void rt_mutex_lock(struct rt_mutex *lock);
#define rt_mutex_lock_nested(lock, subclass) rt_mutex_lock(lock)
#endif
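Usage sketch (editor's illustration with hypothetical locks): with lockdep
enabled, the subclass tells the validator that two rt-mutexes of the same
lock class are intentionally taken in a known order.

	rt_mutex_lock(&parent->lock);
	rt_mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);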

extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
extern int rt_mutex_timed_lock(struct rt_mutex *lock,
			       struct hrtimer_sleeper *timeout);

@@ -118,7 +118,7 @@ struct task_group;
 * the comment with set_special_state().
 */
#define is_special_task_state(state) \
	((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_DEAD))
	((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))

#define __set_current_state(state_value) \
	do { \

@@ -75,7 +75,7 @@ extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *,
extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
struct task_struct *fork_idle(int);
extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
extern long kernel_wait4(pid_t, int *, int, struct rusage *);
extern long kernel_wait4(pid_t, int __user *, int, struct rusage *);

extern void free_task(struct task_struct *tsk);

@@ -630,6 +630,7 @@ typedef unsigned char *sk_buff_data_t;
 * @hash: the packet hash
 * @queue_mapping: Queue mapping for multiqueue devices
 * @xmit_more: More SKBs are pending for this queue
 * @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
 * @ndisc_nodetype: router type (from link layer)
 * @ooo_okay: allow the mapping of a socket to a queue to be changed
 * @l4_hash: indicate hash is a canonical 4-tuple hash over transport

@@ -735,7 +736,7 @@ struct sk_buff {
				peeked:1,
				head_frag:1,
				xmit_more:1,
				__unused:1; /* one bit hole */
				pfmemalloc:1;

	/* fields enclosed in headers_start/headers_end are copied
	 * using a single memcpy() in __copy_skb_header()

@@ -754,31 +755,30 @@ struct sk_buff {

	__u8			__pkt_type_offset[0];
	__u8			pkt_type:3;
	__u8			pfmemalloc:1;
	__u8			ignore_df:1;

	__u8			nf_trace:1;
	__u8			ip_summed:2;
	__u8			ooo_okay:1;

	__u8			l4_hash:1;
	__u8			sw_hash:1;
	__u8			wifi_acked_valid:1;
	__u8			wifi_acked:1;

	__u8			no_fcs:1;
	/* Indicates the inner headers are valid in the skbuff. */
	__u8			encapsulation:1;
	__u8			encap_hdr_csum:1;
	__u8			csum_valid:1;

	__u8			csum_complete_sw:1;
	__u8			csum_level:2;
	__u8			csum_not_inet:1;

	__u8			dst_pending_confirm:1;
#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8			ndisc_nodetype:2;
#endif
	__u8			ipvs_property:1;

	__u8			inner_protocol_type:1;
	__u8			remcsum_offload:1;
#ifdef CONFIG_NET_SWITCHDEV

@@ -11,6 +11,7 @@
#ifndef _LINUX_SYSCALLS_H
#define _LINUX_SYSCALLS_H

struct __aio_sigset;
struct epoll_event;
struct iattr;
struct inode;

@@ -75,7 +75,7 @@ struct uio_device {
	struct fasync_struct	*async_queue;
	wait_queue_head_t	wait;
	struct uio_info		*info;
	spinlock_t		info_lock;
	struct mutex		info_lock;
	struct kobject		*map_dir;
	struct kobject		*portio_dir;
};

@@ -5835,10 +5835,11 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
/**
 * cfg80211_rx_control_port - notification about a received control port frame
 * @dev: The device the frame matched to
 * @buf: control port frame
 * @len: length of the frame data
 * @addr: The peer from which the frame was received
 * @proto: frame protocol, typically PAE or Pre-authentication
 * @skb: The skb with the control port frame. It is assumed that the skb
 *	is 802.3 formatted (with 802.3 header). The skb can be non-linear.
 *	This function does not take ownership of the skb, so the caller is
 *	responsible for any cleanup. The caller must also ensure that
 *	skb->protocol is set appropriately.
 * @unencrypted: Whether the frame was received unencrypted
 *
 * This function is used to inform userspace about a received control port

@@ -5851,8 +5852,7 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
 * Return: %true if the frame was passed to userspace
 */
bool cfg80211_rx_control_port(struct net_device *dev,
			      const u8 *buf, size_t len,
			      const u8 *addr, u16 proto, bool unencrypted);
			      struct sk_buff *skb, bool unencrypted);

/**
 * cfg80211_cqm_rssi_notify - connection quality monitoring rssi event

@@ -281,6 +281,11 @@ static inline void fib6_info_hold(struct fib6_info *f6i)
	atomic_inc(&f6i->fib6_ref);
}

static inline bool fib6_info_hold_safe(struct fib6_info *f6i)
{
	return atomic_inc_not_zero(&f6i->fib6_ref);
}

static inline void fib6_info_release(struct fib6_info *f6i)
{
	if (f6i && atomic_dec_and_test(&f6i->fib6_ref))

@@ -66,6 +66,12 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr)
		(IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
}

static inline bool rt6_qualify_for_ecmp(const struct fib6_info *f6i)
{
	return (f6i->fib6_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) ==
	       RTF_GATEWAY;
}

void ip6_route_input(struct sk_buff *skb);
struct dst_entry *ip6_route_input_lookup(struct net *net,
					 struct net_device *dev,

@@ -355,14 +355,7 @@ struct ipv6_txoptions *ipv6_dup_options(struct sock *sk,
struct ipv6_txoptions *ipv6_renew_options(struct sock *sk,
					  struct ipv6_txoptions *opt,
					  int newtype,
					  struct ipv6_opt_hdr __user *newopt,
					  int newoptlen);
struct ipv6_txoptions *
ipv6_renew_options_kern(struct sock *sk,
			struct ipv6_txoptions *opt,
			int newtype,
			struct ipv6_opt_hdr *newopt,
			int newoptlen);
					  struct ipv6_opt_hdr *newopt);
struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
					  struct ipv6_txoptions *opt);

@@ -830,7 +823,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
	 * to minimize possibility that any useful information to an
	 * attacker is leaked. Only lower 20 bits are relevant.
	 */
	rol32(hash, 16);
	hash = rol32(hash, 16);

	flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
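Editor's note on the one-line fix above: rol32() is a pure function that
returns the rotated value, so the old bare call discarded its result and the
hash was never actually rotated; assigning the return value makes the
rotation take effect before the label is masked.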

@@ -1107,6 +1100,8 @@ void ipv6_sysctl_unregister(void);

int ipv6_sock_mc_join(struct sock *sk, int ifindex,
		      const struct in6_addr *addr);
int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex,
			  const struct in6_addr *addr, unsigned int mode);
int ipv6_sock_mc_drop(struct sock *sk, int ifindex,
		      const struct in6_addr *addr);
#endif /* _NET_IPV6_H */

@@ -128,6 +128,7 @@ struct net {
#endif
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
	struct netns_nf_frag	nf_frag;
	struct ctl_table_header *nf_frag_frags_hdr;
#endif
	struct sock		*nfnl;
	struct sock		*nfnl_stash;

@@ -150,6 +150,7 @@ static inline void nft_data_debug(const struct nft_data *data)
 * @portid: netlink portID of the original message
 * @seq: netlink sequence number
 * @family: protocol family
 * @level: depth of the chains
 * @report: notify via unicast netlink message
 */
struct nft_ctx {

@@ -160,6 +161,7 @@ struct nft_ctx {
	u32				portid;
	u32				seq;
	u8				family;
	u8				level;
	bool				report;
};

@@ -865,7 +867,6 @@ enum nft_chain_flags {
 * @table: table that this chain belongs to
 * @handle: chain handle
 * @use: number of jump references to this chain
 * @level: length of longest path to this chain
 * @flags: bitmask of enum nft_chain_flags
 * @name: name of the chain
 */

@@ -878,7 +879,6 @@ struct nft_chain {
	struct nft_table	*table;
	u64			handle;
	u32			use;
	u16			level;
	u8			flags:6,
				genmask:2;
	char			*name;

@@ -1124,7 +1124,6 @@ struct nft_flowtable {
	u32			genmask:2,
				use:30;
	u64			handle;
	char			*dev_name[NFT_FLOWTABLE_DEVICE_MAX];
	/* runtime data below here */
	struct nf_hook_ops	*ops ____cacheline_aligned;
	struct nf_flowtable	data;

@@ -65,4 +65,10 @@ extern const struct nft_expr_ops nft_payload_fast_ops;
extern struct static_key_false nft_counters_enabled;
extern struct static_key_false nft_trace_enabled;

extern struct nft_set_type nft_set_rhash_type;
extern struct nft_set_type nft_set_hash_type;
extern struct nft_set_type nft_set_hash_fast_type;
extern struct nft_set_type nft_set_rbtree_type;
extern struct nft_set_type nft_set_bitmap_type;

#endif /* _NET_NF_TABLES_CORE_H */

@@ -64,7 +64,7 @@ nf_tproxy_handle_time_wait4(struct net *net, struct sk_buff *skb,
 * belonging to established connections going through that one.
 */
struct sock *
nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, void *hp,
nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb,
		      const u8 protocol,
		      const __be32 saddr, const __be32 daddr,
		      const __be16 sport, const __be16 dport,

@@ -103,7 +103,7 @@ nf_tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff,
			    struct sock *sk);

struct sock *
nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, void *hp,
nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff,
		      const u8 protocol,
		      const struct in6_addr *saddr, const struct in6_addr *daddr,
		      const __be16 sport, const __be16 dport,

@@ -109,7 +109,6 @@ struct netns_ipv6 {

#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
struct netns_nf_frag {
	struct netns_sysctl_ipv6 sysctl;
	struct netns_frags	frags;
};
#endif

@@ -111,6 +111,11 @@ void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
{
}

static inline bool tcf_block_shared(struct tcf_block *block)
{
	return false;
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;

@@ -7,7 +7,6 @@
#include <linux/tc_act/tc_csum.h>

struct tcf_csum_params {
	int action;
	u32 update_flags;
	struct rcu_head rcu;
};

@@ -18,7 +18,6 @@
struct tcf_tunnel_key_params {
	struct rcu_head		rcu;
	int			tcft_action;
	int			action;
	struct metadata_dst	*tcft_enc_metadata;
};

@@ -342,6 +342,7 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags);

void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks);
static inline void tcp_dec_quickack_mode(struct sock *sk,
					 const unsigned int pkts)
{

@@ -539,6 +540,7 @@ void tcp_send_fin(struct sock *sk);
void tcp_send_active_reset(struct sock *sk, gfp_t priority);
int tcp_send_synack(struct sock *);
void tcp_push_one(struct sock *, unsigned int mss_now);
void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
void tcp_send_ack(struct sock *sk);
void tcp_send_delayed_ack(struct sock *sk);
void tcp_send_loss_probe(struct sock *sk);

@@ -828,12 +830,21 @@ struct tcp_skb_cb {

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))

static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb);
}

#if IS_ENABLED(CONFIG_IPV6)
/* This is the variant of inet6_iif() that must be used by TCP,
 * as TCP moves IP6CB into a different location in skb->cb[]
 */
static inline int tcp_v6_iif(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->header.h6.iif;
}

static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb)
{
	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);

@@ -908,8 +919,6 @@ enum tcp_ca_event {
	CA_EVENT_LOSS,		/* loss timeout */
	CA_EVENT_ECN_NO_CE,	/* ECT set, but not CE marked */
	CA_EVENT_ECN_IS_CE,	/* received CE marked IP packet */
	CA_EVENT_DELAYED_ACK,	/* Delayed ack is sent */
	CA_EVENT_NON_DELAYED_ACK,
};

/* Information about inbound ACK, passed to cong_ops->in_ack_event() */

@@ -60,6 +60,10 @@ struct xdp_sock {
	bool zc;
	/* Protects multiple processes in the control path */
	struct mutex mutex;
	/* Mutual exclusion of NAPI TX thread and sendmsg error paths
	 * in the SKB destructor callback.
	 */
	spinlock_t tx_completion_lock;
	u64 rx_dropped;
};

@@ -29,7 +29,6 @@

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/signal.h>
#include <asm/byteorder.h>

typedef __kernel_ulong_t aio_context_t;

@@ -110,10 +109,5 @@ struct iocb {
#undef IFBIG
#undef IFLITTLE

struct __aio_sigset {
	const sigset_t __user	*sigmask;
	size_t			sigsetsize;
};

#endif /* __LINUX__AIO_ABI_H */

@@ -1857,7 +1857,8 @@ union bpf_attr {
 *		is resolved), the nexthop address is returned in ipv4_dst
 *		or ipv6_dst based on family, smac is set to mac address of
 *		egress device, dmac is set to nexthop mac address, rt_metric
 *		is set to metric from route (IPv4/IPv6 only).
 *		is set to metric from route (IPv4/IPv6 only), and ifindex
 *		is set to the device index of the nexthop from the FIB lookup.
 *
 *		*plen* argument is the size of the passed in struct.
 *		*flags* argument can be a combination of one or more of the

@@ -1873,9 +1874,10 @@
 *		*ctx* is either **struct xdp_md** for XDP programs or
 *		**struct sk_buff** for tc cls_act programs.
 *	Return
 *		Egress device index on success, 0 if packet needs to continue
 *		up the stack for further processing or a negative error in case
 *		of failure.
 *		* < 0 if any input argument is invalid
 *		* 0 on success (packet is forwarded, nexthop neighbor exists)
 *		* > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
 *		  packet is not forwarded or needs assist from full stack
 *
 * int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags)
 *	Description

@@ -2612,6 +2614,18 @@ struct bpf_raw_tracepoint_args {
#define BPF_FIB_LOOKUP_DIRECT  BIT(0)
#define BPF_FIB_LOOKUP_OUTPUT  BIT(1)

enum {
	BPF_FIB_LKUP_RET_SUCCESS,      /* lookup successful */
	BPF_FIB_LKUP_RET_BLACKHOLE,    /* dest is blackholed; can be dropped */
	BPF_FIB_LKUP_RET_UNREACHABLE,  /* dest is unreachable; can be dropped */
	BPF_FIB_LKUP_RET_PROHIBIT,     /* dest not allowed; can be dropped */
	BPF_FIB_LKUP_RET_NOT_FWDED,    /* packet is not forwarded */
	BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */
	BPF_FIB_LKUP_RET_UNSUPP_LWT,   /* fwd requires encapsulation */
	BPF_FIB_LKUP_RET_NO_NEIGH,     /* no neighbor entry for nh */
	BPF_FIB_LKUP_RET_FRAG_NEEDED,  /* fragmentation required to fwd */
};
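A sketch of how an XDP program might act on the new return codes (editor's
illustration; assumes a populated struct bpf_fib_lookup named params):

	int rc = bpf_fib_lookup(ctx, &params, sizeof(params), 0);

	if (rc == BPF_FIB_LKUP_RET_SUCCESS)
		return bpf_redirect(params.ifindex, 0);
	if (rc < 0)
		return XDP_ABORTED;	/* invalid arguments */
	return XDP_PASS;		/* let the full stack handle it */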

struct bpf_fib_lookup {
	/* input: network family for lookup (AF_INET, AF_INET6)
	 * output: network family of egress nexthop

@@ -2625,7 +2639,11 @@ struct bpf_fib_lookup {

	/* total length of packet from network header - used for MTU check */
	__u16	tot_len;
	__u32	ifindex;  /* L3 device index for lookup */

	/* input: L3 device index for lookup
	 * output: device index from FIB lookup
	 */
	__u32	ifindex;

	union {
		/* inputs to lookup */

@@ -76,7 +76,7 @@ struct btf_type {
 */
#define BTF_INT_ENCODING(VAL)	(((VAL) & 0x0f000000) >> 24)
#define BTF_INT_OFFSET(VAL)	(((VAL & 0x00ff0000)) >> 16)
#define BTF_INT_BITS(VAL)	((VAL) & 0x0000ffff)
#define BTF_INT_BITS(VAL)	((VAL) & 0x000000ff)
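Editor's note: BTF_INT_BITS() reports how many bits of the underlying int are
actually used, and the encoding reserves only the low byte for it, so the old
0x0000ffff mask could leak unrelated bits into the result. For example,
VAL = 0x01000020 decodes as BTF_INT_ENCODING(VAL) = 1 (BTF_INT_SIGNED) and
BTF_INT_BITS(VAL) = 0x20, i.e. a signed 32-bit integer.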

/* Attributes stored in the BTF_INT_ENCODING */
#define BTF_INT_SIGNED	(1 << 0)

@@ -226,7 +226,7 @@ enum tunable_id {
	ETHTOOL_TX_COPYBREAK,
	ETHTOOL_PFC_PREVENTION_TOUT, /* timeout in msecs */
	/*
	 * Add your fresh new tubale attribute above and remember to update
	 * Add your fresh new tunable attribute above and remember to update
	 * tunable_strings[] in net/core/ethtool.c
	 */
	__ETHTOOL_TUNABLE_COUNT,

@@ -143,6 +143,8 @@ enum perf_event_sample_format {
	PERF_SAMPLE_PHYS_ADDR			= 1U << 19,

	PERF_SAMPLE_MAX = 1U << 20,		/* non-ABI */

	__PERF_SAMPLE_CALLCHAIN_EARLY		= 1ULL << 63,
};

/*

@@ -10,13 +10,8 @@
 * Copyright (c) 2015-2018 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifdef __KERNEL__
# include <linux/types.h>
#else
# include <stdint.h>
#endif

#include <linux/types_32_64.h>
#include <linux/types.h>
#include <asm/byteorder.h>

enum rseq_cpu_id_state {
	RSEQ_CPU_ID_UNINITIALIZED = -1,

@@ -52,10 +47,10 @@ struct rseq_cs {
	__u32 version;
	/* enum rseq_cs_flags */
	__u32 flags;
	LINUX_FIELD_u32_u64(start_ip);
	__u64 start_ip;
	/* Offset from start_ip. */
	LINUX_FIELD_u32_u64(post_commit_offset);
	LINUX_FIELD_u32_u64(abort_ip);
	__u64 post_commit_offset;
	__u64 abort_ip;
} __attribute__((aligned(4 * sizeof(__u64))));

/*

@@ -67,28 +62,30 @@ struct rseq_cs {
struct rseq {
	/*
	 * Restartable sequences cpu_id_start field. Updated by the
	 * kernel, and read by user-space with single-copy atomicity
	 * semantics. Aligned on 32-bit. Always contains a value in the
	 * range of possible CPUs, although the value may not be the
	 * actual current CPU (e.g. if rseq is not initialized). This
	 * CPU number value should always be compared against the value
	 * of the cpu_id field before performing a rseq commit or
	 * returning a value read from a data structure indexed using
	 * the cpu_id_start value.
	 * kernel. Read by user-space with single-copy atomicity
	 * semantics. This field should only be read by the thread which
	 * registered this data structure. Aligned on 32-bit. Always
	 * contains a value in the range of possible CPUs, although the
	 * value may not be the actual current CPU (e.g. if rseq is not
	 * initialized). This CPU number value should always be compared
	 * against the value of the cpu_id field before performing a rseq
	 * commit or returning a value read from a data structure indexed
	 * using the cpu_id_start value.
	 */
	__u32 cpu_id_start;
	/*
	 * Restartable sequences cpu_id field. Updated by the kernel,
	 * and read by user-space with single-copy atomicity semantics.
	 * Aligned on 32-bit. Values RSEQ_CPU_ID_UNINITIALIZED and
	 * RSEQ_CPU_ID_REGISTRATION_FAILED have a special semantic: the
	 * former means "rseq uninitialized", and latter means "rseq
	 * initialization failed". This value is meant to be read within
	 * rseq critical sections and compared with the cpu_id_start
	 * value previously read, before performing the commit instruction,
	 * or read and compared with the cpu_id_start value before returning
	 * a value loaded from a data structure indexed using the
	 * cpu_id_start value.
	 * Restartable sequences cpu_id field. Updated by the kernel.
	 * Read by user-space with single-copy atomicity semantics. This
	 * field should only be read by the thread which registered this
	 * data structure. Aligned on 32-bit. Values
	 * RSEQ_CPU_ID_UNINITIALIZED and RSEQ_CPU_ID_REGISTRATION_FAILED
	 * have a special semantic: the former means "rseq uninitialized",
	 * and latter means "rseq initialization failed". This value is
	 * meant to be read within rseq critical sections and compared
	 * with the cpu_id_start value previously read, before performing
	 * the commit instruction, or read and compared with the
	 * cpu_id_start value before returning a value loaded from a data
	 * structure indexed using the cpu_id_start value.
	 */
	__u32 cpu_id;
	/*

@@ -105,27 +102,44 @@ struct rseq {
	 * targeted by the rseq_cs. Also needs to be set to NULL by user-space
	 * before reclaiming memory that contains the targeted struct rseq_cs.
	 *
	 * Read and set by the kernel with single-copy atomicity semantics.
	 * Set by user-space with single-copy atomicity semantics. Aligned
	 * on 64-bit.
	 * Read and set by the kernel. Set by user-space with single-copy
	 * atomicity semantics. This field should only be updated by the
	 * thread which registered this data structure. Aligned on 64-bit.
	 */
	LINUX_FIELD_u32_u64(rseq_cs);
	union {
		__u64 ptr64;
#ifdef __LP64__
		__u64 ptr;
#else
		struct {
#if (defined(__BYTE_ORDER) && (__BYTE_ORDER == __BIG_ENDIAN)) || defined(__BIG_ENDIAN)
			__u32 padding;	/* Initialized to zero. */
			__u32 ptr32;
#else /* LITTLE */
			__u32 ptr32;
			__u32 padding;	/* Initialized to zero. */
#endif /* ENDIAN */
		} ptr;
#endif
	} rseq_cs;

	/*
	 * - RSEQ_DISABLE flag:
	 * Restartable sequences flags field.
	 *
	 * This field should only be updated by the thread which
	 * registered this data structure. Read by the kernel.
	 * Mainly used for single-stepping through rseq critical sections
	 * with debuggers.
	 *
	 *   Fallback fast-track flag for single-stepping.
	 *   Set by user-space if lack of progress is detected.
	 *   Cleared by user-space after rseq finish.
	 *   Read by the kernel.
	 * - RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT
	 *   Inhibit instruction sequence block restart and event
	 *   counter increment on preemption for this thread.
	 *   Inhibit instruction sequence block restart on preemption
	 *   for this thread.
	 * - RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL
	 *   Inhibit instruction sequence block restart and event
	 *   counter increment on signal delivery for this thread.
	 *   Inhibit instruction sequence block restart on signal
	 *   delivery for this thread.
	 * - RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE
	 *   Inhibit instruction sequence block restart and event
	 *   counter increment on migration for this thread.
	 *   Inhibit instruction sequence block restart on migration for
	 *   this thread.
	 */
	__u32 flags;
} __attribute__((aligned(4 * sizeof(__u64))));

@@ -127,6 +127,10 @@ enum {

#define TCP_CM_INQ		TCP_INQ

#define TCP_REPAIR_ON		1
#define TCP_REPAIR_OFF		0
#define TCP_REPAIR_OFF_NO_WP	-1	/* Turn off without window probes */

struct tcp_repair_opt {
	__u32	opt_code;
	__u32	opt_val;

@@ -1,50 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
#ifndef _UAPI_LINUX_TYPES_32_64_H
#define _UAPI_LINUX_TYPES_32_64_H

/*
 * linux/types_32_64.h
 *
 * Integer type declaration for pointers across 32-bit and 64-bit systems.
 *
 * Copyright (c) 2015-2018 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifdef __KERNEL__
# include <linux/types.h>
#else
# include <stdint.h>
#endif

#include <asm/byteorder.h>

#ifdef __BYTE_ORDER
# if (__BYTE_ORDER == __BIG_ENDIAN)
#  define LINUX_BYTE_ORDER_BIG_ENDIAN
# else
#  define LINUX_BYTE_ORDER_LITTLE_ENDIAN
# endif
#else
# ifdef __BIG_ENDIAN
#  define LINUX_BYTE_ORDER_BIG_ENDIAN
# else
#  define LINUX_BYTE_ORDER_LITTLE_ENDIAN
# endif
#endif

#ifdef __LP64__
# define LINUX_FIELD_u32_u64(field)			__u64 field
# define LINUX_FIELD_u32_u64_INIT_ONSTACK(field, v)	field = (intptr_t)v
#else
# ifdef LINUX_BYTE_ORDER_BIG_ENDIAN
#  define LINUX_FIELD_u32_u64(field)	__u32 field ## _padding, field
#  define LINUX_FIELD_u32_u64_INIT_ONSTACK(field, v)	\
	field ## _padding = 0, field = (intptr_t)v
# else
#  define LINUX_FIELD_u32_u64(field)	__u32 field, field ## _padding
#  define LINUX_FIELD_u32_u64_INIT_ONSTACK(field, v)	\
	field = (intptr_t)v, field ## _padding = 0
# endif
#endif

#endif /* _UAPI_LINUX_TYPES_32_64_H */