spi: Merge up fixes
A patch for Qualcomm depends on some fixes.
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -270,6 +270,18 @@ static inline void bitmap_copy_clear_tail(unsigned long *dst,
 	dst[nbits / BITS_PER_LONG] &= BITMAP_LAST_WORD_MASK(nbits);
 }
 
+static inline void bitmap_copy_and_extend(unsigned long *to,
+					  const unsigned long *from,
+					  unsigned int count, unsigned int size)
+{
+	unsigned int copy = BITS_TO_LONGS(count);
+
+	memcpy(to, from, copy * sizeof(long));
+	if (count % BITS_PER_LONG)
+		to[copy - 1] &= BITMAP_LAST_WORD_MASK(count);
+	memset(to + copy, 0, bitmap_size(size) - copy * sizeof(long));
+}
+
 /*
  * On 32-bit systems bitmaps are represented as u32 arrays internally. On LE64
  * machines the order of hi and lo parts of numbers match the bitmap structure.
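
Editor's note: the new bitmap_copy_and_extend() copies the low 'count' bits and zero-fills the remainder up to 'size' bits. A brief usage sketch (illustrative, not from the patch):

	/* Hypothetical example: widen a 10-bit mask into a 64-bit bitmap,
	 * with bits 10..63 guaranteed to be cleared afterwards.
	 */
	DECLARE_BITMAP(src, 10);
	DECLARE_BITMAP(dst, 64);

	bitmap_zero(src, 10);
	bitmap_set(src, 0, 3);				/* bits 0-2 set */
	bitmap_copy_and_extend(dst, src, 10, 64);	/* bits 3-63 cleared */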

--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1296,12 +1296,7 @@ bdev_max_secure_erase_sectors(struct block_device *bdev)
 
 static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
 {
-	struct request_queue *q = bdev_get_queue(bdev);
-
-	if (q)
-		return q->limits.max_write_zeroes_sectors;
-
-	return 0;
+	return bdev_get_queue(bdev)->limits.max_write_zeroes_sectors;
 }
 
 static inline bool bdev_nonrot(struct block_device *bdev)
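
Editor's note: with the redundant NULL check gone, the helper is again a plain limit lookup; callers treat a result of zero as "write-zeroes unsupported". A hedged caller sketch (not from the patch):

	/* Illustrative: refuse a write-zeroes request the device cannot do. */
	if (!bdev_write_zeroes_sectors(bdev))
		return -EOPNOTSUPP;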

--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -856,8 +856,8 @@ static inline u32 type_flag(u32 type)
 /* only use after check_attach_btf_id() */
 static inline enum bpf_prog_type resolve_prog_type(const struct bpf_prog *prog)
 {
-	return (prog->type == BPF_PROG_TYPE_EXT && prog->aux->dst_prog) ?
-		prog->aux->dst_prog->type : prog->type;
+	return (prog->type == BPF_PROG_TYPE_EXT && prog->aux->saved_dst_prog_type) ?
+		prog->aux->saved_dst_prog_type : prog->type;
 }
 
 static inline bool bpf_prog_check_recur(const struct bpf_prog *prog)

--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -296,6 +296,15 @@ static inline void *offset_to_ptr(const int *off)
 #define is_signed_type(type) (((type)(-1)) < (__force type)1)
 #define is_unsigned_type(type) (!is_signed_type(type))
 
+/*
+ * Useful shorthand for "is this condition known at compile-time?"
+ *
+ * Note that the condition may involve non-constant values,
+ * but the compiler may know enough about the details of the
+ * values to determine that the condition is statically true.
+ */
+#define statically_true(x) (__builtin_constant_p(x) && (x))
+
 /*
  * This is needed in functions which generate the stack canary, see
  * arch/x86/kernel/smpboot.c::start_secondary() for an example.
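
Editor's note: a small sketch of how statically_true() behaves (my example, not part of the patch). It reports true only when the compiler can prove the condition, and degrades to false for runtime values:

	_Static_assert(statically_true(8 > 4), "provably true at compile time");

	static inline int demo(int n)
	{
		/* For a runtime 'n', __builtin_constant_p(n > 4) is false,
		 * so this returns 0 even when n > 4 would hold at runtime.
		 */
		return statically_true(n > 4);
	}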

--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -100,7 +100,6 @@ enum cpuhp_state {
 	CPUHP_WORKQUEUE_PREP,
 	CPUHP_POWER_NUMA_PREPARE,
 	CPUHP_HRTIMERS_PREPARE,
-	CPUHP_PROFILE_PREPARE,
 	CPUHP_X2APIC_PREPARE,
 	CPUHP_SMPCFD_PREPARE,
 	CPUHP_RELAY_PREPARE,
@@ -148,6 +147,7 @@ enum cpuhp_state {
 	CPUHP_AP_IRQ_LOONGARCH_STARTING,
 	CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
 	CPUHP_AP_IRQ_RISCV_IMSIC_STARTING,
+	CPUHP_AP_IRQ_RISCV_SBI_IPI_STARTING,
 	CPUHP_AP_ARM_MVEBU_COHERENCY,
 	CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
 	CPUHP_AP_PERF_X86_STARTING,

--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -1037,7 +1037,7 @@ void init_cpu_online(const struct cpumask *src);
 	assign_bit(cpumask_check(cpu), cpumask_bits(mask), (val))
 
 #define set_cpu_possible(cpu, possible)	assign_cpu((cpu), &__cpu_possible_mask, (possible))
-#define set_cpu_enabled(cpu, enabled)	assign_cpu((cpu), &__cpu_possible_mask, (enabled))
+#define set_cpu_enabled(cpu, enabled)	assign_cpu((cpu), &__cpu_enabled_mask, (enabled))
 #define set_cpu_present(cpu, present)	assign_cpu((cpu), &__cpu_present_mask, (present))
 #define set_cpu_active(cpu, active)	assign_cpu((cpu), &__cpu_active_mask, (active))
 #define set_cpu_dying(cpu, dying)	assign_cpu((cpu), &__cpu_dying_mask, (dying))

--- a/include/linux/dsa/ocelot.h
+++ b/include/linux/dsa/ocelot.h
@@ -5,6 +5,8 @@
 #ifndef _NET_DSA_TAG_OCELOT_H
 #define _NET_DSA_TAG_OCELOT_H
 
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
 #include <linux/kthread.h>
 #include <linux/packing.h>
 #include <linux/skbuff.h>
@@ -273,4 +275,49 @@ static inline u32 ocelot_ptp_rew_op(struct sk_buff *skb)
 	return rew_op;
 }
 
+/**
+ * ocelot_xmit_get_vlan_info: Determine VLAN_TCI and TAG_TYPE for injected frame
+ * @skb: Pointer to socket buffer
+ * @br: Pointer to bridge device that the port is under, if any
+ * @vlan_tci:
+ * @tag_type:
+ *
+ * If the port is under a VLAN-aware bridge, remove the VLAN header from the
+ * payload and move it into the DSA tag, which will make the switch classify
+ * the packet to the bridge VLAN. Otherwise, leave the classified VLAN at zero,
+ * which is the pvid of standalone ports (OCELOT_STANDALONE_PVID), although not
+ * of VLAN-unaware bridge ports (that would be ocelot_vlan_unaware_pvid()).
+ * Anyway, VID 0 is fine because it is stripped on egress for these port modes,
+ * and source address learning is not performed for packets injected from the
+ * CPU anyway, so it doesn't matter that the VID is "wrong".
+ */
+static inline void ocelot_xmit_get_vlan_info(struct sk_buff *skb,
+					     struct net_device *br,
+					     u64 *vlan_tci, u64 *tag_type)
+{
+	struct vlan_ethhdr *hdr;
+	u16 proto, tci;
+
+	if (!br || !br_vlan_enabled(br)) {
+		*vlan_tci = 0;
+		*tag_type = IFH_TAG_TYPE_C;
+		return;
+	}
+
+	hdr = (struct vlan_ethhdr *)skb_mac_header(skb);
+	br_vlan_get_proto(br, &proto);
+
+	if (ntohs(hdr->h_vlan_proto) == proto) {
+		vlan_remove_tag(skb, &tci);
+		*vlan_tci = tci;
+	} else {
+		rcu_read_lock();
+		br_vlan_get_pvid_rcu(br, &tci);
+		rcu_read_unlock();
+		*vlan_tci = tci;
+	}
+
+	*tag_type = (proto != ETH_P_8021Q) ? IFH_TAG_TYPE_S : IFH_TAG_TYPE_C;
+}
+
 #endif
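
Editor's note: a hedged sketch of how a tagger transmit path might consume this helper (the surrounding variables are assumptions, not from the patch):

	u64 vlan_tci, tag_type;

	/* 'br' would be the bridge device the port is under, or NULL. */
	ocelot_xmit_get_vlan_info(skb, br, &vlan_tci, &tag_type);
	/* vlan_tci and tag_type are then packed into the injection header. */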

--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -736,10 +736,10 @@ struct kernel_ethtool_ts_info {
  * @rxfh_key_space: same as @rxfh_indir_space, but for the key.
  * @rxfh_priv_size: size of the driver private data area the core should
  *	allocate for an RSS context (in &struct ethtool_rxfh_context).
- * @rxfh_max_context_id: maximum (exclusive) supported RSS context ID. If this
- *	is zero then the core may choose any (nonzero) ID, otherwise the core
- *	will only use IDs strictly less than this value, as the @rss_context
- *	argument to @create_rxfh_context and friends.
+ * @rxfh_max_num_contexts: maximum (exclusive) supported RSS context ID.
+ *	If this is zero then the core may choose any (nonzero) ID, otherwise
+ *	the core will only use IDs strictly less than this value, as the
+ *	@rss_context argument to @create_rxfh_context and friends.
  * @supported_coalesce_params: supported types of interrupt coalescing.
  * @supported_ring_params: supported ring params.
  * @get_drvinfo: Report driver/device information. Modern drivers no
@@ -954,7 +954,7 @@ struct ethtool_ops {
 	u32	rxfh_indir_space;
 	u16	rxfh_key_space;
 	u16	rxfh_priv_size;
-	u32	rxfh_max_context_id;
+	u32	rxfh_max_num_contexts;
 	u32	supported_coalesce_params;
 	u32	supported_ring_params;
 	void	(*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);

--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -110,7 +110,7 @@ DEFINE_CLASS(get_unused_fd, int, if (_T >= 0) put_unused_fd(_T),
  *
  *	f = dentry_open(&path, O_RDONLY, current_cred());
  *	if (IS_ERR(f))
- *		return PTR_ERR(fd);
+ *		return PTR_ERR(f);
  *
  *	fd_install(fd, f);
  *	return take_fd(fd);

--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2392,6 +2392,9 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
  *
  * I_PINNING_FSCACHE_WB	Inode is pinning an fscache object for writeback.
  *
+ * I_LRU_ISOLATING	Inode is pinned being isolated from LRU without holding
+ *			i_count.
+ *
  * Q: What is the difference between I_WILL_FREE and I_FREEING?
  */
 #define I_DIRTY_SYNC		(1 << 0)
@@ -2415,6 +2418,8 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
 #define I_DONTCACHE		(1 << 16)
 #define I_SYNC_QUEUED		(1 << 17)
 #define I_PINNING_NETFS_WB	(1 << 18)
+#define __I_LRU_ISOLATING	19
+#define I_LRU_ISOLATING		(1 << __I_LRU_ISOLATING)
 
 #define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
 #define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)

--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -944,10 +944,37 @@ static inline bool htlb_allow_alloc_fallback(int reason)
 static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
 					   struct mm_struct *mm, pte_t *pte)
 {
-	if (huge_page_size(h) == PMD_SIZE)
+	const unsigned long size = huge_page_size(h);
+
+	VM_WARN_ON(size == PAGE_SIZE);
+
+	/*
+	 * hugetlb must use the exact same PT locks as core-mm page table
+	 * walkers would. When modifying a PTE table, hugetlb must take the
+	 * PTE PT lock, when modifying a PMD table, hugetlb must take the PMD
+	 * PT lock etc.
+	 *
+	 * The expectation is that any hugetlb folio smaller than a PMD is
+	 * always mapped into a single PTE table and that any hugetlb folio
+	 * smaller than a PUD (but at least as big as a PMD) is always mapped
+	 * into a single PMD table.
+	 *
+	 * If that does not hold for an architecture, then that architecture
+	 * must disable split PT locks such that all *_lockptr() functions
+	 * will give us the same result: the per-MM PT lock.
+	 *
+	 * Note that with e.g., CONFIG_PGTABLE_LEVELS=2 where
+	 * PGDIR_SIZE==P4D_SIZE==PUD_SIZE==PMD_SIZE, we'd use pud_lockptr()
+	 * and core-mm would use pmd_lockptr(). However, in such configurations
+	 * split PMD locks are disabled -- they don't make sense on a single
+	 * PGDIR page table -- and the end result is the same.
+	 */
+	if (size >= PUD_SIZE)
+		return pud_lockptr(mm, (pud_t *) pte);
+	else if (size >= PMD_SIZE || IS_ENABLED(CONFIG_HIGHPTE))
 		return pmd_lockptr(mm, (pmd_t *) pte);
-	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
-	return &mm->page_table_lock;
+
+	/* pte_alloc_huge() only applies with !CONFIG_HIGHPTE */
+	return ptep_lockptr(mm, pte);
 }
 
 #ifndef hugepages_supported
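
Editor's note: the lock a caller ends up taking now depends only on the folio size, but the calling pattern is unchanged — a usage sketch (not from the patch):

	spinlock_t *ptl = huge_pte_lockptr(h, mm, pte);

	spin_lock(ptl);
	/* ... modify the hugetlb PTE/PMD/PUD entry ... */
	spin_unlock(ptl);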

--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -1066,7 +1066,7 @@ static inline int of_i2c_get_board_info(struct device *dev,
 struct acpi_resource;
 struct acpi_resource_i2c_serialbus;
 
-#if IS_ENABLED(CONFIG_ACPI)
+#if IS_REACHABLE(CONFIG_ACPI) && IS_REACHABLE(CONFIG_I2C)
 bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares,
 			       struct acpi_resource_i2c_serialbus **i2c);
 int i2c_acpi_client_count(struct acpi_device *adev);

--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -795,8 +795,6 @@ extern int iommu_attach_device(struct iommu_domain *domain,
 			       struct device *dev);
 extern void iommu_detach_device(struct iommu_domain *domain,
 				struct device *dev);
-extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
-				   struct device *dev, ioasid_t pasid);
 extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
 extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
 extern int iommu_map(struct iommu_domain *domain, unsigned long iova,

--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -715,6 +715,13 @@ static inline bool kvm_arch_has_private_mem(struct kvm *kvm)
 }
 #endif
 
+#ifndef kvm_arch_has_readonly_mem
+static inline bool kvm_arch_has_readonly_mem(struct kvm *kvm)
+{
+	return IS_ENABLED(CONFIG_HAVE_KVM_READONLY_MEM);
+}
+#endif
+
 struct kvm_memslots {
 	u64 generation;
 	atomic_long_t last_used_slot;
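
Editor's note: an architecture can override the kvm_arch_has_readonly_mem() default by defining the macro before this header is processed; a sketch of that pattern (the field consulted here is purely illustrative):

	#define kvm_arch_has_readonly_mem kvm_arch_has_readonly_mem
	static inline bool kvm_arch_has_readonly_mem(struct kvm *kvm)
	{
		return !kvm->arch.has_protected_state;	/* hypothetical */
	}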
@@ -2414,7 +2421,7 @@ static inline unsigned long kvm_get_memory_attributes(struct kvm *kvm, gfn_t gfn
 }
 
 bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
-				     unsigned long attrs);
+				     unsigned long mask, unsigned long attrs);
 bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
 					struct kvm_gfn_range *range);
 bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
@@ -2445,11 +2452,11 @@ static inline int kvm_gmem_get_pfn(struct kvm *kvm,
 }
 #endif /* CONFIG_KVM_PRIVATE_MEM */
 
-#ifdef CONFIG_HAVE_KVM_GMEM_PREPARE
+#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE
 int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order);
-bool kvm_arch_gmem_prepare_needed(struct kvm *kvm);
 #endif
 
 #ifdef CONFIG_KVM_GENERIC_PRIVATE_MEM
 /**
  * kvm_gmem_populate() - Populate/prepare a GPA range with guest data
  *
@@ -2476,8 +2483,9 @@ typedef int (*kvm_gmem_populate_cb)(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
 
 long kvm_gmem_populate(struct kvm *kvm, gfn_t gfn, void __user *src, long npages,
 		       kvm_gmem_populate_cb post_populate, void *opaque);
 #endif
 
-#ifdef CONFIG_HAVE_KVM_GMEM_INVALIDATE
+#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
 void kvm_arch_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end);
 #endif

--- a/include/linux/minmax.h
+++ b/include/linux/minmax.h
@@ -26,19 +26,63 @@
 #define __typecheck(x, y) \
 	(!!(sizeof((typeof(x) *)1 == (typeof(y) *)1)))
 
-/* is_signed_type() isn't a constexpr for pointer types */
-#define __is_signed(x) \
-	__builtin_choose_expr(__is_constexpr(is_signed_type(typeof(x))), \
-		is_signed_type(typeof(x)), 0)
+/*
+ * __sign_use for integer expressions:
+ *   bit #0 set if ok for unsigned comparisons
+ *   bit #1 set if ok for signed comparisons
+ *
+ * In particular, statically non-negative signed integer
+ * expressions are ok for both.
+ *
+ * NOTE! Unsigned types smaller than 'int' are implicitly
+ * converted to 'int' in expressions, and are accepted for
+ * signed conversions for now. This is debatable.
+ *
+ * Note that 'x' is the original expression, and 'ux' is
+ * the unique variable that contains the value.
+ *
+ * We use 'ux' for pure type checking, and 'x' for when
+ * we need to look at the value (but without evaluating
+ * it for side effects! Careful to only ever evaluate it
+ * with sizeof() or __builtin_constant_p() etc).
+ *
+ * Pointers end up being checked by the normal C type
+ * rules at the actual comparison, and these expressions
+ * only need to be careful to not cause warnings for
+ * pointer use.
+ */
+#define __signed_type_use(x,ux) (2+__is_nonneg(x,ux))
+#define __unsigned_type_use(x,ux) (1+2*(sizeof(ux)<4))
+#define __sign_use(x,ux) (is_signed_type(typeof(ux))? \
+	__signed_type_use(x,ux):__unsigned_type_use(x,ux))
 
-/* True for a non-negative signed int constant */
-#define __is_noneg_int(x) \
-	(__builtin_choose_expr(__is_constexpr(x) && __is_signed(x), x, -1) >= 0)
+/*
+ * To avoid warnings about casting pointers to integers
+ * of different sizes, we need that special sign type.
+ *
+ * On 64-bit we can just always use 'long', since any
+ * integer or pointer type can just be cast to that.
+ *
+ * This does not work for 128-bit signed integers since
+ * the cast would truncate them, but we do not use s128
+ * types in the kernel (we do use 'u128', but they will
+ * be handled by the !is_signed_type() case).
+ *
+ * NOTE! The cast is there only to avoid any warnings
+ * from when values that aren't signed integer types.
+ */
+#ifdef CONFIG_64BIT
+  #define __signed_type(ux) long
+#else
+  #define __signed_type(ux) typeof(__builtin_choose_expr(sizeof(ux)>4,1LL,1L))
+#endif
+#define __is_nonneg(x,ux) statically_true((__signed_type(ux))(x)>=0)
 
-#define __types_ok(x, y) \
-	(__is_signed(x) == __is_signed(y) || \
-	 __is_signed((x) + 0) == __is_signed((y) + 0) || \
-	 __is_noneg_int(x) || __is_noneg_int(y))
+#define __types_ok(x,y,ux,uy) \
+	(__sign_use(x,ux) & __sign_use(y,uy))
+
+#define __types_ok3(x,y,z,ux,uy,uz) \
+	(__sign_use(x,ux) & __sign_use(y,uy) & __sign_use(z,uz))
 
 #define __cmp_op_min <
 #define __cmp_op_max >
@@ -51,34 +95,31 @@
 #define __cmp_once(op, type, x, y) \
 	__cmp_once_unique(op, type, x, y, __UNIQUE_ID(x_), __UNIQUE_ID(y_))
 
-#define __careful_cmp_once(op, x, y) ({ \
-	static_assert(__types_ok(x, y), \
-		#op "(" #x ", " #y ") signedness error, fix types or consider u" #op "() before " #op "_t()"); \
-	__cmp_once(op, __auto_type, x, y); })
+#define __careful_cmp_once(op, x, y, ux, uy) ({ \
+	__auto_type ux = (x); __auto_type uy = (y); \
+	BUILD_BUG_ON_MSG(!__types_ok(x,y,ux,uy), \
+		#op"("#x", "#y") signedness error"); \
+	__cmp(op, ux, uy); })
 
-#define __careful_cmp(op, x, y) \
-	__builtin_choose_expr(__is_constexpr((x) - (y)), \
-		__cmp(op, x, y), __careful_cmp_once(op, x, y))
+#define __careful_cmp(op, x, y) \
+	__careful_cmp_once(op, x, y, __UNIQUE_ID(x_), __UNIQUE_ID(y_))
 
 #define __clamp(val, lo, hi) \
 	((val) >= (hi) ? (hi) : ((val) <= (lo) ? (lo) : (val)))
 
-#define __clamp_once(val, lo, hi, unique_val, unique_lo, unique_hi) ({ \
-	typeof(val) unique_val = (val); \
-	typeof(lo) unique_lo = (lo); \
-	typeof(hi) unique_hi = (hi); \
+#define __clamp_once(val, lo, hi, uval, ulo, uhi) ({ \
+	__auto_type uval = (val); \
+	__auto_type ulo = (lo); \
+	__auto_type uhi = (hi); \
 	static_assert(__builtin_choose_expr(__is_constexpr((lo) > (hi)), \
 			(lo) <= (hi), true), \
 		"clamp() low limit " #lo " greater than high limit " #hi); \
-	static_assert(__types_ok(val, lo), "clamp() 'lo' signedness error"); \
-	static_assert(__types_ok(val, hi), "clamp() 'hi' signedness error"); \
-	__clamp(unique_val, unique_lo, unique_hi); })
+	BUILD_BUG_ON_MSG(!__types_ok3(val,lo,hi,uval,ulo,uhi), \
+		"clamp("#val", "#lo", "#hi") signedness error"); \
+	__clamp(uval, ulo, uhi); })
 
-#define __careful_clamp(val, lo, hi) ({ \
-	__builtin_choose_expr(__is_constexpr((val) - (lo) + (hi)), \
-		__clamp(val, lo, hi), \
-		__clamp_once(val, lo, hi, __UNIQUE_ID(__val), \
-			     __UNIQUE_ID(__lo), __UNIQUE_ID(__hi))); })
+#define __careful_clamp(val, lo, hi) \
+	__clamp_once(val, lo, hi, __UNIQUE_ID(v_), __UNIQUE_ID(l_), __UNIQUE_ID(h_))
 
 /**
  * min - return minimum of two values of the same or compatible types
@@ -111,13 +152,20 @@
 #define umax(x, y) \
 	__careful_cmp(max, (x) + 0u + 0ul + 0ull, (y) + 0u + 0ul + 0ull)
 
+#define __careful_op3(op, x, y, z, ux, uy, uz) ({ \
+	__auto_type ux = (x); __auto_type uy = (y);__auto_type uz = (z);\
+	BUILD_BUG_ON_MSG(!__types_ok3(x,y,z,ux,uy,uz), \
+		#op"3("#x", "#y", "#z") signedness error"); \
+	__cmp(op, ux, __cmp(op, uy, uz)); })
+
 /**
  * min3 - return minimum of three values
  * @x: first value
 * @y: second value
 * @z: third value
 */
-#define min3(x, y, z) min((typeof(x))min(x, y), z)
+#define min3(x, y, z) \
+	__careful_op3(min, x, y, z, __UNIQUE_ID(x_), __UNIQUE_ID(y_), __UNIQUE_ID(z_))
 
 /**
  * max3 - return maximum of three values
@@ -125,7 +173,8 @@
  * @y: second value
  * @z: third value
  */
-#define max3(x, y, z) max((typeof(x))max(x, y), z)
+#define max3(x, y, z) \
+	__careful_op3(max, x, y, z, __UNIQUE_ID(x_), __UNIQUE_ID(y_), __UNIQUE_ID(z_))
 
 /**
  * min_not_zero - return the minimum that is _not_ zero, unless both are zero
@@ -277,6 +326,8 @@ static inline bool in_range32(u32 val, u32 start, u32 len)
  * Use these carefully: no type checking, and uses the arguments
  * multiple times. Use for obvious constants only.
  */
 #define MIN(a,b) __cmp(min,a,b)
 #define MAX(a,b) __cmp(max,a,b)
+#define MIN_T(type,a,b) __cmp(min,(type)(a),(type)(b))
+#define MAX_T(type,a,b) __cmp(max,(type)(a),(type)(b))
 
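
Editor's note: the practical effect of the reworked checks, in a quick sketch of my own (not from the patch). Mixed-signedness comparisons now trip the BUILD_BUG_ON_MSG unless one side is a statically non-negative integer constant, and the unsigned variants remain the escape hatch:

	int s = -1;
	unsigned int u = 1;

	min(s, u);	/* build error: "min(s, u) signedness error" */
	umin(s, u);	/* compiles: both operands treated as unsigned */
	min(u, 4);	/* compiles: 4 is a statically non-negative int */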

--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2920,6 +2920,13 @@ static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
 	return ptlock_ptr(page_ptdesc(pmd_page(*pmd)));
 }
 
+static inline spinlock_t *ptep_lockptr(struct mm_struct *mm, pte_t *pte)
+{
+	BUILD_BUG_ON(IS_ENABLED(CONFIG_HIGHPTE));
+	BUILD_BUG_ON(MAX_PTRS_PER_PTE * sizeof(pte_t) > PAGE_SIZE);
+	return ptlock_ptr(virt_to_ptdesc(pte));
+}
+
 static inline bool ptlock_init(struct ptdesc *ptdesc)
 {
 	/*
@@ -2944,6 +2951,10 @@ static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
 {
 	return &mm->page_table_lock;
 }
+static inline spinlock_t *ptep_lockptr(struct mm_struct *mm, pte_t *pte)
+{
+	return &mm->page_table_lock;
+}
 static inline void ptlock_cache_init(void) {}
 static inline bool ptlock_init(struct ptdesc *ptdesc) { return true; }
 static inline void ptlock_free(struct ptdesc *ptdesc) {}

--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -220,8 +220,6 @@ enum node_stat_item {
 	PGDEMOTE_KSWAPD,
 	PGDEMOTE_DIRECT,
 	PGDEMOTE_KHUGEPAGED,
-	NR_MEMMAP,	/* page metadata allocated through buddy allocator */
-	NR_MEMMAP_BOOT,	/* page metadata allocated through boot allocator */
 	NR_VM_NODE_STAT_ITEMS
 };
 

--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -73,8 +73,6 @@ struct netfs_inode {
 #define NETFS_ICTX_ODIRECT	0	/* The file has DIO in progress */
 #define NETFS_ICTX_UNBUFFERED	1	/* I/O should not use the pagecache */
 #define NETFS_ICTX_WRITETHROUGH	2	/* Write-through caching */
-#define NETFS_ICTX_USE_PGPRIV2	31	/* [DEPRECATED] Use PG_private_2 to mark
-					 * write to cache on read */
 };
 
 /*
@@ -269,7 +267,6 @@ struct netfs_io_request {
 #define NETFS_RREQ_DONT_UNLOCK_FOLIOS	3	/* Don't unlock the folios on completion */
 #define NETFS_RREQ_FAILED		4	/* The request failed */
 #define NETFS_RREQ_IN_PROGRESS		5	/* Unlocked when the request completes */
-#define NETFS_RREQ_WRITE_TO_CACHE	7	/* Need to write to the cache */
 #define NETFS_RREQ_UPLOAD_TO_SERVER	8	/* Need to write to the server */
 #define NETFS_RREQ_NONBLOCK		9	/* Don't block if possible (O_NONBLOCK) */
 #define NETFS_RREQ_BLOCKED		10	/* We blocked */

--- a/include/linux/panic.h
+++ b/include/linux/panic.h
@@ -16,6 +16,7 @@ extern void oops_enter(void);
 extern void oops_exit(void);
 extern bool oops_may_print(void);
 
+extern bool panic_triggering_all_cpu_backtrace;
 extern int panic_timeout;
 extern unsigned long panic_print;
 extern int panic_on_oops;

--- a/include/linux/pgalloc_tag.h
+++ b/include/linux/pgalloc_tag.h
@@ -43,6 +43,18 @@ static inline void put_page_tag_ref(union codetag_ref *ref)
 	page_ext_put(page_ext_from_codetag_ref(ref));
 }
 
+static inline void clear_page_tag_ref(struct page *page)
+{
+	if (mem_alloc_profiling_enabled()) {
+		union codetag_ref *ref = get_page_tag_ref(page);
+
+		if (ref) {
+			set_codetag_empty(ref);
+			put_page_tag_ref(ref);
+		}
+	}
+}
+
 static inline void pgalloc_tag_add(struct page *page, struct task_struct *task,
 				   unsigned int nr)
 {
@@ -126,6 +138,7 @@ static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr)
 
 static inline union codetag_ref *get_page_tag_ref(struct page *page) { return NULL; }
 static inline void put_page_tag_ref(union codetag_ref *ref) {}
+static inline void clear_page_tag_ref(struct page *page) {}
 static inline void pgalloc_tag_add(struct page *page, struct task_struct *task,
 				   unsigned int nr) {}
 static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {}

--- a/include/linux/profile.h
+++ b/include/linux/profile.h
@@ -10,7 +10,6 @@
 
 #define CPU_PROFILING	1
 #define SCHED_PROFILING	2
-#define SLEEP_PROFILING	3
 #define KVM_PROFILING	4
 
 struct proc_dir_entry;

--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -266,12 +266,12 @@ bool __refcount_sub_and_test(int i, refcount_t *r, int *oldp)
 	if (oldp)
 		*oldp = old;
 
-	if (old == i) {
+	if (old > 0 && old == i) {
 		smp_acquire__after_ctrl_dep();
 		return true;
 	}
 
-	if (unlikely(old < 0 || old - i < 0))
+	if (unlikely(old <= 0 || old - i < 0))
 		refcount_warn_saturate(r, REFCOUNT_SUB_UAF);
 
 	return false;
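
Editor's note: for orientation, a typical (hypothetical) caller of this API drops several references at once and frees on the last one. The fix makes the early-return path additionally require old > 0, so subtracting from an already-zero or saturated counter now warns of use-after-free instead of reporting "hit zero":

	struct obj {
		refcount_t ref;
	};

	static void obj_put_many(struct obj *o, unsigned int n)
	{
		if (refcount_sub_and_test(n, &o->ref))
			kfree(o);	/* final reference dropped */
	}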

--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -193,7 +193,6 @@ void ring_buffer_set_clock(struct trace_buffer *buffer,
 void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs);
 bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer);
 
-size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu);
 size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu);
 
 struct buffer_data_read_page;

--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -55,6 +55,7 @@ enum thermal_notify_event {
 	THERMAL_TZ_BIND_CDEV, /* Cooling dev is bind to the thermal zone */
 	THERMAL_TZ_UNBIND_CDEV, /* Cooling dev is unbind from the thermal zone */
 	THERMAL_INSTANCE_WEIGHT_CHANGED, /* Thermal instance weight changed */
+	THERMAL_TZ_RESUME, /* Thermal zone is resuming after system sleep */
 };
 
 /**

--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -680,7 +680,7 @@ struct trace_event_file {
 	 * caching and such. Which is mostly OK ;-)
 	 */
 	unsigned long		flags;
-	atomic_t		ref;	/* ref count for opened files */
+	refcount_t		ref;	/* ref count for opened files */
 	atomic_t		sm_ref;	/* soft-mode reference counter */
 	atomic_t		tm_ref;	/* trigger-mode reference counter */
 };
@@ -880,7 +880,6 @@ do { \
 struct perf_event;
 
 DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
-DECLARE_PER_CPU(int, bpf_kprobe_override);
 
 extern int  perf_trace_init(struct perf_event *event);
 extern void perf_trace_destroy(struct perf_event *event);

--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -10,6 +10,7 @@
 #include <linux/mod_devicetable.h>
 #include <linux/gfp.h>
 #include <linux/dma-mapping.h>
+#include <linux/completion.h>
 
 /**
  * struct virtqueue - a queue to register buffers for sending or receiving.
@@ -109,6 +110,8 @@ struct virtio_admin_cmd {
 	__le64 group_member_id;
 	struct scatterlist *data_sg;
 	struct scatterlist *result_sg;
+	struct completion completion;
+	int ret;
 };
 
 /**

--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -104,8 +104,6 @@ struct virtqueue_info {
  *	Returns 0 on success or error status
  *	If disable_vq_and_reset is set, then enable_vq_after_reset must also be
  *	set.
- * @create_avq: create admin virtqueue resource.
- * @destroy_avq: destroy admin virtqueue resource.
  */
 struct virtio_config_ops {
 	void (*get)(struct virtio_device *vdev, unsigned offset,
@@ -133,8 +131,6 @@ struct virtio_config_ops {
 			    struct virtio_shm_region *region, u8 id);
 	int (*disable_vq_and_reset)(struct virtqueue *vq);
 	int (*enable_vq_after_reset)(struct virtqueue *vq);
-	int (*create_avq)(struct virtio_device *vdev);
-	void (*destroy_avq)(struct virtio_device *vdev);
 };
 
 /* If driver didn't advertise the feature, it will never appear. */

--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -56,7 +56,6 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
 	unsigned int thlen = 0;
 	unsigned int p_off = 0;
 	unsigned int ip_proto;
-	u64 ret, remainder, gso_size;
 
 	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
 		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
@@ -99,16 +98,6 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
 		u32 off = __virtio16_to_cpu(little_endian, hdr->csum_offset);
 		u32 needed = start + max_t(u32, thlen, off + sizeof(__sum16));
 
-		if (hdr->gso_size) {
-			gso_size = __virtio16_to_cpu(little_endian, hdr->gso_size);
-			ret = div64_u64_rem(skb->len, gso_size, &remainder);
-			if (!(ret && (hdr->gso_size > needed) &&
-			      ((remainder > needed) || (remainder == 0)))) {
-				return -EINVAL;
-			}
-			skb_shinfo(skb)->tx_flags |= SKBFL_SHARED_FRAG;
-		}
-
 		if (!pskb_may_pull(skb, needed))
 			return -EINVAL;
 
@@ -182,6 +171,11 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
 			if (gso_type != SKB_GSO_UDP_L4)
 				return -EINVAL;
 			break;
+		case SKB_GSO_TCPV4:
+		case SKB_GSO_TCPV6:
+			if (skb->csum_offset != offsetof(struct tcphdr, check))
+				return -EINVAL;
+			break;
 		}
 
 		/* Kernel has a special handling for GSO_BY_FRAGS. */

--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -34,10 +34,13 @@ struct reclaim_stat {
 	unsigned nr_lazyfree_fail;
 };
 
-enum writeback_stat_item {
+/* Stat data for system wide items */
+enum vm_stat_item {
 	NR_DIRTY_THRESHOLD,
 	NR_DIRTY_BG_THRESHOLD,
-	NR_VM_WRITEBACK_STAT_ITEMS,
+	NR_MEMMAP_PAGES,	/* page metadata allocated through buddy allocator */
+	NR_MEMMAP_BOOT_PAGES,	/* page metadata allocated through boot allocator */
+	NR_VM_STAT_ITEMS,
 };
 
 #ifdef CONFIG_VM_EVENT_COUNTERS
@@ -514,21 +517,13 @@ static inline const char *lru_list_name(enum lru_list lru)
 	return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_"
 }
 
-static inline const char *writeback_stat_name(enum writeback_stat_item item)
-{
-	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
-			   NR_VM_NUMA_EVENT_ITEMS +
-			   NR_VM_NODE_STAT_ITEMS +
-			   item];
-}
-
 #if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
 static inline const char *vm_event_name(enum vm_event_item item)
 {
 	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
 			   NR_VM_NUMA_EVENT_ITEMS +
 			   NR_VM_NODE_STAT_ITEMS +
-			   NR_VM_WRITEBACK_STAT_ITEMS +
+			   NR_VM_STAT_ITEMS +
 			   item];
 }
 #endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */
@@ -625,7 +620,6 @@ static inline void lruvec_stat_sub_folio(struct folio *folio,
 	lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
 }
 
-void __meminit mod_node_early_perpage_metadata(int nid, long delta);
-void __meminit store_early_perpage_metadata(void);
-
+void memmap_boot_pages_add(long delta);
+void memmap_pages_add(long delta);
 #endif	/* _LINUX_VMSTAT_H */