Merge drm/drm-next into drm-misc-next

Backmerging to get fixes from v6.11-rc5.

Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Thomas Zimmermann
2024-08-30 14:14:29 +02:00
1500 changed files with 28567 additions and 12821 deletions

View File

@@ -660,12 +660,9 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
void *context))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_execute_reg_methods(acpi_handle device,
u32 nax_depth,
acpi_adr_space_type
space_id))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_execute_orphan_reg_method(acpi_handle device,
acpi_adr_space_type
space_id))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_remove_address_space_handler(acpi_handle
device,

View File

@@ -50,6 +50,7 @@ enum acpi_backlight_type {
acpi_backlight_native,
acpi_backlight_nvidia_wmi_ec,
acpi_backlight_apple_gmux,
acpi_backlight_dell_uart,
};
#if IS_ENABLED(CONFIG_ACPI_VIDEO)
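As a rough sketch (hypothetical driver code, not part of this diff), native GPU backlight code typically consults this enum to decide whether to register its own backlight device; the new entry lets it stand down in favor of the dell-uart-backlight driver:

	if (acpi_video_get_backlight_type() != acpi_backlight_native) {
		/* e.g. acpi_backlight_dell_uart: another driver owns it */
		return 0;	/* skip registering a native backlight */
	}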

View File

@@ -885,6 +885,8 @@ int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr);
void drm_dp_mst_dump_topology(struct seq_file *m,
struct drm_dp_mst_topology_mgr *mgr);
void drm_dp_mst_topology_queue_probe(struct drm_dp_mst_topology_mgr *mgr);
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
int __must_check
drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,

View File

@@ -27,6 +27,7 @@
#define DRM_BUDDY_CONTIGUOUS_ALLOCATION BIT(2)
#define DRM_BUDDY_CLEAR_ALLOCATION BIT(3)
#define DRM_BUDDY_CLEARED BIT(4)
#define DRM_BUDDY_TRIM_DISABLE BIT(5)
struct drm_buddy_block {
#define DRM_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
@@ -155,6 +156,7 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
unsigned long flags);
int drm_buddy_block_trim(struct drm_buddy *mm,
u64 *start,
u64 new_size,
struct list_head *blocks);

View File

@@ -221,7 +221,8 @@ drm_vprintf(struct drm_printer *p, const char *fmt, va_list *va)
/**
* struct drm_print_iterator - local struct used with drm_printer_coredump
* @data: Pointer to the devcoredump output buffer
* @data: Pointer to the devcoredump output buffer, can be NULL if using
* drm_printer_coredump to determine size of devcoredump
* @start: The offset within the buffer to start writing
* @remain: The number of bytes to write for this iteration
*/
@@ -266,6 +267,57 @@ struct drm_print_iterator {
* coredump_read, ...)
* }
*
* The above example has a time complexity of O(N^2), where N is the size of the
* devcoredump. This is acceptable for small devcoredumps but scales poorly for
* larger ones.
*
* Another use case for drm_coredump_printer is to capture the devcoredump into
* a saved buffer before the dev_coredump() callback. This involves two passes:
* one to determine the size of the devcoredump and another to print it to a
* buffer. Then, in dev_coredump(), copy from the saved buffer into the
* devcoredump read buffer.
*
* For example::
*
* char *devcoredump_saved_buffer;
*
* ssize_t __coredump_print(char *buffer, ssize_t count, ...)
* {
* struct drm_print_iterator iter;
* struct drm_printer p;
*
* iter.data = buffer;
* iter.start = 0;
* iter.remain = count;
*
* p = drm_coredump_printer(&iter);
*
* drm_printf(p, "foo=%d\n", foo);
* ...
* return count - iter.remain;
* }
*
* void coredump_print(...)
* {
* ssize_t count;
*
* count = __coredump_print(NULL, INT_MAX, ...);
* devcoredump_saved_buffer = kvmalloc(count, GFP_KERNEL);
* __coredump_print(devcoredump_saved_buffer, count, ...);
* }
*
* void coredump_read(char *buffer, loff_t offset, size_t count,
* void *data, size_t datalen)
* {
* ...
* memcpy(buffer, devcoredump_saved_buffer + offset, count);
* ...
* }
*
* The above example has a time complexity of O(2*N), i.e. linear in the size N
* of the devcoredump, so it scales better than the previous example for larger
* devcoredumps.
*
* RETURNS:
* The &drm_printer object
*/

View File

@@ -772,15 +772,18 @@
INTEL_ATS_M75_IDS(MACRO__, ## __VA_ARGS__)
/* MTL */
#define INTEL_MTL_IDS(MACRO__, ...) \
MACRO__(0x7D40, ## __VA_ARGS__), \
#define INTEL_ARL_IDS(MACRO__, ...) \
MACRO__(0x7D41, ## __VA_ARGS__), \
MACRO__(0x7D45, ## __VA_ARGS__), \
MACRO__(0x7D51, ## __VA_ARGS__), \
MACRO__(0x7D67, ## __VA_ARGS__), \
MACRO__(0x7DD1, ## __VA_ARGS__)
#define INTEL_MTL_IDS(MACRO__, ...) \
INTEL_ARL_IDS(MACRO__, ## __VA_ARGS__), \
MACRO__(0x7D40, ## __VA_ARGS__), \
MACRO__(0x7D45, ## __VA_ARGS__), \
MACRO__(0x7D55, ## __VA_ARGS__), \
MACRO__(0x7D60, ## __VA_ARGS__), \
MACRO__(0x7D67, ## __VA_ARGS__), \
MACRO__(0x7DD1, ## __VA_ARGS__), \
MACRO__(0x7DD5, ## __VA_ARGS__)
/* LNL */

View File

@@ -270,6 +270,18 @@ static inline void bitmap_copy_clear_tail(unsigned long *dst,
dst[nbits / BITS_PER_LONG] &= BITMAP_LAST_WORD_MASK(nbits);
}
static inline void bitmap_copy_and_extend(unsigned long *to,
const unsigned long *from,
unsigned int count, unsigned int size)
{
unsigned int copy = BITS_TO_LONGS(count);
memcpy(to, from, copy * sizeof(long));
if (count % BITS_PER_LONG)
to[copy - 1] &= BITMAP_LAST_WORD_MASK(count);
memset(to + copy, 0, bitmap_size(size) - copy * sizeof(long));
}
/*
* On 32-bit systems bitmaps are represented as u32 arrays internally. On LE64
* machines the order of hi and lo parts of numbers match the bitmap structure.
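A minimal usage sketch for the new helper (hypothetical values; kernel context assumed): it copies the low count bits and guarantees the tail up to size bits is zeroed.

	DECLARE_BITMAP(src, 16);
	DECLARE_BITMAP(dst, 128);

	bitmap_zero(src, 16);
	__set_bit(3, src);
	bitmap_copy_and_extend(dst, src, 16, 128);
	/* dst has only bit 3 set; bits 16..127 are known to be zero */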

View File

@@ -1296,12 +1296,7 @@ bdev_max_secure_erase_sectors(struct block_device *bdev)
static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);
if (q)
return q->limits.max_write_zeroes_sectors;
return 0;
return bdev_get_queue(bdev)->limits.max_write_zeroes_sectors;
}
static inline bool bdev_nonrot(struct block_device *bdev)

View File

@@ -856,8 +856,8 @@ static inline u32 type_flag(u32 type)
/* only use after check_attach_btf_id() */
static inline enum bpf_prog_type resolve_prog_type(const struct bpf_prog *prog)
{
return (prog->type == BPF_PROG_TYPE_EXT && prog->aux->dst_prog) ?
prog->aux->dst_prog->type : prog->type;
return (prog->type == BPF_PROG_TYPE_EXT && prog->aux->saved_dst_prog_type) ?
prog->aux->saved_dst_prog_type : prog->type;
}
static inline bool bpf_prog_check_recur(const struct bpf_prog *prog)

View File

@@ -1037,7 +1037,7 @@ void init_cpu_online(const struct cpumask *src);
assign_bit(cpumask_check(cpu), cpumask_bits(mask), (val))
#define set_cpu_possible(cpu, possible) assign_cpu((cpu), &__cpu_possible_mask, (possible))
#define set_cpu_enabled(cpu, enabled) assign_cpu((cpu), &__cpu_possible_mask, (enabled))
#define set_cpu_enabled(cpu, enabled) assign_cpu((cpu), &__cpu_enabled_mask, (enabled))
#define set_cpu_present(cpu, present) assign_cpu((cpu), &__cpu_present_mask, (present))
#define set_cpu_active(cpu, active) assign_cpu((cpu), &__cpu_active_mask, (active))
#define set_cpu_dying(cpu, dying) assign_cpu((cpu), &__cpu_dying_mask, (dying))

View File

@@ -5,6 +5,8 @@
#ifndef _NET_DSA_TAG_OCELOT_H
#define _NET_DSA_TAG_OCELOT_H
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/kthread.h>
#include <linux/packing.h>
#include <linux/skbuff.h>
@@ -273,4 +275,49 @@ static inline u32 ocelot_ptp_rew_op(struct sk_buff *skb)
return rew_op;
}
/**
* ocelot_xmit_get_vlan_info: Determine VLAN_TCI and TAG_TYPE for injected frame
* @skb: Pointer to socket buffer
* @br: Pointer to bridge device that the port is under, if any
* @vlan_tci: Pointer to output storage for the VLAN TCI to put in the frame
* header
* @tag_type: Pointer to output storage for the tag type (IFH_TAG_TYPE_C or
* IFH_TAG_TYPE_S)
*
* If the port is under a VLAN-aware bridge, remove the VLAN header from the
* payload and move it into the DSA tag, which will make the switch classify
* the packet to the bridge VLAN. Otherwise, leave the classified VLAN at zero,
* which is the pvid of standalone ports (OCELOT_STANDALONE_PVID), although not
* of VLAN-unaware bridge ports (that would be ocelot_vlan_unaware_pvid()).
* Anyway, VID 0 is fine because it is stripped on egress for these port modes,
* and source address learning is not performed for packets injected from the
* CPU anyway, so it doesn't matter that the VID is "wrong".
*/
static inline void ocelot_xmit_get_vlan_info(struct sk_buff *skb,
struct net_device *br,
u64 *vlan_tci, u64 *tag_type)
{
struct vlan_ethhdr *hdr;
u16 proto, tci;
if (!br || !br_vlan_enabled(br)) {
*vlan_tci = 0;
*tag_type = IFH_TAG_TYPE_C;
return;
}
hdr = (struct vlan_ethhdr *)skb_mac_header(skb);
br_vlan_get_proto(br, &proto);
if (ntohs(hdr->h_vlan_proto) == proto) {
vlan_remove_tag(skb, &tci);
*vlan_tci = tci;
} else {
rcu_read_lock();
br_vlan_get_pvid_rcu(br, &tci);
rcu_read_unlock();
*vlan_tci = tci;
}
*tag_type = (proto != ETH_P_8021Q) ? IFH_TAG_TYPE_S : IFH_TAG_TYPE_C;
}
#endif
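For illustration only (a hypothetical tagger snippet, not taken from this diff), the intended caller is a DSA tagger xmit path, which would use the two outputs to build the injection frame header:

	u64 vlan_tci, tag_type;

	ocelot_xmit_get_vlan_info(skb, dsa_port_bridge_dev_get(dp),
				  &vlan_tci, &tag_type);
	/* ...then encode tag_type and vlan_tci into the IFH before
	 * handing the frame to the CPU port.
	 */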

View File

@@ -736,10 +736,10 @@ struct kernel_ethtool_ts_info {
* @rxfh_key_space: same as @rxfh_indir_space, but for the key.
* @rxfh_priv_size: size of the driver private data area the core should
* allocate for an RSS context (in &struct ethtool_rxfh_context).
* @rxfh_max_context_id: maximum (exclusive) supported RSS context ID. If this
* is zero then the core may choose any (nonzero) ID, otherwise the core
* will only use IDs strictly less than this value, as the @rss_context
* argument to @create_rxfh_context and friends.
* @rxfh_max_num_contexts: maximum (exclusive) supported RSS context ID.
* If this is zero then the core may choose any (nonzero) ID, otherwise
* the core will only use IDs strictly less than this value, as the
* @rss_context argument to @create_rxfh_context and friends.
* @supported_coalesce_params: supported types of interrupt coalescing.
* @supported_ring_params: supported ring params.
* @get_drvinfo: Report driver/device information. Modern drivers no
@@ -954,7 +954,7 @@ struct ethtool_ops {
u32 rxfh_indir_space;
u16 rxfh_key_space;
u16 rxfh_priv_size;
u32 rxfh_max_context_id;
u32 rxfh_max_num_contexts;
u32 supported_coalesce_params;
u32 supported_ring_params;
void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);

View File

@@ -110,7 +110,7 @@ DEFINE_CLASS(get_unused_fd, int, if (_T >= 0) put_unused_fd(_T),
*
* f = dentry_open(&path, O_RDONLY, current_cred());
* if (IS_ERR(f))
* return PTR_ERR(fd);
* return PTR_ERR(f);
*
* fd_install(fd, f);
* return take_fd(fd);

View File

@@ -2392,6 +2392,9 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
*
* I_PINNING_FSCACHE_WB Inode is pinning an fscache object for writeback.
*
* I_LRU_ISOLATING Inode is pinned while being isolated from the LRU, without
* holding i_count.
*
* Q: What is the difference between I_WILL_FREE and I_FREEING?
*/
#define I_DIRTY_SYNC (1 << 0)
@@ -2415,6 +2418,8 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
#define I_DONTCACHE (1 << 16)
#define I_SYNC_QUEUED (1 << 17)
#define I_PINNING_NETFS_WB (1 << 18)
#define __I_LRU_ISOLATING 19
#define I_LRU_ISOLATING (1 << __I_LRU_ISOLATING)
#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
#define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)

View File

@@ -944,10 +944,37 @@ static inline bool htlb_allow_alloc_fallback(int reason)
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
struct mm_struct *mm, pte_t *pte)
{
if (huge_page_size(h) == PMD_SIZE)
const unsigned long size = huge_page_size(h);
VM_WARN_ON(size == PAGE_SIZE);
/*
* hugetlb must use the exact same PT locks as core-mm page table
* walkers would. When modifying a PTE table, hugetlb must take the
* PTE PT lock, when modifying a PMD table, hugetlb must take the PMD
* PT lock etc.
*
* The expectation is that any hugetlb folio smaller than a PMD is
* always mapped into a single PTE table and that any hugetlb folio
* smaller than a PUD (but at least as big as a PMD) is always mapped
* into a single PMD table.
*
* If that does not hold for an architecture, then that architecture
* must disable split PT locks such that all *_lockptr() functions
* will give us the same result: the per-MM PT lock.
*
* Note that with e.g., CONFIG_PGTABLE_LEVELS=2 where
* PGDIR_SIZE==P4D_SIZE==PUD_SIZE==PMD_SIZE, we'd use pud_lockptr()
* and core-mm would use pmd_lockptr(). However, in such configurations
* split PMD locks are disabled -- they don't make sense on a single
* PGDIR page table -- and the end result is the same.
*/
if (size >= PUD_SIZE)
return pud_lockptr(mm, (pud_t *) pte);
else if (size >= PMD_SIZE || IS_ENABLED(CONFIG_HIGHPTE))
return pmd_lockptr(mm, (pmd_t *) pte);
VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
return &mm->page_table_lock;
/* pte_alloc_huge() only applies with !CONFIG_HIGHPTE */
return ptep_lockptr(mm, pte);
}
#ifndef hugepages_supported
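A sketch of how callers consume this (mirroring the existing huge_pte_lock() pattern in the kernel; simplified here):

	spinlock_t *ptl = huge_pte_lockptr(h, mm, pte);

	spin_lock(ptl);
	/* the hugetlb PTE may now be modified under the same lock that
	 * core-mm page table walkers would take for this table
	 */
	spin_unlock(ptl);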

View File

@@ -1066,7 +1066,7 @@ static inline int of_i2c_get_board_info(struct device *dev,
struct acpi_resource;
struct acpi_resource_i2c_serialbus;
#if IS_ENABLED(CONFIG_ACPI)
#if IS_REACHABLE(CONFIG_ACPI) && IS_REACHABLE(CONFIG_I2C)
bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares,
struct acpi_resource_i2c_serialbus **i2c);
int i2c_acpi_client_count(struct acpi_device *adev);

View File

@@ -795,8 +795,6 @@ extern int iommu_attach_device(struct iommu_domain *domain,
struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
struct device *dev);
extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
struct device *dev, ioasid_t pasid);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,

View File

@@ -715,6 +715,13 @@ static inline bool kvm_arch_has_private_mem(struct kvm *kvm)
}
#endif
#ifndef kvm_arch_has_readonly_mem
static inline bool kvm_arch_has_readonly_mem(struct kvm *kvm)
{
return IS_ENABLED(CONFIG_HAVE_KVM_READONLY_MEM);
}
#endif
struct kvm_memslots {
u64 generation;
atomic_long_t last_used_slot;

View File

@@ -2920,6 +2920,13 @@ static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
return ptlock_ptr(page_ptdesc(pmd_page(*pmd)));
}
static inline spinlock_t *ptep_lockptr(struct mm_struct *mm, pte_t *pte)
{
BUILD_BUG_ON(IS_ENABLED(CONFIG_HIGHPTE));
BUILD_BUG_ON(MAX_PTRS_PER_PTE * sizeof(pte_t) > PAGE_SIZE);
return ptlock_ptr(virt_to_ptdesc(pte));
}
static inline bool ptlock_init(struct ptdesc *ptdesc)
{
/*
@@ -2944,6 +2951,10 @@ static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
return &mm->page_table_lock;
}
static inline spinlock_t *ptep_lockptr(struct mm_struct *mm, pte_t *pte)
{
return &mm->page_table_lock;
}
static inline void ptlock_cache_init(void) {}
static inline bool ptlock_init(struct ptdesc *ptdesc) { return true; }
static inline void ptlock_free(struct ptdesc *ptdesc) {}
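The intended invariant (an assumption drawn from the hugetlb rework above, not spelled out in this hunk) is that for a PTE entry pte inside the table addressed by pmd:

	/* both name the same spinlock, with or without split PT locks */
	pte_lockptr(mm, pmd) == ptep_lockptr(mm, pte)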

View File

@@ -220,8 +220,6 @@ enum node_stat_item {
PGDEMOTE_KSWAPD,
PGDEMOTE_DIRECT,
PGDEMOTE_KHUGEPAGED,
NR_MEMMAP, /* page metadata allocated through buddy allocator */
NR_MEMMAP_BOOT, /* page metadata allocated through boot allocator */
NR_VM_NODE_STAT_ITEMS
};

View File

@@ -73,8 +73,6 @@ struct netfs_inode {
#define NETFS_ICTX_ODIRECT 0 /* The file has DIO in progress */
#define NETFS_ICTX_UNBUFFERED 1 /* I/O should not use the pagecache */
#define NETFS_ICTX_WRITETHROUGH 2 /* Write-through caching */
#define NETFS_ICTX_USE_PGPRIV2 31 /* [DEPRECATED] Use PG_private_2 to mark
* write to cache on read */
};
/*
@@ -269,7 +267,6 @@ struct netfs_io_request {
#define NETFS_RREQ_DONT_UNLOCK_FOLIOS 3 /* Don't unlock the folios on completion */
#define NETFS_RREQ_FAILED 4 /* The request failed */
#define NETFS_RREQ_IN_PROGRESS 5 /* Unlocked when the request completes */
#define NETFS_RREQ_WRITE_TO_CACHE 7 /* Need to write to the cache */
#define NETFS_RREQ_UPLOAD_TO_SERVER 8 /* Need to write to the server */
#define NETFS_RREQ_NONBLOCK 9 /* Don't block if possible (O_NONBLOCK) */
#define NETFS_RREQ_BLOCKED 10 /* We blocked */

View File

@@ -16,6 +16,7 @@ extern void oops_enter(void);
extern void oops_exit(void);
extern bool oops_may_print(void);
extern bool panic_triggering_all_cpu_backtrace;
extern int panic_timeout;
extern unsigned long panic_print;
extern int panic_on_oops;

View File

@@ -43,6 +43,18 @@ static inline void put_page_tag_ref(union codetag_ref *ref)
page_ext_put(page_ext_from_codetag_ref(ref));
}
static inline void clear_page_tag_ref(struct page *page)
{
if (mem_alloc_profiling_enabled()) {
union codetag_ref *ref = get_page_tag_ref(page);
if (ref) {
set_codetag_empty(ref);
put_page_tag_ref(ref);
}
}
}
static inline void pgalloc_tag_add(struct page *page, struct task_struct *task,
unsigned int nr)
{
@@ -126,6 +138,7 @@ static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr)
static inline union codetag_ref *get_page_tag_ref(struct page *page) { return NULL; }
static inline void put_page_tag_ref(union codetag_ref *ref) {}
static inline void clear_page_tag_ref(struct page *page) {}
static inline void pgalloc_tag_add(struct page *page, struct task_struct *task,
unsigned int nr) {}
static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {}

View File

@@ -266,12 +266,12 @@ bool __refcount_sub_and_test(int i, refcount_t *r, int *oldp)
if (oldp)
*oldp = old;
if (old == i) {
if (old > 0 && old == i) {
smp_acquire__after_ctrl_dep();
return true;
}
if (unlikely(old < 0 || old - i < 0))
if (unlikely(old <= 0 || old - i < 0))
refcount_warn_saturate(r, REFCOUNT_SUB_UAF);
return false;
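To see what the strengthened checks catch, consider an already-dead counter (a sketch with assumed values): with the old code, refcount_sub_and_test(0, &r) on a counter that had already dropped to 0 saw old == i (0 == 0) and returned true, so the caller would free the object a second time; with the fix, old <= 0 routes to refcount_warn_saturate(r, REFCOUNT_SUB_UAF) and the function returns false.

	refcount_t r = REFCOUNT_INIT(0);	/* object already released */

	if (refcount_sub_and_test(0, &r))	/* old code: true (UAF!) */
		kfree(obj);			/* new code: warns, false */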

View File

@@ -193,7 +193,6 @@ void ring_buffer_set_clock(struct trace_buffer *buffer,
void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs);
bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer);
size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu);
size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu);
struct buffer_data_read_page;

View File

@@ -902,12 +902,29 @@ extern int devm_spi_register_controller(struct device *dev,
struct spi_controller *ctlr);
extern void spi_unregister_controller(struct spi_controller *ctlr);
#if IS_ENABLED(CONFIG_ACPI)
#if IS_ENABLED(CONFIG_ACPI) && IS_ENABLED(CONFIG_SPI_MASTER)
extern struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev);
extern struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
struct acpi_device *adev,
int index);
int acpi_spi_count_resources(struct acpi_device *adev);
#else
static inline struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
{
return NULL;
}
static inline struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
struct acpi_device *adev,
int index)
{
return ERR_PTR(-ENODEV);
}
static inline int acpi_spi_count_resources(struct acpi_device *adev)
{
return 0;
}
#endif
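With the stubs in place, a consumer can call these helpers unconditionally; a hypothetical probe sketch (names assumed for illustration):

	struct spi_controller *ctlr = acpi_spi_find_controller_by_adev(adev);

	if (!ctlr)	/* also the stub result when ACPI or SPI is off */
		return -ENODEV;
	return acpi_spi_count_resources(adev);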
/*

View File

@@ -55,6 +55,7 @@ enum thermal_notify_event {
THERMAL_TZ_BIND_CDEV, /* Cooling dev is bind to the thermal zone */
THERMAL_TZ_UNBIND_CDEV, /* Cooling dev is unbind from the thermal zone */
THERMAL_INSTANCE_WEIGHT_CHANGED, /* Thermal instance weight changed */
THERMAL_TZ_RESUME, /* Thermal zone is resuming after system sleep */
};
/**

View File

@@ -680,7 +680,7 @@ struct trace_event_file {
* caching and such. Which is mostly OK ;-)
*/
unsigned long flags;
atomic_t ref; /* ref count for opened files */
refcount_t ref; /* ref count for opened files */
atomic_t sm_ref; /* soft-mode reference counter */
atomic_t tm_ref; /* trigger-mode reference counter */
};
@@ -880,7 +880,6 @@ do { \
struct perf_event;
DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
DECLARE_PER_CPU(int, bpf_kprobe_override);
extern int perf_trace_init(struct perf_event *event);
extern void perf_trace_destroy(struct perf_event *event);

View File

@@ -34,10 +34,13 @@ struct reclaim_stat {
unsigned nr_lazyfree_fail;
};
enum writeback_stat_item {
/* Stat data for system wide items */
enum vm_stat_item {
NR_DIRTY_THRESHOLD,
NR_DIRTY_BG_THRESHOLD,
NR_VM_WRITEBACK_STAT_ITEMS,
NR_MEMMAP_PAGES, /* page metadata allocated through buddy allocator */
NR_MEMMAP_BOOT_PAGES, /* page metadata allocated through boot allocator */
NR_VM_STAT_ITEMS,
};
#ifdef CONFIG_VM_EVENT_COUNTERS
@@ -514,21 +517,13 @@ static inline const char *lru_list_name(enum lru_list lru)
return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_"
}
static inline const char *writeback_stat_name(enum writeback_stat_item item)
{
return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
NR_VM_NUMA_EVENT_ITEMS +
NR_VM_NODE_STAT_ITEMS +
item];
}
#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
static inline const char *vm_event_name(enum vm_event_item item)
{
return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
NR_VM_NUMA_EVENT_ITEMS +
NR_VM_NODE_STAT_ITEMS +
NR_VM_WRITEBACK_STAT_ITEMS +
NR_VM_STAT_ITEMS +
item];
}
#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */
@@ -625,7 +620,6 @@ static inline void lruvec_stat_sub_folio(struct folio *folio,
lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
}
void __meminit mod_node_early_perpage_metadata(int nid, long delta);
void __meminit store_early_perpage_metadata(void);
void memmap_boot_pages_add(long delta);
void memmap_pages_add(long delta);
#endif /* _LINUX_VMSTAT_H */

View File

@@ -230,8 +230,12 @@ struct vsock_tap {
int vsock_add_tap(struct vsock_tap *vt);
int vsock_remove_tap(struct vsock_tap *vt);
void vsock_deliver_tap(struct sk_buff *build_skb(void *opaque), void *opaque);
int __vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
int flags);
int vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
int flags);
int __vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
size_t len, int flags);
int vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
size_t len, int flags);

View File

@@ -206,14 +206,17 @@ enum {
*/
HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
/* When this quirk is set, the controller has validated that
* LE states reported through the HCI_LE_READ_SUPPORTED_STATES are
* valid. This mechanism is necessary as many controllers have
been seen as having trouble initiating a connectable
* advertisement despite the state combination being reported as
* supported.
/* When this quirk is set, the LE states reported through the
* HCI_LE_READ_SUPPORTED_STATES are invalid/broken.
*
* This mechanism is necessary as many controllers have been seen as
* having trouble initiating a connectable advertisement despite the
* state combination being reported as supported.
*
* This quirk can be set before hci_register_dev is called or
* during the hdev->setup vendor callback.
*/
HCI_QUIRK_VALID_LE_STATES,
HCI_QUIRK_BROKEN_LE_STATES,
/* When this quirk is set, then erroneous data reporting
* is ignored. This is mainly due to the fact that the HCI

View File

@@ -825,7 +825,7 @@ extern struct mutex hci_cb_list_lock;
} while (0)
#define hci_dev_le_state_simultaneous(hdev) \
(test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) && \
(!test_bit(HCI_QUIRK_BROKEN_LE_STATES, &hdev->quirks) && \
(hdev->le_states[4] & 0x08) && /* Central */ \
(hdev->le_states[4] & 0x40) && /* Peripheral */ \
(hdev->le_states[3] & 0x10)) /* Simultaneous */

View File

@@ -403,14 +403,18 @@ struct dsa_switch {
*/
u32 configure_vlan_while_not_filtering:1;
/* If the switch driver always programs the CPU port as egress tagged
* despite the VLAN configuration indicating otherwise, then setting
* @untag_bridge_pvid will force the DSA receive path to pop the
* bridge's default_pvid VLAN tagged frames to offer a consistent
* behavior between a vlan_filtering=0 and vlan_filtering=1 bridge
* device.
/* Pop the default_pvid of VLAN-unaware bridge ports from tagged frames.
* DEPRECATED: Do NOT set this field in new drivers. Instead look at
* the dsa_software_vlan_untag() comments.
*/
u32 untag_bridge_pvid:1;
/* Pop the default_pvid of VLAN-aware bridge ports from tagged frames.
* Useful if the switch cannot preserve the VLAN tag as seen on the
* wire for user port ingress, and chooses to send all frames as
* VLAN-tagged to the CPU, including those which were originally
* untagged.
*/
u32 untag_vlan_aware_bridge_pvid:1;
/* Let DSA manage the FDB entries towards the
* CPU, based on the software bridge database.
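As a sketch (hypothetical driver, not from this diff), a switch whose hardware always sends frames VLAN-tagged to the CPU would opt into the new flag during setup:

	static int example_setup(struct dsa_switch *ds)
	{
		/* restore the on-wire untagged-ness the hardware lost */
		ds->untag_vlan_aware_bridge_pvid = true;
		return 0;
	}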

View File

@@ -70,6 +70,7 @@ struct kcm_sock {
struct work_struct tx_work;
struct list_head wait_psock_list;
struct sk_buff *seq_skb;
struct mutex tx_mutex;
u32 tx_stopped : 1;
/* Don't use bit fields here, these are set under different locks */

View File

@@ -275,6 +275,7 @@ struct mana_cq {
/* NAPI data */
struct napi_struct napi;
int work_done;
int work_done_since_doorbell;
int budget;
};

View File

@@ -234,7 +234,7 @@ static inline sector_t scsi_get_lba(struct scsi_cmnd *scmd)
static inline unsigned int scsi_logical_block_count(struct scsi_cmnd *scmd)
{
unsigned int shift = ilog2(scmd->device->sector_size) - SECTOR_SHIFT;
unsigned int shift = ilog2(scmd->device->sector_size);
return blk_rq_bytes(scsi_cmd_to_rq(scmd)) >> shift;
}
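A quick arithmetic check of the corrected shift, with assumed values:

	/* sector_size = 4096   ->  shift = ilog2(4096) = 12
	 * blk_rq_bytes() = 8192 -> 8192 >> 12 = 2 logical blocks (correct)
	 * old code: shift = 12 - SECTOR_SHIFT = 3 -> 8192 >> 3 = 1024 (wrong)
	 */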

View File

@@ -813,6 +813,9 @@ struct ocelot {
const u32 *const *map;
struct list_head stats_regions;
spinlock_t inj_lock;
spinlock_t xtr_lock;
u32 pool_size[OCELOT_SB_NUM][OCELOT_SB_POOL_NUM];
int packet_buffer_size;
int num_frame_refs;
@@ -966,10 +969,17 @@ void __ocelot_target_write_ix(struct ocelot *ocelot, enum ocelot_target target,
u32 val, u32 reg, u32 offset);
/* Packet I/O */
void ocelot_lock_inj_grp(struct ocelot *ocelot, int grp);
void ocelot_unlock_inj_grp(struct ocelot *ocelot, int grp);
void ocelot_lock_xtr_grp(struct ocelot *ocelot, int grp);
void ocelot_unlock_xtr_grp(struct ocelot *ocelot, int grp);
void ocelot_lock_xtr_grp_bh(struct ocelot *ocelot, int grp);
void ocelot_unlock_xtr_grp_bh(struct ocelot *ocelot, int grp);
bool ocelot_can_inject(struct ocelot *ocelot, int grp);
void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp,
u32 rew_op, struct sk_buff *skb);
void ocelot_ifh_port_set(void *ifh, int port, u32 rew_op, u32 vlan_tag);
void ocelot_ifh_set_basic(void *ifh, struct ocelot *ocelot, int port,
u32 rew_op, struct sk_buff *skb);
int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **skb);
void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp);
void ocelot_ptp_rx_timestamp(struct ocelot *ocelot, struct sk_buff *skb,
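A rough sketch of how the new per-group locks pair with the existing injection helpers (hypothetical caller, simplified):

	ocelot_lock_inj_grp(ocelot, grp);
	if (ocelot_can_inject(ocelot, grp))
		ocelot_port_inject_frame(ocelot, port, grp, rew_op, skb);
	ocelot_unlock_inj_grp(ocelot, grp);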

View File

@@ -13,6 +13,7 @@
*/
#define OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port, upstream) ((upstream) << 16 | (port))
#define OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port) (port)
#define OCELOT_VCAP_IS1_VLAN_RECLASSIFY(ocelot, port) ((ocelot)->num_phys_ports + (port))
#define OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port) (port)
#define OCELOT_VCAP_IS2_MRP_REDIRECT(ocelot, port) ((ocelot)->num_phys_ports + (port))
#define OCELOT_VCAP_IS2_MRP_TRAP(ocelot) ((ocelot)->num_phys_ports * 2)
@@ -499,6 +500,7 @@ struct ocelot_vcap_key_vlan {
struct ocelot_vcap_u8 pcp; /* PCP (3 bit) */
enum ocelot_vcap_bit dei; /* DEI */
enum ocelot_vcap_bit tagged; /* Tagged/untagged frame */
enum ocelot_vcap_bit tpid;
};
struct ocelot_vcap_key_etype {

View File

@@ -277,6 +277,11 @@ static inline int cs35l56_force_sync_asp1_registers_from_cache(struct cs35l56_ba
return 0;
}
static inline bool cs35l56_is_otp_register(unsigned int reg)
{
return (reg >> 16) == 3;
}
extern struct regmap_config cs35l56_regmap_i2c;
extern struct regmap_config cs35l56_regmap_spi;
extern struct regmap_config cs35l56_regmap_sdw;

View File

@@ -462,6 +462,11 @@ int snd_soc_component_force_enable_pin_unlocked(
const char *pin);
/* component controls */
struct snd_kcontrol *snd_soc_component_get_kcontrol(struct snd_soc_component *component,
const char * const ctl);
struct snd_kcontrol *
snd_soc_component_get_kcontrol_locked(struct snd_soc_component *component,
const char * const ctl);
int snd_soc_component_notify_control(struct snd_soc_component *component,
const char * const ctl);

View File

@@ -51,6 +51,7 @@
EM(netfs_rreq_trace_resubmit, "RESUBMT") \
EM(netfs_rreq_trace_set_pause, "PAUSE ") \
EM(netfs_rreq_trace_unlock, "UNLOCK ") \
EM(netfs_rreq_trace_unlock_pgpriv2, "UNLCK-2") \
EM(netfs_rreq_trace_unmark, "UNMARK ") \
EM(netfs_rreq_trace_wait_ip, "WAIT-IP") \
EM(netfs_rreq_trace_wait_pause, "WT-PAUS") \
@@ -145,6 +146,7 @@
EM(netfs_folio_trace_clear_g, "clear-g") \
EM(netfs_folio_trace_clear_s, "clear-s") \
EM(netfs_folio_trace_copy_to_cache, "mark-copy") \
EM(netfs_folio_trace_end_copy, "end-copy") \
EM(netfs_folio_trace_filled_gaps, "filled-gaps") \
EM(netfs_folio_trace_kill, "kill") \
EM(netfs_folio_trace_kill_cc, "kill-cc") \

View File

@@ -2277,6 +2277,42 @@ DEFINE_CLIENT_DEVICE_EVENT(rpcrdma_client_remove_one);
DEFINE_CLIENT_DEVICE_EVENT(rpcrdma_client_wait_on);
DEFINE_CLIENT_DEVICE_EVENT(rpcrdma_client_remove_one_done);
DECLARE_EVENT_CLASS(rpcrdma_client_register_class,
TP_PROTO(
const struct ib_device *device,
const struct rpcrdma_notification *rn
),
TP_ARGS(device, rn),
TP_STRUCT__entry(
__string(name, device->name)
__field(void *, callback)
__field(u32, index)
),
TP_fast_assign(
__assign_str(name);
__entry->callback = rn->rn_done;
__entry->index = rn->rn_index;
),
TP_printk("device=%s index=%u done callback=%pS\n",
__get_str(name), __entry->index, __entry->callback
)
);
#define DEFINE_CLIENT_REGISTER_EVENT(name) \
DEFINE_EVENT(rpcrdma_client_register_class, name, \
TP_PROTO( \
const struct ib_device *device, \
const struct rpcrdma_notification *rn \
), \
TP_ARGS(device, rn))
DEFINE_CLIENT_REGISTER_EVENT(rpcrdma_client_register);
DEFINE_CLIENT_REGISTER_EVENT(rpcrdma_client_unregister);
#endif /* _TRACE_RPCRDMA_H */
#include <trace/define_trace.h>

View File

@@ -702,6 +702,31 @@ extern "C" {
*/
#define I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC fourcc_mod_code(INTEL, 15)
/*
* Intel Color Control Surfaces (CCS) for graphics ver. 20 unified compression
* on integrated graphics
*
* The main surface is Tile 4 and at plane index 0. For semi-planar formats
* like NV12, the Y and UV planes are Tile 4 and are located at plane indices
* 0 and 1, respectively. The CCS for all planes are stored outside of the
* GEM object in a reserved memory area dedicated for the storage of the
* CCS data for all compressible GEM objects.
*/
#define I915_FORMAT_MOD_4_TILED_LNL_CCS fourcc_mod_code(INTEL, 16)
/*
* Intel Color Control Surfaces (CCS) for graphics ver. 20 unified compression
* on discrete graphics
*
* The main surface is Tile 4 and at plane index 0. For semi-planar formats
* like NV12, the Y and UV planes are Tile 4 and are located at plane indices
* 0 and 1, respectively. The CCS for all planes are stored outside of the
* GEM object in a reserved memory area dedicated for the storage of the
* CCS data for all compressible GEM objects. The GEM object must be stored in
* contiguous memory with a size aligned to 64KB
*/
#define I915_FORMAT_MOD_4_TILED_BMG_CCS fourcc_mod_code(INTEL, 17)
/*
* Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks
*

View File

@@ -421,7 +421,7 @@ enum io_uring_msg_ring_flags {
* IO completion data structure (Completion Queue Entry)
*/
struct io_uring_cqe {
__u64 user_data; /* sqe->data submission passed back */
__u64 user_data; /* sqe->user_data value passed back */
__s32 res; /* result code for this event */
__u32 flags;
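The clarified comment describes a simple round-trip contract; a userspace sketch (liburing-style, names assumed):

	sqe->user_data = 0xcafef00d;	/* tag chosen at submission */
	/* ... submit and wait for completion ... */
	assert(cqe->user_data == 0xcafef00d);	/* comes back verbatim */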

View File

@@ -42,9 +42,10 @@
* - 1.14 - Update kfd_event_data
* - 1.15 - Enable managing mappings in compute VMs with GEM_VA ioctl
* - 1.16 - Add contiguous VRAM allocation flag
* - 1.17 - Add SDMA queue creation with target SDMA engine ID
*/
#define KFD_IOCTL_MAJOR_VERSION 1
#define KFD_IOCTL_MINOR_VERSION 16
#define KFD_IOCTL_MINOR_VERSION 17
struct kfd_ioctl_get_version_args {
__u32 major_version; /* from KFD */
@@ -56,6 +57,7 @@ struct kfd_ioctl_get_version_args {
#define KFD_IOC_QUEUE_TYPE_SDMA 0x1
#define KFD_IOC_QUEUE_TYPE_COMPUTE_AQL 0x2
#define KFD_IOC_QUEUE_TYPE_SDMA_XGMI 0x3
#define KFD_IOC_QUEUE_TYPE_SDMA_BY_ENG_ID 0x4
#define KFD_MAX_QUEUE_PERCENTAGE 100
#define KFD_MAX_QUEUE_PRIORITY 15
@@ -78,6 +80,8 @@ struct kfd_ioctl_create_queue_args {
__u64 ctx_save_restore_address; /* to KFD */
__u32 ctx_save_restore_size; /* to KFD */
__u32 ctl_stack_size; /* to KFD */
__u32 sdma_engine_id; /* to KFD */
__u32 pad;
};
struct kfd_ioctl_destroy_queue_args {

View File

@@ -3,6 +3,7 @@
#define __LINUX_NSFS_H
#include <linux/ioctl.h>
#include <linux/types.h>
#define NSIO 0xb7
@@ -16,7 +17,7 @@
/* Get owner UID (in the caller's user namespace) for a user namespace */
#define NS_GET_OWNER_UID _IO(NSIO, 0x4)
/* Get the id for a mount namespace */
#define NS_GET_MNTNS_ID _IO(NSIO, 0x5)
#define NS_GET_MNTNS_ID _IOR(NSIO, 0x5, __u64)
/* Translate pid from target pid namespace into the caller's pid namespace. */
#define NS_GET_PID_FROM_PIDNS _IOR(NSIO, 0x6, int)
/* Return thread-group leader id of pid in the caller's pid namespace. */
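Hypothetical userspace usage of the corrected definition (the _IOR encoding now advertises the 8-byte value the kernel writes):

	__u64 mnt_ns_id;
	int fd = open("/proc/self/ns/mnt", O_RDONLY);

	if (fd >= 0 && ioctl(fd, NS_GET_MNTNS_ID, &mnt_ns_id) == 0)
		printf("mntns id: %llu\n", (unsigned long long)mnt_ns_id);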

View File

@@ -51,6 +51,7 @@ typedef enum {
SEV_RET_INVALID_PLATFORM_STATE,
SEV_RET_INVALID_GUEST_STATE,
SEV_RET_INAVLID_CONFIG,
SEV_RET_INVALID_CONFIG = SEV_RET_INAVLID_CONFIG,
SEV_RET_INVALID_LEN,
SEV_RET_ALREADY_OWNED,
SEV_RET_INVALID_CERTIFICATE,

View File

@@ -8,14 +8,11 @@
#define FASTRPC_IOCTL_ALLOC_DMA_BUFF _IOWR('R', 1, struct fastrpc_alloc_dma_buf)
#define FASTRPC_IOCTL_FREE_DMA_BUFF _IOWR('R', 2, __u32)
#define FASTRPC_IOCTL_INVOKE _IOWR('R', 3, struct fastrpc_invoke)
/* This ioctl is only supported with secure device nodes */
#define FASTRPC_IOCTL_INIT_ATTACH _IO('R', 4)
#define FASTRPC_IOCTL_INIT_CREATE _IOWR('R', 5, struct fastrpc_init_create)
#define FASTRPC_IOCTL_MMAP _IOWR('R', 6, struct fastrpc_req_mmap)
#define FASTRPC_IOCTL_MUNMAP _IOWR('R', 7, struct fastrpc_req_munmap)
/* This ioctl is only supported with secure device nodes */
#define FASTRPC_IOCTL_INIT_ATTACH_SNS _IO('R', 8)
/* This ioctl is only supported with secure device nodes */
#define FASTRPC_IOCTL_INIT_CREATE_STATIC _IOWR('R', 9, struct fastrpc_init_create_static)
#define FASTRPC_IOCTL_MEM_MAP _IOWR('R', 10, struct fastrpc_mem_map)
#define FASTRPC_IOCTL_MEM_UNMAP _IOWR('R', 11, struct fastrpc_mem_unmap)

View File

@@ -676,6 +676,14 @@ enum ufshcd_quirks {
* the standard best practice for managing keys).
*/
UFSHCD_QUIRK_KEYS_IN_PRDT = 1 << 24,
/*
* This quirk indicates that the controller reports the value 1 (not
* supported) in the Legacy Single DoorBell Support (LSDBS) bit of the
* Controller Capabilities register although it supports the legacy
* single doorbell mode.
*/
UFSHCD_QUIRK_BROKEN_LSDBS_CAP = 1 << 25,
};
enum ufshcd_caps {