Merge branch 'next' into for-linus

Prepare input updates for 5.4 merge window.
Dmitry Torokhov
2019-09-16 09:56:27 -07:00
13457 changed files with 1035274 additions and 452516 deletions

include/Kbuild: new file, 1268 lines (diff suppressed because it is too large)

include/acpi/acpi_bus.h

@@ -506,13 +506,16 @@ int acpi_bus_get_status(struct acpi_device *device);
int acpi_bus_set_power(acpi_handle handle, int state);
const char *acpi_power_state_string(int state);
int acpi_device_get_power(struct acpi_device *device, int *state);
int acpi_device_set_power(struct acpi_device *device, int state);
int acpi_bus_init_power(struct acpi_device *device);
int acpi_device_fix_up_power(struct acpi_device *device);
int acpi_bus_update_power(acpi_handle handle, int *state_p);
int acpi_device_update_power(struct acpi_device *device, int *state_p);
bool acpi_bus_power_manageable(acpi_handle handle);
int acpi_device_power_add_dependent(struct acpi_device *adev,
struct device *dev);
void acpi_device_power_remove_dependent(struct acpi_device *adev,
struct device *dev);
#ifdef CONFIG_PM
bool acpi_bus_can_wakeup(acpi_handle handle);
@@ -651,6 +654,12 @@ static inline int acpi_pm_set_bridge_wakeup(struct device *dev, bool enable)
}
#endif
#ifdef CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT
bool acpi_sleep_state_supported(u8 sleep_state);
#else
static inline bool acpi_sleep_state_supported(u8 sleep_state) { return false; }
#endif
#ifdef CONFIG_ACPI_SLEEP
u32 acpi_target_system_state(void);
#else

include/acpi/acpi_drivers.h

@@ -12,7 +12,7 @@
#define ACPI_MAX_STRING 80
/*
-* Please update drivers/acpi/debug.c and Documentation/acpi/debug.txt
+* Please update drivers/acpi/debug.c and Documentation/firmware-guide/acpi/debug.rst
* if you add to this list.
*/
#define ACPI_BUS_COMPONENT 0x00010000

include/acpi/acpi_io.h

@@ -16,8 +16,8 @@ static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
extern bool acpi_permanent_mmap;
-void __iomem *__ref
-acpi_os_map_iomem(acpi_physical_address phys, acpi_size size);
+void __iomem __ref
+*acpi_os_map_iomem(acpi_physical_address phys, acpi_size size);
void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size);
void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size);

include/acpi/acpixf.h

@@ -12,7 +12,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20190509
+#define ACPI_CA_VERSION 0x20190703
#include <acpi/acconfig.h>
#include <acpi/actypes.h>

include/asm-generic/atomic64.h

@@ -10,24 +10,24 @@
#include <linux/types.h>
typedef struct {
-long long counter;
+s64 counter;
} atomic64_t;
#define ATOMIC64_INIT(i) { (i) }
-extern long long atomic64_read(const atomic64_t *v);
-extern void atomic64_set(atomic64_t *v, long long i);
+extern s64 atomic64_read(const atomic64_t *v);
+extern void atomic64_set(atomic64_t *v, s64 i);
#define atomic64_set_release(v, i) atomic64_set((v), (i))
#define ATOMIC64_OP(op) \
-extern void atomic64_##op(long long a, atomic64_t *v);
+extern void atomic64_##op(s64 a, atomic64_t *v);
#define ATOMIC64_OP_RETURN(op) \
-extern long long atomic64_##op##_return(long long a, atomic64_t *v);
+extern s64 atomic64_##op##_return(s64 a, atomic64_t *v);
#define ATOMIC64_FETCH_OP(op) \
-extern long long atomic64_fetch_##op(long long a, atomic64_t *v);
+extern s64 atomic64_fetch_##op(s64 a, atomic64_t *v);
#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op)
@@ -46,11 +46,11 @@ ATOMIC64_OPS(xor)
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
-extern long long atomic64_dec_if_positive(atomic64_t *v);
+extern s64 atomic64_dec_if_positive(atomic64_t *v);
#define atomic64_dec_if_positive atomic64_dec_if_positive
-extern long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n);
-extern long long atomic64_xchg(atomic64_t *v, long long new);
-extern long long atomic64_fetch_add_unless(atomic64_t *v, long long a, long long u);
+extern s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n);
+extern s64 atomic64_xchg(atomic64_t *v, s64 new);
+extern s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u);
#define atomic64_fetch_add_unless atomic64_fetch_add_unless
#endif /* _ASM_GENERIC_ATOMIC64_H */
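
The long long to s64 conversion above is type-hygiene only; the generated code and all callers are unchanged. A minimal usage sketch of the API (hypothetical module code, not part of this diff):

#include <linux/atomic.h>

/* 64-bit stat counter that stays atomic even on 32-bit architectures */
static atomic64_t bytes_rx = ATOMIC64_INIT(0);

static void account_rx(s64 len)
{
	atomic64_add(len, &bytes_rx);		/* relaxed atomic add */
}

static s64 snapshot_and_reset_rx(void)
{
	return atomic64_xchg(&bytes_rx, 0);	/* atomically read and clear */
}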

include/asm-generic/bitops-instrumented.h (new file)

@@ -0,0 +1,263 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file provides wrappers with sanitizer instrumentation for bit
* operations.
*
* To use this functionality, an arch's bitops.h file needs to define each of
* the below bit operations with an arch_ prefix (e.g. arch_set_bit(),
* arch___set_bit(), etc.).
*/
#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_H
#define _ASM_GENERIC_BITOPS_INSTRUMENTED_H
#include <linux/kasan-checks.h>
/**
* set_bit - Atomically set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
* This is a relaxed atomic operation (no implied memory barriers).
*
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
static inline void set_bit(long nr, volatile unsigned long *addr)
{
kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
arch_set_bit(nr, addr);
}
/**
* __set_bit - Set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
* Unlike set_bit(), this function is non-atomic. If it is called on the same
* region of memory concurrently, the effect may be that only one operation
* succeeds.
*/
static inline void __set_bit(long nr, volatile unsigned long *addr)
{
kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
arch___set_bit(nr, addr);
}
/**
* clear_bit - Clears a bit in memory
* @nr: Bit to clear
* @addr: Address to start counting from
*
* This is a relaxed atomic operation (no implied memory barriers).
*/
static inline void clear_bit(long nr, volatile unsigned long *addr)
{
kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
arch_clear_bit(nr, addr);
}
/**
* __clear_bit - Clears a bit in memory
* @nr: the bit to clear
* @addr: the address to start counting from
*
* Unlike clear_bit(), this function is non-atomic. If it is called on the same
* region of memory concurrently, the effect may be that only one operation
* succeeds.
*/
static inline void __clear_bit(long nr, volatile unsigned long *addr)
{
kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
arch___clear_bit(nr, addr);
}
/**
* clear_bit_unlock - Clear a bit in memory, for unlock
* @nr: the bit to set
* @addr: the address to start counting from
*
* This operation is atomic and provides release barrier semantics.
*/
static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
{
kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
arch_clear_bit_unlock(nr, addr);
}
/**
* __clear_bit_unlock - Clears a bit in memory
* @nr: Bit to clear
* @addr: Address to start counting from
*
* This is a non-atomic operation but implies a release barrier before the
* memory operation. It can be used for an unlock if no other CPUs can
* concurrently modify other bits in the word.
*/
static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
{
kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
arch___clear_bit_unlock(nr, addr);
}
/**
* change_bit - Toggle a bit in memory
* @nr: Bit to change
* @addr: Address to start counting from
*
* This is a relaxed atomic operation (no implied memory barriers).
*
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
static inline void change_bit(long nr, volatile unsigned long *addr)
{
kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
arch_change_bit(nr, addr);
}
/**
* __change_bit - Toggle a bit in memory
* @nr: the bit to change
* @addr: the address to start counting from
*
* Unlike change_bit(), this function is non-atomic. If it is called on the same
* region of memory concurrently, the effect may be that only one operation
* succeeds.
*/
static inline void __change_bit(long nr, volatile unsigned long *addr)
{
kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
arch___change_bit(nr, addr);
}
/**
* test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This is an atomic fully-ordered operation (implied full memory barrier).
*/
static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
{
kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_set_bit(nr, addr);
}
/**
* __test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is non-atomic. If two instances of this operation race, one
* can appear to succeed but actually fail.
*/
static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
{
kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
return arch___test_and_set_bit(nr, addr);
}
/**
* test_and_set_bit_lock - Set a bit and return its old value, for lock
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is atomic and provides acquire barrier semantics if
* the returned value is 0.
* It can be used to implement bit locks.
*/
static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
{
kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_set_bit_lock(nr, addr);
}
/**
* test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
* This is an atomic fully-ordered operation (implied full memory barrier).
*/
static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
{
kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_clear_bit(nr, addr);
}
/**
* __test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
* This operation is non-atomic. If two instances of this operation race, one
* can appear to succeed but actually fail.
*/
static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
{
kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
return arch___test_and_clear_bit(nr, addr);
}
/**
* test_and_change_bit - Change a bit and return its old value
* @nr: Bit to change
* @addr: Address to count from
*
* This is an atomic fully-ordered operation (implied full memory barrier).
*/
static inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
{
kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_change_bit(nr, addr);
}
/**
* __test_and_change_bit - Change a bit and return its old value
* @nr: Bit to change
* @addr: Address to count from
*
* This operation is non-atomic. If two instances of this operation race, one
* can appear to succeed but actually fail.
*/
static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
{
kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
return arch___test_and_change_bit(nr, addr);
}
/**
* test_bit - Determine whether a bit is set
* @nr: bit number to test
* @addr: Address to start counting from
*/
static inline bool test_bit(long nr, const volatile unsigned long *addr)
{
kasan_check_read(addr + BIT_WORD(nr), sizeof(long));
return arch_test_bit(nr, addr);
}
#if defined(arch_clear_bit_unlock_is_negative_byte)
/**
* clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
* byte is negative, for unlock.
* @nr: the bit to clear
* @addr: the address to start counting from
*
* This operation is atomic and provides release barrier semantics.
*
* This is a bit of a one-trick-pony for the filemap code, which clears
* PG_locked and tests PG_waiters.
*/
static inline bool
clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
{
kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
return arch_clear_bit_unlock_is_negative_byte(nr, addr);
}
/* Let everybody know we have it. */
#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
#endif
#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_H */
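
To adopt these wrappers, an architecture renames its primitives with an arch_ prefix and then includes this header. A hedged sketch of the wiring; the arch_set_bit() body below uses a compiler builtin as a portable stand-in for a native atomic instruction, not any real architecture's implementation:

/* arch/foo/include/asm/bitops.h (hypothetical) */
#include <linux/bits.h>

static __always_inline void arch_set_bit(long nr, volatile unsigned long *addr)
{
	/* stand-in for the architecture's atomic OR-to-memory instruction */
	__atomic_fetch_or(addr + BIT_WORD(nr), BIT_MASK(nr), __ATOMIC_RELAXED);
}

/* ...arch___set_bit(), arch_clear_bit(), arch_test_bit(), etc. likewise... */

#include <asm-generic/bitops-instrumented.h>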

include/asm-generic/bug.h

@@ -104,8 +104,10 @@ extern void warn_slowpath_null(const char *file, const int line);
warn_slowpath_fmt_taint(__FILE__, __LINE__, taint, arg)
#else
extern __printf(1, 2) void __warn_printk(const char *fmt, ...);
-#define __WARN() __WARN_TAINT(TAINT_WARN)
-#define __WARN_printf(arg...) do { __warn_printk(arg); __WARN(); } while (0)
+#define __WARN() do { \
+printk(KERN_WARNING CUT_HERE); __WARN_TAINT(TAINT_WARN); \
+} while (0)
+#define __WARN_printf(arg...) __WARN_printf_taint(TAINT_WARN, arg)
+#define __WARN_printf_taint(taint, arg...) \
+do { __warn_printk(arg); __WARN_TAINT(taint); } while (0)
#endif

include/asm-generic/cacheflush.h

@@ -5,24 +5,70 @@
/* Keep includes the same across arches. */
#include <linux/mm.h>
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
/*
* The cache doesn't need to be flushed when TLB entries change when
* the cache is mapped to physical memory, not virtual memory
*/
-#define flush_cache_all() do { } while (0)
-#define flush_cache_mm(mm) do { } while (0)
-#define flush_cache_dup_mm(mm) do { } while (0)
-#define flush_cache_range(vma, start, end) do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page) do { } while (0)
-#define flush_dcache_mmap_lock(mapping) do { } while (0)
-#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-#define flush_icache_range(start, end) do { } while (0)
-#define flush_icache_page(vma,pg) do { } while (0)
-#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
-#define flush_cache_vmap(start, end) do { } while (0)
-#define flush_cache_vunmap(start, end) do { } while (0)
+static inline void flush_cache_all(void)
+{
+}
+static inline void flush_cache_mm(struct mm_struct *mm)
+{
+}
+static inline void flush_cache_dup_mm(struct mm_struct *mm)
+{
+}
+static inline void flush_cache_range(struct vm_area_struct *vma,
+unsigned long start,
+unsigned long end)
+{
+}
+static inline void flush_cache_page(struct vm_area_struct *vma,
+unsigned long vmaddr,
+unsigned long pfn)
+{
+}
+static inline void flush_dcache_page(struct page *page)
+{
+}
+static inline void flush_dcache_mmap_lock(struct address_space *mapping)
+{
+}
+static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
+{
+}
+static inline void flush_icache_range(unsigned long start, unsigned long end)
+{
+}
+static inline void flush_icache_page(struct vm_area_struct *vma,
+struct page *page)
+{
+}
+static inline void flush_icache_user_range(struct vm_area_struct *vma,
+struct page *page,
+unsigned long addr, int len)
+{
+}
+static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+}
+static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
+{
+}
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \

include/asm-generic/flat.h (new file)

@@ -0,0 +1,26 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_FLAT_H
#define _ASM_GENERIC_FLAT_H
#include <linux/uaccess.h>
static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval, u32 flags,
u32 *addr)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
return copy_from_user(addr, rp, 4) ? -EFAULT : 0;
#else
return get_user(*addr, rp);
#endif
}
static inline int flat_put_addr_at_rp(u32 __user *rp, u32 addr, u32 rel)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
return copy_to_user(rp, &addr, 4) ? -EFAULT : 0;
#else
return put_user(addr, rp);
#endif
}
#endif /* _ASM_GENERIC_FLAT_H */
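
These helpers keep the bFLT loader word-at-a-time where unaligned accesses are cheap and fall back to copy_{from,to}_user() elsewhere. A sketch of the caller's shape; the rebasing rule and load_addr are simplifications of fs/binfmt_flat.c, not code from this diff:

static int fixup_one_reloc(u32 __user *rp, u32 relval, u32 flags,
			   u32 load_addr)
{
	u32 addr;
	int ret;

	ret = flat_get_addr_from_rp(rp, relval, flags, &addr);
	if (ret)
		return ret;

	addr += load_addr;	/* rebase the pointer into the mapped image */
	return flat_put_addr_at_rp(rp, addr, relval);
}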

include/asm-generic/futex.h

@@ -118,26 +118,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
static inline int
arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
{
-int oldval = 0, ret;
-pagefault_disable();
-switch (op) {
-case FUTEX_OP_SET:
-case FUTEX_OP_ADD:
-case FUTEX_OP_OR:
-case FUTEX_OP_ANDN:
-case FUTEX_OP_XOR:
-default:
-ret = -ENOSYS;
-}
-pagefault_enable();
-if (!ret)
-*oval = oldval;
-return ret;
+return -ENOSYS;
}
static inline int

include/asm-generic/getorder.h

@@ -7,24 +7,6 @@
#include <linux/compiler.h>
#include <linux/log2.h>
-/*
-* Runtime evaluation of get_order()
-*/
-static inline __attribute_const__
-int __get_order(unsigned long size)
-{
-int order;
-size--;
-size >>= PAGE_SHIFT;
-#if BITS_PER_LONG == 32
-order = fls(size);
-#else
-order = fls64(size);
-#endif
-return order;
-}
/**
* get_order - Determine the allocation order of a memory size
* @size: The size for which to get the order
@@ -43,19 +25,27 @@ int __get_order(unsigned long size)
* to hold an object of the specified size.
*
* The result is undefined if the size is 0.
+*
+* This function may be used to initialise variables with compile time
+* evaluations of constants.
*/
-#define get_order(n) \
-( \
-__builtin_constant_p(n) ? ( \
-((n) == 0UL) ? BITS_PER_LONG - PAGE_SHIFT : \
-(((n) < (1UL << PAGE_SHIFT)) ? 0 : \
-ilog2((n) - 1) - PAGE_SHIFT + 1) \
-) : \
-__get_order(n) \
-)
+static inline __attribute_const__ int get_order(unsigned long size)
+{
+if (__builtin_constant_p(size)) {
+if (!size)
+return BITS_PER_LONG - PAGE_SHIFT;
+if (size < (1UL << PAGE_SHIFT))
+return 0;
+return ilog2((size) - 1) - PAGE_SHIFT + 1;
+}
+size--;
+size >>= PAGE_SHIFT;
+#if BITS_PER_LONG == 32
+return fls(size);
+#else
+return fls64(size);
+#endif
+}
#endif /* __ASSEMBLY__ */
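
Folding __get_order() into get_order() keeps the compile-time and runtime paths in one definition without changing results. Worked values, assuming PAGE_SHIFT == 12 (4 KiB pages):

get_order(4096);	/* 0: fits in a single page */
get_order(4097);	/* 1: needs two pages, so order 1 */
get_order(8192);	/* 1: exactly two pages */
get_order(1UL << 20);	/* 8: 1 MiB is 256 pages == 2^8 */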

include/asm-generic/mshyperv.h (new file)

@@ -0,0 +1,180 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Linux-specific definitions for managing interactions with Microsoft's
* Hyper-V hypervisor. The definitions in this file are architecture
* independent. See arch/<arch>/include/asm/mshyperv.h for definitions
* that are specific to architecture <arch>.
*
* Definitions that are specified in the Hyper-V Top Level Functional
* Spec (TLFS) should not go in this file, but should instead go in
* hyperv-tlfs.h.
*
* Copyright (C) 2019, Microsoft, Inc.
*
* Author : Michael Kelley <mikelley@microsoft.com>
*/
#ifndef _ASM_GENERIC_MSHYPERV_H
#define _ASM_GENERIC_MSHYPERV_H
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <asm/ptrace.h>
#include <asm/hyperv-tlfs.h>
struct ms_hyperv_info {
u32 features;
u32 misc_features;
u32 hints;
u32 nested_features;
u32 max_vp_index;
u32 max_lp_index;
};
extern struct ms_hyperv_info ms_hyperv;
extern u64 hv_do_hypercall(u64 control, void *inputaddr, void *outputaddr);
extern u64 hv_do_fast_hypercall8(u16 control, u64 input8);
/* Generate the guest OS identifier as described in the Hyper-V TLFS */
static inline __u64 generate_guest_id(__u64 d_info1, __u64 kernel_version,
__u64 d_info2)
{
__u64 guest_id = 0;
guest_id = (((__u64)HV_LINUX_VENDOR_ID) << 48);
guest_id |= (d_info1 << 48);
guest_id |= (kernel_version << 16);
guest_id |= d_info2;
return guest_id;
}
/* Free the message slot and signal end-of-message if required */
static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
{
/*
* On crash we're reading some other CPU's message page and we need
* to be careful: this other CPU may already have cleared the header
* and the host may already have delivered some other message there.
* In case we blindly write msg->header.message_type we're going
* to lose it. We can still lose a message of the same type but
* we count on the fact that there can only be one
* CHANNELMSG_UNLOAD_RESPONSE and we don't care about other messages
* on crash.
*/
if (cmpxchg(&msg->header.message_type, old_msg_type,
HVMSG_NONE) != old_msg_type)
return;
/*
* The cmpxchg() above does an implicit memory barrier to
* ensure the write to MessageType (ie set to
* HVMSG_NONE) happens before we read the
* MessagePending and EOMing. Otherwise, the EOMing
* will not deliver any more messages since there is
* no empty slot
*/
if (msg->header.message_flags.msg_pending) {
/*
* This will cause message queue rescan to
* possibly deliver another msg from the
* hypervisor
*/
hv_signal_eom();
}
}
void hv_setup_vmbus_irq(void (*handler)(void));
void hv_remove_vmbus_irq(void);
void hv_enable_vmbus_irq(void);
void hv_disable_vmbus_irq(void);
void hv_setup_kexec_handler(void (*handler)(void));
void hv_remove_kexec_handler(void);
void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs));
void hv_remove_crash_handler(void);
#if IS_ENABLED(CONFIG_HYPERV)
/*
* Hypervisor's notion of virtual processor ID is different from
* Linux' notion of CPU ID. This information can only be retrieved
* in the context of the calling CPU. Setup a map for easy access
* to this information.
*/
extern u32 *hv_vp_index;
extern u32 hv_max_vp_index;
/* Sentinel value for an uninitialized entry in hv_vp_index array */
#define VP_INVAL U32_MAX
/**
* hv_cpu_number_to_vp_number() - Map CPU to VP.
* @cpu_number: CPU number in Linux terms
*
* This function returns the mapping between the Linux processor
* number and the hypervisor's virtual processor number, useful
* in making hypercalls and such that talk about specific
* processors.
*
* Return: Virtual processor number in Hyper-V terms
*/
static inline int hv_cpu_number_to_vp_number(int cpu_number)
{
return hv_vp_index[cpu_number];
}
static inline int cpumask_to_vpset(struct hv_vpset *vpset,
const struct cpumask *cpus)
{
int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;
/* valid_bank_mask can represent up to 64 banks */
if (hv_max_vp_index / 64 >= 64)
return 0;
/*
* Clear all banks up to the maximum possible bank as hv_tlb_flush_ex
* structs are not cleared between calls, we risk flushing unneeded
* vCPUs otherwise.
*/
for (vcpu_bank = 0; vcpu_bank <= hv_max_vp_index / 64; vcpu_bank++)
vpset->bank_contents[vcpu_bank] = 0;
/*
* Some banks may end up being empty but this is acceptable.
*/
for_each_cpu(cpu, cpus) {
vcpu = hv_cpu_number_to_vp_number(cpu);
if (vcpu == VP_INVAL)
return -1;
vcpu_bank = vcpu / 64;
vcpu_offset = vcpu % 64;
__set_bit(vcpu_offset, (unsigned long *)
&vpset->bank_contents[vcpu_bank]);
if (vcpu_bank >= nr_bank)
nr_bank = vcpu_bank + 1;
}
vpset->valid_bank_mask = GENMASK_ULL(nr_bank - 1, 0);
return nr_bank;
}
void hyperv_report_panic(struct pt_regs *regs, long err);
void hyperv_report_panic_msg(phys_addr_t pa, size_t size);
bool hv_is_hyperv_initialized(void);
void hyperv_cleanup(void);
#else /* CONFIG_HYPERV */
static inline bool hv_is_hyperv_initialized(void) { return false; }
static inline void hyperv_cleanup(void) {}
#endif /* CONFIG_HYPERV */
#if IS_ENABLED(CONFIG_HYPERV)
extern int hv_setup_stimer0_irq(int *irq, int *vector, void (*handler)(void));
extern void hv_remove_stimer0_irq(int irq);
#endif
#endif
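
A sketch of how a hypercall path might consume cpumask_to_vpset(); the wrapper and the format constant below are assumptions standing in for the arch-specific Hyper-V flush code, not part of this diff:

static int foo_pack_vpset(struct hv_vpset *vpset, const struct cpumask *cpus)
{
	int nr_bank = cpumask_to_vpset(vpset, cpus);

	if (nr_bank < 0)
		return -EINVAL;	/* a CPU had no VP mapping yet (VP_INVAL) */

	/* hypothetical: mark the set as a sparse 4K-bank representation */
	vpset->format = HV_GENERIC_SET_SPARSE_4K;
	return nr_bank;
}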

include/asm-generic/pgalloc.h

@@ -1,13 +1,112 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_GENERIC_PGALLOC_H
#define __ASM_GENERIC_PGALLOC_H
/*
* an empty file is enough for a nommu architecture
*/
#ifdef CONFIG_MMU
#error need to implement an architecture specific asm/pgalloc.h
#define GFP_PGTABLE_KERNEL (GFP_KERNEL | __GFP_ZERO)
#define GFP_PGTABLE_USER (GFP_PGTABLE_KERNEL | __GFP_ACCOUNT)
/**
* __pte_alloc_one_kernel - allocate a page for PTE-level kernel page table
* @mm: the mm_struct of the current context
*
* This function is intended for architectures that need
* anything beyond simple page allocation.
*
* Return: pointer to the allocated memory or %NULL on error
*/
static inline pte_t *__pte_alloc_one_kernel(struct mm_struct *mm)
{
return (pte_t *)__get_free_page(GFP_PGTABLE_KERNEL);
}
#ifndef __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
/**
* pte_alloc_one_kernel - allocate a page for PTE-level kernel page table
* @mm: the mm_struct of the current context
*
* Return: pointer to the allocated memory or %NULL on error
*/
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
return __pte_alloc_one_kernel(mm);
}
#endif
/**
* pte_free_kernel - free PTE-level kernel page table page
* @mm: the mm_struct of the current context
* @pte: pointer to the memory containing the page table
*/
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_page((unsigned long)pte);
}
/**
* __pte_alloc_one - allocate a page for PTE-level user page table
* @mm: the mm_struct of the current context
* @gfp: GFP flags to use for the allocation
*
* Allocates a page and runs the pgtable_page_ctor().
*
* This function is intended for architectures that need
* anything beyond simple page allocation or must have custom GFP flags.
*
* Return: `struct page` initialized as page table or %NULL on error
*/
static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
{
struct page *pte;
pte = alloc_page(gfp);
if (!pte)
return NULL;
if (!pgtable_page_ctor(pte)) {
__free_page(pte);
return NULL;
}
return pte;
}
#ifndef __HAVE_ARCH_PTE_ALLOC_ONE
/**
* pte_alloc_one - allocate a page for PTE-level user page table
* @mm: the mm_struct of the current context
*
* Allocates a page and runs the pgtable_page_ctor().
*
* Return: `struct page` initialized as page table or %NULL on error
*/
static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
return __pte_alloc_one(mm, GFP_PGTABLE_USER);
}
#endif
/*
* Should really implement gc for free page table pages. This could be
* done with a reference count in struct page.
*/
/**
* pte_free - free PTE-level user page table page
* @mm: the mm_struct of the current context
* @pte_page: the `struct page` representing the page table
*/
static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
{
pgtable_page_dtor(pte_page);
__free_page(pte_page);
}
#else /* CONFIG_MMU */
/* This is enough for a nommu architecture */
#define check_pgt_cache() do { } while (0)
#endif /* CONFIG_MMU */
#endif /* __ASM_GENERIC_PGALLOC_H */
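
An architecture that needs extra work on allocation can override a single hook and still reuse the generic helper. A hedged sketch (the foo_* names are hypothetical):

/* arch/foo/include/asm/pgalloc.h (hypothetical) */
#define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
#include <asm-generic/pgalloc.h>

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
	pte_t *pte = __pte_alloc_one_kernel(mm);

	if (pte)
		foo_sync_kernel_pgtable(pte);	/* hypothetical cache/TLB hook */
	return pte;
}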

include/asm-generic/ptrace.h (deleted)

@@ -1,73 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Common low level (register) ptrace helpers
*
* Copyright 2004-2011 Analog Devices Inc.
*/
#ifndef __ASM_GENERIC_PTRACE_H__
#define __ASM_GENERIC_PTRACE_H__
#ifndef __ASSEMBLY__
/* Helpers for working with the instruction pointer */
#ifndef GET_IP
#define GET_IP(regs) ((regs)->pc)
#endif
#ifndef SET_IP
#define SET_IP(regs, val) (GET_IP(regs) = (val))
#endif
static inline unsigned long instruction_pointer(struct pt_regs *regs)
{
return GET_IP(regs);
}
static inline void instruction_pointer_set(struct pt_regs *regs,
unsigned long val)
{
SET_IP(regs, val);
}
#ifndef profile_pc
#define profile_pc(regs) instruction_pointer(regs)
#endif
/* Helpers for working with the user stack pointer */
#ifndef GET_USP
#define GET_USP(regs) ((regs)->usp)
#endif
#ifndef SET_USP
#define SET_USP(regs, val) (GET_USP(regs) = (val))
#endif
static inline unsigned long user_stack_pointer(struct pt_regs *regs)
{
return GET_USP(regs);
}
static inline void user_stack_pointer_set(struct pt_regs *regs,
unsigned long val)
{
SET_USP(regs, val);
}
/* Helpers for working with the frame pointer */
#ifndef GET_FP
#define GET_FP(regs) ((regs)->fp)
#endif
#ifndef SET_FP
#define SET_FP(regs, val) (GET_FP(regs) = (val))
#endif
static inline unsigned long frame_pointer(struct pt_regs *regs)
{
return GET_FP(regs);
}
static inline void frame_pointer_set(struct pt_regs *regs,
unsigned long val)
{
SET_FP(regs, val);
}
#endif /* __ASSEMBLY__ */
#endif

include/asm-generic/vsyscall.h (new file)

@@ -0,0 +1,50 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_GENERIC_VSYSCALL_H
#define __ASM_GENERIC_VSYSCALL_H
#ifndef __ASSEMBLY__
#ifndef __arch_get_k_vdso_data
static __always_inline struct vdso_data *__arch_get_k_vdso_data(void)
{
return NULL;
}
#endif /* __arch_get_k_vdso_data */
#ifndef __arch_update_vdso_data
static __always_inline int __arch_update_vdso_data(void)
{
return 0;
}
#endif /* __arch_update_vdso_data */
#ifndef __arch_get_clock_mode
static __always_inline int __arch_get_clock_mode(struct timekeeper *tk)
{
return 0;
}
#endif /* __arch_get_clock_mode */
#ifndef __arch_use_vsyscall
static __always_inline int __arch_use_vsyscall(struct vdso_data *vdata)
{
return 1;
}
#endif /* __arch_use_vsyscall */
#ifndef __arch_update_vsyscall
static __always_inline void __arch_update_vsyscall(struct vdso_data *vdata,
struct timekeeper *tk)
{
}
#endif /* __arch_update_vsyscall */
#ifndef __arch_sync_vdso_data
static __always_inline void __arch_sync_vdso_data(struct vdso_data *vdata)
{
}
#endif /* __arch_sync_vdso_data */
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_VSYSCALL_H */
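
Each hook here is an overridable default: an architecture defines a macro of the same name before including this header and supplies its own inline. A sketch of the pattern (the vdso_data symbol and the foo arch are hypothetical):

/* arch/foo/include/asm/vdso/vsyscall.h (hypothetical) */
extern struct vdso_data *vdso_data;

#define __arch_get_k_vdso_data __arch_get_k_vdso_data
static __always_inline struct vdso_data *__arch_get_k_vdso_data(void)
{
	return vdso_data;
}

#include <asm-generic/vsyscall.h>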

include/asm-generic/vmlinux.lds.h

@@ -110,10 +110,17 @@
#endif
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY
#define MCOUNT_REC() . = ALIGN(8); \
__start_mcount_loc = .; \
KEEP(*(__patchable_function_entries)) \
__stop_mcount_loc = .;
#else
#define MCOUNT_REC() . = ALIGN(8); \
__start_mcount_loc = .; \
KEEP(*(__mcount_loc)) \
__stop_mcount_loc = .;
#endif
#else
#define MCOUNT_REC()
#endif
@@ -239,6 +246,16 @@
#define ACPI_PROBE_TABLE(name)
#endif
#ifdef CONFIG_THERMAL
#define THERMAL_TABLE(name) \
. = ALIGN(8); \
__##name##_thermal_table = .; \
KEEP(*(__##name##_thermal_table)) \
__##name##_thermal_table_end = .;
#else
#define THERMAL_TABLE(name)
#endif
#define KERNEL_DTB() \
STRUCT_ALIGN(); \
__dtb_start = .; \
@@ -608,6 +625,7 @@
IRQCHIP_OF_MATCH_TABLE() \
ACPI_PROBE_TABLE(irqchip) \
ACPI_PROBE_TABLE(timer) \
THERMAL_TABLE(governor) \
EARLYCON_TABLE() \
LSM_TABLE()

include/clocksource/hyperv_timer.h (new file)

@@ -0,0 +1,107 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Definitions for the clocksource provided by the Hyper-V
* hypervisor to guest VMs, as described in the Hyper-V Top
* Level Functional Spec (TLFS).
*
* Copyright (C) 2019, Microsoft, Inc.
*
* Author: Michael Kelley <mikelley@microsoft.com>
*/
#ifndef __CLKSOURCE_HYPERV_TIMER_H
#define __CLKSOURCE_HYPERV_TIMER_H
#include <linux/clocksource.h>
#include <linux/math64.h>
#include <asm/mshyperv.h>
#define HV_MAX_MAX_DELTA_TICKS 0xffffffff
#define HV_MIN_DELTA_TICKS 1
/* Routines called by the VMbus driver */
extern int hv_stimer_alloc(int sint);
extern void hv_stimer_free(void);
extern void hv_stimer_init(unsigned int cpu);
extern void hv_stimer_cleanup(unsigned int cpu);
extern void hv_stimer_global_cleanup(void);
extern void hv_stimer0_isr(void);
#if IS_ENABLED(CONFIG_HYPERV)
extern struct clocksource *hyperv_cs;
extern void hv_init_clocksource(void);
#endif /* CONFIG_HYPERV */
#ifdef CONFIG_HYPERV_TSCPAGE
extern struct ms_hyperv_tsc_page *hv_get_tsc_page(void);
static inline notrace u64
hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg, u64 *cur_tsc)
{
u64 scale, offset;
u32 sequence;
/*
* The protocol for reading Hyper-V TSC page is specified in Hypervisor
* Top-Level Functional Specification ver. 3.0 and above. To get the
* reference time we must do the following:
* - READ ReferenceTscSequence
* A special '0' value indicates the time source is unreliable and we
* need to use something else. The currently published specification
* versions (up to 4.0b) contain a mistake and wrongly claim '-1'
* instead of '0' as the special value, see commit c35b82ef0294.
* - ReferenceTime =
* ((RDTSC() * ReferenceTscScale) >> 64) + ReferenceTscOffset
* - READ ReferenceTscSequence again. In case its value has changed
* since our first reading we need to discard ReferenceTime and repeat
* the whole sequence as the hypervisor was updating the page in
* between.
*/
do {
sequence = READ_ONCE(tsc_pg->tsc_sequence);
if (!sequence)
return U64_MAX;
/*
* Make sure we read sequence before we read other values from
* TSC page.
*/
smp_rmb();
scale = READ_ONCE(tsc_pg->tsc_scale);
offset = READ_ONCE(tsc_pg->tsc_offset);
*cur_tsc = hv_get_raw_timer();
/*
* Make sure we read sequence after we read all other values
* from TSC page.
*/
smp_rmb();
} while (READ_ONCE(tsc_pg->tsc_sequence) != sequence);
return mul_u64_u64_shr(*cur_tsc, scale, 64) + offset;
}
static inline notrace u64
hv_read_tsc_page(const struct ms_hyperv_tsc_page *tsc_pg)
{
u64 cur_tsc;
return hv_read_tsc_page_tsc(tsc_pg, &cur_tsc);
}
#else /* CONFIG_HYPERV_TSCPAGE */
static inline struct ms_hyperv_tsc_page *hv_get_tsc_page(void)
{
return NULL;
}
static inline u64 hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg,
u64 *cur_tsc)
{
return U64_MAX;
}
#endif /* CONFIG_HYPERV_TSCPAGE */
#endif
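
The U64_MAX return value gives callers a clean fallback path. A sketch of a clocksource read callback built on the helper; foo_read_time_msr() is a hypothetical stand-in for an MSR-based reference-counter read:

static u64 notrace read_hv_clock(struct clocksource *cs)
{
	u64 now = hv_read_tsc_page(hv_get_tsc_page());

	if (now == U64_MAX)			/* sequence of 0: TSC page unusable */
		now = foo_read_time_msr();	/* hypothetical fallback */
	return now;
}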

include/clocksource/timer-davinci.h (new file)

@@ -0,0 +1,44 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* TI DaVinci clocksource driver
*
* Copyright (C) 2019 Texas Instruments
* Author: Bartosz Golaszewski <bgolaszewski@baylibre.com>
*/
#ifndef __TIMER_DAVINCI_H__
#define __TIMER_DAVINCI_H__
#include <linux/clk.h>
#include <linux/ioport.h>
enum {
DAVINCI_TIMER_CLOCKEVENT_IRQ,
DAVINCI_TIMER_CLOCKSOURCE_IRQ,
DAVINCI_TIMER_NUM_IRQS,
};
/**
* struct davinci_timer_cfg - davinci clocksource driver configuration struct
* @reg: register range resource
* @irq: clockevent and clocksource interrupt resources
* @cmp_off: if set, it specifies the compare register used for the clockevent
*
* Note: if the compare register is specified, the driver will use the bottom
* clock half for both clocksource and clockevent and the compare register
* to generate event irqs. The user must supply the correct compare register
* interrupt number.
*
* This is only used by da830, whose DSP uses the top half. The timer
* driver still configures the top half to run in free-run mode.
*/
struct davinci_timer_cfg {
struct resource reg;
struct resource irq[DAVINCI_TIMER_NUM_IRQS];
unsigned int cmp_off;
};
int __init davinci_timer_register(struct clk *clk,
const struct davinci_timer_cfg *data);
#endif /* __TIMER_DAVINCI_H__ */
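
A usage sketch of the registration interface; the base address, interrupt numbers, and clock are hypothetical board values, not taken from this diff:

static const struct davinci_timer_cfg foo_timer_cfg = {
	.reg = DEFINE_RES_MEM(0x01c20000, SZ_4K),
	.irq = {
		[DAVINCI_TIMER_CLOCKEVENT_IRQ]	= DEFINE_RES_IRQ(21),
		[DAVINCI_TIMER_CLOCKSOURCE_IRQ]	= DEFINE_RES_IRQ(22),
	},
	.cmp_off = 0x80,	/* da830-style: use a compare register */
};

static int __init foo_timer_init(struct clk *clk)
{
	return davinci_timer_register(clk, &foo_timer_cfg);
}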

include/crypto/aead.h

@@ -317,21 +317,7 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
*
* Return: 0 if the cipher operation was successful; < 0 if an error occurred
*/
-static inline int crypto_aead_encrypt(struct aead_request *req)
-{
-struct crypto_aead *aead = crypto_aead_reqtfm(req);
-struct crypto_alg *alg = aead->base.__crt_alg;
-unsigned int cryptlen = req->cryptlen;
-int ret;
-crypto_stats_get(alg);
-if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
-ret = -ENOKEY;
-else
-ret = crypto_aead_alg(aead)->encrypt(req);
-crypto_stats_aead_encrypt(cryptlen, alg, ret);
-return ret;
-}
+int crypto_aead_encrypt(struct aead_request *req);
/**
* crypto_aead_decrypt() - decrypt ciphertext
@@ -355,23 +341,7 @@ static inline int crypto_aead_encrypt(struct aead_request *req)
* integrity of the ciphertext or the associated data was violated);
* < 0 if an error occurred.
*/
-static inline int crypto_aead_decrypt(struct aead_request *req)
-{
-struct crypto_aead *aead = crypto_aead_reqtfm(req);
-struct crypto_alg *alg = aead->base.__crt_alg;
-unsigned int cryptlen = req->cryptlen;
-int ret;
-crypto_stats_get(alg);
-if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
-ret = -ENOKEY;
-else if (req->cryptlen < crypto_aead_authsize(aead))
-ret = -EINVAL;
-else
-ret = crypto_aead_alg(aead)->decrypt(req);
-crypto_stats_aead_decrypt(cryptlen, alg, ret);
-return ret;
-}
+int crypto_aead_decrypt(struct aead_request *req);
/**
* DOC: Asynchronous AEAD Request Handle

include/crypto/algapi.h

@@ -189,7 +189,6 @@ void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
int crypto_enqueue_request(struct crypto_queue *queue,
struct crypto_async_request *request);
struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
-int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm);
static inline unsigned int crypto_queue_len(struct crypto_queue *queue)
{
return queue->qlen;
@@ -371,12 +370,6 @@ static inline void *ablkcipher_request_ctx(struct ablkcipher_request *req)
return req->__ctx;
}
-static inline int ablkcipher_tfm_in_queue(struct crypto_queue *queue,
-struct crypto_ablkcipher *tfm)
-{
-return crypto_tfm_in_queue(queue, crypto_ablkcipher_tfm(tfm));
-}
static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb,
u32 type, u32 mask)
{

include/crypto/arc4.h

@@ -6,8 +6,18 @@
#ifndef _CRYPTO_ARC4_H
#define _CRYPTO_ARC4_H
#include <linux/types.h>
#define ARC4_MIN_KEY_SIZE 1
#define ARC4_MAX_KEY_SIZE 256
#define ARC4_BLOCK_SIZE 1
struct arc4_ctx {
u32 S[256];
u32 x, y;
};
int arc4_setkey(struct arc4_ctx *ctx, const u8 *in_key, unsigned int key_len);
void arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in, unsigned int len);
#endif /* _CRYPTO_ARC4_H */
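
With the cipher exposed as a library interface, callers no longer need a crypto_cipher tfm. A minimal sketch:

static void foo_arc4_crypt_buf(const u8 *key, unsigned int keylen,
			       u8 *buf, unsigned int len)
{
	struct arc4_ctx ctx;

	arc4_setkey(&ctx, key, keylen);
	arc4_crypt(&ctx, buf, buf, len);	/* in-place keystream XOR */
	memzero_explicit(&ctx, sizeof(ctx));	/* don't leak key state */
}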

include/crypto/chacha.h

@@ -41,7 +41,7 @@ static inline void chacha20_block(u32 *state, u8 *stream)
}
void hchacha_block(const u32 *in, u32 *out, int nrounds);
-void crypto_chacha_init(u32 *state, struct chacha_ctx *ctx, u8 *iv);
+void crypto_chacha_init(u32 *state, const struct chacha_ctx *ctx, const u8 *iv);
int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keysize);

include/crypto/crypto_wq.h (deleted)

@@ -1,8 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef CRYPTO_WQ_H
#define CRYPTO_WQ_H
#include <linux/workqueue.h>
extern struct workqueue_struct *kcrypto_wq;
#endif

include/crypto/drbg.h

@@ -129,6 +129,8 @@ struct drbg_state {
bool seeded; /* DRBG fully seeded? */
bool pr; /* Prediction resistance enabled? */
bool fips_primed; /* Continuous test primed? */
unsigned char *prev; /* FIPS 140-2 continuous test value */
struct work_struct seed_work; /* asynchronous seeding support */
struct crypto_rng *jent;
const struct drbg_state_ops *d_ops;

include/crypto/internal/hash.h

@@ -196,12 +196,6 @@ static inline struct ahash_request *ahash_dequeue_request(
return ahash_request_cast(crypto_dequeue_request(queue));
}
-static inline int ahash_tfm_in_queue(struct crypto_queue *queue,
-struct crypto_ahash *tfm)
-{
-return crypto_tfm_in_queue(queue, crypto_ahash_tfm(tfm));
-}
static inline void *crypto_shash_ctx(struct crypto_shash *tfm)
{
return crypto_tfm_ctx(&tfm->base);

include/crypto/internal/skcipher.h

@@ -200,6 +200,66 @@ static inline unsigned int crypto_skcipher_alg_max_keysize(
return alg->max_keysize;
}
static inline unsigned int crypto_skcipher_alg_chunksize(
struct skcipher_alg *alg)
{
if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
CRYPTO_ALG_TYPE_BLKCIPHER)
return alg->base.cra_blocksize;
if (alg->base.cra_ablkcipher.encrypt)
return alg->base.cra_blocksize;
return alg->chunksize;
}
static inline unsigned int crypto_skcipher_alg_walksize(
struct skcipher_alg *alg)
{
if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
CRYPTO_ALG_TYPE_BLKCIPHER)
return alg->base.cra_blocksize;
if (alg->base.cra_ablkcipher.encrypt)
return alg->base.cra_blocksize;
return alg->walksize;
}
/**
* crypto_skcipher_chunksize() - obtain chunk size
* @tfm: cipher handle
*
* The block size is set to one for ciphers such as CTR. However,
* you still need to provide incremental updates in multiples of
* the underlying block size as the IV does not have sub-block
* granularity. This is known in this API as the chunk size.
*
* Return: chunk size in bytes
*/
static inline unsigned int crypto_skcipher_chunksize(
struct crypto_skcipher *tfm)
{
return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm));
}
/**
* crypto_skcipher_walksize() - obtain walk size
* @tfm: cipher handle
*
* In some cases, algorithms can only perform optimally when operating on
* multiple blocks in parallel. This is reflected by the walksize, which
* must be a multiple of the chunksize (or equal if the concern does not
* apply).
*
* Return: walk size in bytes
*/
static inline unsigned int crypto_skcipher_walksize(
struct crypto_skcipher *tfm)
{
return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm));
}
/* Helpers for simple block cipher modes of operation */
struct skcipher_ctx_simple {
struct crypto_cipher *cipher; /* underlying block cipher */

include/crypto/skcipher.h

@@ -288,66 +288,6 @@ static inline unsigned int crypto_sync_skcipher_ivsize(
return crypto_skcipher_ivsize(&tfm->base);
}
static inline unsigned int crypto_skcipher_alg_chunksize(
struct skcipher_alg *alg)
{
if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
CRYPTO_ALG_TYPE_BLKCIPHER)
return alg->base.cra_blocksize;
if (alg->base.cra_ablkcipher.encrypt)
return alg->base.cra_blocksize;
return alg->chunksize;
}
static inline unsigned int crypto_skcipher_alg_walksize(
struct skcipher_alg *alg)
{
if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
CRYPTO_ALG_TYPE_BLKCIPHER)
return alg->base.cra_blocksize;
if (alg->base.cra_ablkcipher.encrypt)
return alg->base.cra_blocksize;
return alg->walksize;
}
/**
* crypto_skcipher_chunksize() - obtain chunk size
* @tfm: cipher handle
*
* The block size is set to one for ciphers such as CTR. However,
* you still need to provide incremental updates in multiples of
* the underlying block size as the IV does not have sub-block
* granularity. This is known in this API as the chunk size.
*
* Return: chunk size in bytes
*/
static inline unsigned int crypto_skcipher_chunksize(
struct crypto_skcipher *tfm)
{
return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm));
}
/**
* crypto_skcipher_walksize() - obtain walk size
* @tfm: cipher handle
*
* In some cases, algorithms can only perform optimally when operating on
* multiple blocks in parallel. This is reflected by the walksize, which
* must be a multiple of the chunksize (or equal if the concern does not
* apply).
*
* Return: walk size in bytes
*/
static inline unsigned int crypto_skcipher_walksize(
struct crypto_skcipher *tfm)
{
return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm));
}
/**
* crypto_skcipher_blocksize() - obtain block size of cipher
* @tfm: cipher handle
@@ -479,21 +419,7 @@ static inline struct crypto_sync_skcipher *crypto_sync_skcipher_reqtfm(
*
* Return: 0 if the cipher operation was successful; < 0 if an error occurred
*/
-static inline int crypto_skcipher_encrypt(struct skcipher_request *req)
-{
-struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-struct crypto_alg *alg = tfm->base.__crt_alg;
-unsigned int cryptlen = req->cryptlen;
-int ret;
-crypto_stats_get(alg);
-if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
-ret = -ENOKEY;
-else
-ret = tfm->encrypt(req);
-crypto_stats_skcipher_encrypt(cryptlen, ret, alg);
-return ret;
-}
+int crypto_skcipher_encrypt(struct skcipher_request *req);
/**
* crypto_skcipher_decrypt() - decrypt ciphertext
@@ -506,21 +432,7 @@ static inline int crypto_skcipher_encrypt(struct skcipher_request *req)
*
* Return: 0 if the cipher operation was successful; < 0 if an error occurred
*/
-static inline int crypto_skcipher_decrypt(struct skcipher_request *req)
-{
-struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-struct crypto_alg *alg = tfm->base.__crt_alg;
-unsigned int cryptlen = req->cryptlen;
-int ret;
-crypto_stats_get(alg);
-if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
-ret = -ENOKEY;
-else
-ret = tfm->decrypt(req);
-crypto_stats_skcipher_decrypt(cryptlen, ret, alg);
-return ret;
-}
+int crypto_skcipher_decrypt(struct skcipher_request *req);
/**
* DOC: Symmetric Key Cipher Request Handle

include/drm/amd_asic_type.h

@@ -49,6 +49,7 @@ enum amd_asic_type {
CHIP_VEGA12,
CHIP_VEGA20,
CHIP_RAVEN,
CHIP_NAVI10,
CHIP_LAST,
};

include/drm/bridge/dw_hdmi.h

@@ -150,6 +150,8 @@ struct dw_hdmi *dw_hdmi_bind(struct platform_device *pdev,
struct drm_encoder *encoder,
const struct dw_hdmi_plat_data *plat_data);
void dw_hdmi_resume(struct dw_hdmi *hdmi);
void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense);
void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate);

include/drm/bridge/dw_mipi_dsi.h

@@ -9,10 +9,20 @@
#ifndef __DW_MIPI_DSI__
#define __DW_MIPI_DSI__
#include <linux/types.h>
#include <drm/drm_modes.h>
struct drm_display_mode;
struct drm_encoder;
struct dw_mipi_dsi;
struct mipi_dsi_device;
struct platform_device;
struct dw_mipi_dsi_phy_ops {
int (*init)(void *priv_data);
void (*power_on)(void *priv_data);
void (*power_off)(void *priv_data);
int (*get_lane_mbps)(void *priv_data,
const struct drm_display_mode *mode,
unsigned long mode_flags, u32 lanes, u32 format,

include/drm/drm_atomic.h

@@ -459,6 +459,13 @@ struct drm_private_state *
drm_atomic_get_new_private_obj_state(struct drm_atomic_state *state,
struct drm_private_obj *obj);
struct drm_connector *
drm_atomic_get_old_connector_for_encoder(struct drm_atomic_state *state,
struct drm_encoder *encoder);
struct drm_connector *
drm_atomic_get_new_connector_for_encoder(struct drm_atomic_state *state,
struct drm_encoder *encoder);
/**
* drm_atomic_get_existing_crtc_state - get crtc state, if it exists
* @state: global atomic state object
@@ -950,4 +957,19 @@ drm_atomic_crtc_needs_modeset(const struct drm_crtc_state *state)
state->connectors_changed;
}
/**
* drm_atomic_crtc_effectively_active - compute whether crtc is actually active
* @state: &drm_crtc_state for the CRTC
*
* When in self refresh mode, the crtc_state->active value will be false, since
* the crtc is off. However, in some cases we're interested in whether the crtc
* is active, or effectively active (i.e., it's connected to an active display).
* In these cases, use this function instead of just checking active.
*/
static inline bool
drm_atomic_crtc_effectively_active(const struct drm_crtc_state *state)
{
return state->active || state->self_refresh_active;
}
#endif /* DRM_ATOMIC_H_ */

include/drm/drm_atomic_helper.h

@@ -117,12 +117,8 @@ int drm_atomic_helper_update_plane(struct drm_plane *plane,
struct drm_modeset_acquire_ctx *ctx);
int drm_atomic_helper_disable_plane(struct drm_plane *plane,
struct drm_modeset_acquire_ctx *ctx);
-int __drm_atomic_helper_disable_plane(struct drm_plane *plane,
-struct drm_plane_state *plane_state);
int drm_atomic_helper_set_config(struct drm_mode_set *set,
struct drm_modeset_acquire_ctx *ctx);
-int __drm_atomic_helper_set_config(struct drm_mode_set *set,
-struct drm_atomic_state *state);
int drm_atomic_helper_disable_all(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);

include/drm/drm_atomic_state_helper.h

@@ -37,6 +37,8 @@ struct drm_private_state;
struct drm_modeset_acquire_ctx;
struct drm_device;
void __drm_atomic_helper_crtc_reset(struct drm_crtc *crtc,
struct drm_crtc_state *state);
void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc);
void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
@@ -60,6 +62,7 @@ void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
void __drm_atomic_helper_connector_reset(struct drm_connector *connector,
struct drm_connector_state *conn_state);
void drm_atomic_helper_connector_reset(struct drm_connector *connector);
void drm_atomic_helper_connector_tv_reset(struct drm_connector *connector);
void
__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
struct drm_connector_state *state);

include/drm/drm_auth.h

@@ -1,3 +1,6 @@
+#ifndef _DRM_AUTH_H_
+#define _DRM_AUTH_H_
/*
* Internal Header for the Direct Rendering Manager
*
@@ -25,8 +28,12 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#ifndef _DRM_AUTH_H_
-#define _DRM_AUTH_H_
#include <linux/idr.h>
#include <linux/kref.h>
#include <linux/wait.h>
struct drm_file;
struct drm_hw_lock;
/*
* Legacy DRI1 locking data structure. Only here instead of in drm_legacy.h for

include/drm/drm_bridge.h

@@ -237,6 +237,103 @@ struct drm_bridge_funcs {
* The enable callback is optional.
*/
void (*enable)(struct drm_bridge *bridge);
/**
* @atomic_pre_enable:
*
* This callback should enable the bridge. It is called right before
* the preceding element in the display pipe is enabled. If the
* preceding element is a bridge this means it's called before that
* bridge's @atomic_pre_enable or @pre_enable function. If the preceding
* element is a &drm_encoder it's called right before the encoder's
* &drm_encoder_helper_funcs.atomic_enable hook.
*
* The display pipe (i.e. clocks and timing signals) feeding this bridge
* will not yet be running when this callback is called. The bridge must
* not enable the display link feeding the next bridge in the chain (if
* there is one) when this callback is called.
*
* Note that this function will only be invoked in the context of an
* atomic commit. It will not be invoked from &drm_bridge_pre_enable. It
* would be prudent to also provide an implementation of @pre_enable if
* you are expecting driver calls into &drm_bridge_pre_enable.
*
* The @atomic_pre_enable callback is optional.
*/
void (*atomic_pre_enable)(struct drm_bridge *bridge,
struct drm_atomic_state *state);
/**
* @atomic_enable:
*
* This callback should enable the bridge. It is called right after
* the preceding element in the display pipe is enabled. If the
* preceding element is a bridge this means it's called after that
* bridge's @atomic_enable or @enable function. If the preceding element
* is a &drm_encoder it's called right after the encoder's
* &drm_encoder_helper_funcs.atomic_enable hook.
*
* The bridge can assume that the display pipe (i.e. clocks and timing
* signals) feeding it is running when this callback is called. This
* callback must enable the display link feeding the next bridge in the
* chain if there is one.
*
* Note that this function will only be invoked in the context of an
* atomic commit. It will not be invoked from &drm_bridge_enable. It
* would be prudent to also provide an implementation of @enable if
* you are expecting driver calls into &drm_bridge_enable.
*
* The enable callback is optional.
*/
void (*atomic_enable)(struct drm_bridge *bridge,
struct drm_atomic_state *state);
/**
* @atomic_disable:
*
* This callback should disable the bridge. It is called right before
* the preceding element in the display pipe is disabled. If the
* preceding element is a bridge this means it's called before that
* bridge's @atomic_disable or @disable vfunc. If the preceding element
* is a &drm_encoder it's called right before the
* &drm_encoder_helper_funcs.atomic_disable hook.
*
* The bridge can assume that the display pipe (i.e. clocks and timing
* signals) feeding it is still running when this callback is called.
*
* Note that this function will only be invoked in the context of an
* atomic commit. It will not be invoked from &drm_bridge_disable. It
* would be prudent to also provide an implementation of @disable if
* you are expecting driver calls into &drm_bridge_disable.
*
* The disable callback is optional.
*/
void (*atomic_disable)(struct drm_bridge *bridge,
struct drm_atomic_state *state);
/**
* @atomic_post_disable:
*
* This callback should disable the bridge. It is called right after the
* preceding element in the display pipe is disabled. If the preceding
* element is a bridge this means it's called after that bridge's
* @atomic_post_disable or @post_disable function. If the preceding
* element is a &drm_encoder it's called right after the encoder's
* &drm_encoder_helper_funcs.atomic_disable hook.
*
* The bridge must assume that the display pipe (i.e. clocks and timing
* signals) feeding it is no longer running when this callback is
* called.
*
* Note that this function will only be invoked in the context of an
* atomic commit. It will not be invoked from &drm_bridge_post_disable.
* It would be prudent to also provide an implementation of
* @post_disable if you are expecting driver calls into
* &drm_bridge_post_disable.
*
* The post_disable callback is optional.
*/
void (*atomic_post_disable)(struct drm_bridge *bridge,
struct drm_atomic_state *state);
};
/**
@@ -265,6 +362,14 @@ struct drm_bridge_timings {
* input signal after the clock edge.
*/
u32 hold_time_ps;
/**
* @dual_link:
*
* True if the bus operates in dual-link mode. The exact meaning is
* dependent on the bus type. For LVDS buses, this indicates that even-
* and odd-numbered pixels are received on separate links.
*/
bool dual_link;
};
/**
@@ -314,6 +419,15 @@ void drm_bridge_mode_set(struct drm_bridge *bridge,
void drm_bridge_pre_enable(struct drm_bridge *bridge);
void drm_bridge_enable(struct drm_bridge *bridge);
void drm_atomic_bridge_disable(struct drm_bridge *bridge,
struct drm_atomic_state *state);
void drm_atomic_bridge_post_disable(struct drm_bridge *bridge,
struct drm_atomic_state *state);
void drm_atomic_bridge_pre_enable(struct drm_bridge *bridge,
struct drm_atomic_state *state);
void drm_atomic_bridge_enable(struct drm_bridge *bridge,
struct drm_atomic_state *state);
#ifdef CONFIG_DRM_PANEL_BRIDGE
struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel,
u32 connector_type);

include/drm/drm_client.h

@@ -3,8 +3,13 @@
#ifndef _DRM_CLIENT_H_
#define _DRM_CLIENT_H_
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
struct drm_client_dev;
struct drm_device;
struct drm_file;
@@ -85,6 +90,16 @@ struct drm_client_dev {
* @file: DRM file
*/
struct drm_file *file;
/**
* @modeset_mutex: Protects @modesets.
*/
struct mutex modeset_mutex;
/**
* @modesets: CRTC configurations
*/
struct drm_mode_set *modesets;
};
int drm_client_init(struct drm_device *dev, struct drm_client_dev *client,
@@ -134,6 +149,39 @@ struct drm_client_buffer {
struct drm_client_buffer *
drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format);
void drm_client_framebuffer_delete(struct drm_client_buffer *buffer);
void *drm_client_buffer_vmap(struct drm_client_buffer *buffer);
void drm_client_buffer_vunmap(struct drm_client_buffer *buffer);
int drm_client_modeset_create(struct drm_client_dev *client);
void drm_client_modeset_free(struct drm_client_dev *client);
int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width, unsigned int height);
bool drm_client_rotation(struct drm_mode_set *modeset, unsigned int *rotation);
int drm_client_modeset_commit_force(struct drm_client_dev *client);
int drm_client_modeset_commit(struct drm_client_dev *client);
int drm_client_modeset_dpms(struct drm_client_dev *client, int mode);
/**
* drm_client_for_each_modeset() - Iterate over client modesets
* @modeset: &drm_mode_set loop cursor
* @client: DRM client
*/
#define drm_client_for_each_modeset(modeset, client) \
for (({ lockdep_assert_held(&(client)->modeset_mutex); }), \
modeset = (client)->modesets; modeset->crtc; modeset++)
/**
* drm_client_for_each_connector_iter - connector_list iterator macro
* @connector: &struct drm_connector pointer used as cursor
* @iter: &struct drm_connector_list_iter
*
* This iterates the connectors that are usable for internal clients (excludes
* writeback connectors).
*
* For more info see drm_for_each_connector_iter().
*/
#define drm_client_for_each_connector_iter(connector, iter) \
drm_for_each_connector_iter(connector, iter) \
if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
int drm_client_debugfs_init(struct drm_minor *minor);

include/drm/drm_connector.h

@@ -463,14 +463,38 @@ int drm_display_info_set_bus_formats(struct drm_display_info *info,
const u32 *formats,
unsigned int num_formats);
/**
* struct drm_connector_tv_margins - TV connector related margins
*
* Describes the margins in pixels to put around the image on TV
* connectors to deal with overscan.
*/
struct drm_connector_tv_margins {
/**
* @bottom: Bottom margin in pixels.
*/
unsigned int bottom;
/**
* @left: Left margin in pixels.
*/
unsigned int left;
/**
* @right: Right margin in pixels.
*/
unsigned int right;
/**
* @top: Top margin in pixels.
*/
unsigned int top;
};
/**
* struct drm_tv_connector_state - TV connector related states
* @subconnector: selected subconnector
-* @margins: margins (all margins are expressed in pixels)
-* @margins.left: left margin
-* @margins.right: right margin
-* @margins.top: top margin
-* @margins.bottom: bottom margin
+* @margins: TV margins
* @mode: TV mode
* @brightness: brightness in percent
* @contrast: contrast in percent
@@ -481,12 +505,7 @@ int drm_display_info_set_bus_formats(struct drm_display_info *info,
*/
struct drm_tv_connector_state {
enum drm_mode_subconnector subconnector;
-struct {
-unsigned int left;
-unsigned int right;
-unsigned int top;
-unsigned int bottom;
-} margins;
+struct drm_connector_tv_margins margins;
unsigned int mode;
unsigned int brightness;
unsigned int contrast;
@@ -517,6 +536,15 @@ struct drm_connector_state {
* Used by the atomic helpers to select the encoder, through the
* &drm_connector_helper_funcs.atomic_best_encoder or
* &drm_connector_helper_funcs.best_encoder callbacks.
*
* This is also used in the atomic helpers to map encoders to their
* current and previous connectors, see
* &drm_atomic_get_old_connector_for_encoder() and
* &drm_atomic_get_new_connector_for_encoder().
*
* NOTE: Atomic drivers must fill this out (either themselves or through
* helpers), for otherwise the GETCONNECTOR and GETENCODER IOCTLs will
* not return correct data to userspace.
*/
struct drm_encoder *best_encoder;
@@ -539,6 +567,20 @@ struct drm_connector_state {
/** @tv: TV connector state */
struct drm_tv_connector_state tv;
/**
* @self_refresh_aware:
*
* This tracks whether a connector is aware of the self refresh state.
* It should be set to true for those connector implementations which
* understand the self refresh state. This is needed since the crtc
* registers the self refresh helpers and it doesn't know if the
* connectors downstream have implemented self refresh entry/exit.
*
* Drivers should set this to true in atomic_check if they know how to
* handle self_refresh requests.
*/
bool self_refresh_aware;
/**
* @picture_aspect_ratio: Connector property to control the
* HDMI infoframe aspect ratio setting.
@@ -599,6 +641,12 @@ struct drm_connector_state {
* and the connector bpc limitations obtained from edid.
*/
u8 max_bpc;
/**
* @hdr_output_metadata:
* DRM blob property for HDR output metadata
*/
struct drm_property_blob *hdr_output_metadata;
};
/**
@@ -894,19 +942,123 @@ struct drm_connector_funcs {
const struct drm_connector_state *state);
};
-/* mode specified on the command line */
/**
* struct drm_cmdline_mode - DRM Mode passed through the kernel command-line
*
* Each connector can have an initial mode with additional options
* passed through the kernel command line. This structure makes it
* possible to express those parameters and is filled in by the
* command-line parser.
*/
struct drm_cmdline_mode {
/**
* @name:
*
* Name of the mode.
*/
char name[DRM_DISPLAY_MODE_LEN];
/**
* @specified:
*
* Has a mode been read from the command-line?
*/
bool specified;
/**
* @refresh_specified:
*
* Did the mode have a preferred refresh rate?
*/
bool refresh_specified;
/**
* @bpp_specified:
*
* Did the mode have a preferred BPP?
*/
bool bpp_specified;
int xres, yres;
/**
* @xres:
*
* Active resolution on the X axis, in pixels.
*/
int xres;
/**
* @yres:
*
* Active resolution on the Y axis, in pixels.
*/
int yres;
/**
* @bpp:
*
* Bits per pixel for the mode.
*/
int bpp;
/**
* @refresh:
*
* Refresh rate, in Hertz.
*/
int refresh;
/**
* @rb:
*
* Do we need to use reduced blanking?
*/
bool rb;
/**
* @interlace:
*
* The mode is interlaced.
*/
bool interlace;
/**
* @cvt:
*
* The timings will be calculated using the VESA Coordinated
* Video Timings instead of looking up the mode from a table.
*/
bool cvt;
/**
* @margins:
*
* Add margins to the mode calculation (1.8% of xres rounded
* down to 8 pixels and 1.8% of yres).
*/
bool margins;
/**
* @force:
*
* Ignore the hotplug state of the connector, and force its
* state to one of the DRM_FORCE_* values.
*/
enum drm_connector_force force;
/**
* @rotation_reflection:
*
* Initial rotation and reflection of the mode setup from the
* command line. See DRM_MODE_ROTATE_* and
* DRM_MODE_REFLECT_*. The only rotations supported are
* DRM_MODE_ROTATE_0 and DRM_MODE_ROTATE_180.
*/
unsigned int rotation_reflection;
/**
* @tv_margins: TV margins to apply to the mode.
*/
struct drm_connector_tv_margins tv_margins;
};
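
As a hedged illustration of how the parser fills this structure (the exact option grammar lives in drm_modes.c), a command line such as video=HDMI-A-1:1920x1080M@60e,rotate=180 would be parsed into roughly: specified = true, xres = 1920, yres = 1080, cvt = true (the 'M' flag), refresh_specified = true with refresh = 60, force = DRM_FORCE_ON (the 'e' flag), and rotation_reflection = DRM_MODE_ROTATE_180.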
/**
@@ -1061,12 +1213,6 @@ struct drm_connector {
*/
struct drm_property *vrr_capable_property;
/**
* @content_protection_property: DRM ENUM property for content
* protection. See drm_connector_attach_content_protection_property().
*/
struct drm_property *content_protection_property;
/**
* @colorspace_property: Connector property to set the suitable
* colorspace supported by the sink.
@@ -1239,6 +1385,9 @@ struct drm_connector {
* &drm_mode_config.connector_free_work.
*/
struct llist_node free_node;
/** @hdr_sink_metadata: HDR Metadata Information read from sink */
struct hdr_sink_metadata hdr_sink_metadata;
};
#define obj_to_connector(x) container_of(x, struct drm_connector, base)
@@ -1345,8 +1494,6 @@ int drm_connector_attach_scaling_mode_property(struct drm_connector *connector,
u32 scaling_mode_mask);
int drm_connector_attach_vrr_capable_property(
struct drm_connector *connector);
int drm_connector_attach_content_protection_property(
struct drm_connector *connector);
int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
int drm_mode_create_colorspace_property(struct drm_connector *connector);
int drm_mode_create_content_type_property(struct drm_device *dev);

View File

@@ -39,6 +39,7 @@
#include <drm/drm_framebuffer.h>
#include <drm/drm_modes.h>
#include <drm/drm_connector.h>
#include <drm/drm_device.h>
#include <drm/drm_property.h>
#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
@@ -53,6 +54,7 @@ struct drm_mode_set;
struct drm_file;
struct drm_clip_rect;
struct drm_printer;
struct drm_self_refresh_data;
struct device_node;
struct dma_fence;
struct edid;
@@ -299,6 +301,17 @@ struct drm_crtc_state {
*/
bool vrr_enabled;
/**
* @self_refresh_active:
*
* Used by the self refresh helpers to denote when a self refresh
* transition is occurring. This will be set on enable/disable callbacks
* when self refresh is being enabled or disabled. In some cases, it may
* not be desirable to fully shut off the crtc during self refresh.
* CRTCs can inspect this flag and determine the best course of action.
*/
bool self_refresh_active;
/**
* @event:
*
@@ -1087,6 +1100,13 @@ struct drm_crtc {
* The name of the CRTC's fence timeline.
*/
char timeline_name[32];
/**
* @self_refresh_data: Holds the state for the self refresh helpers
*
* Initialized via drm_self_refresh_helper_init().
*/
struct drm_self_refresh_data *self_refresh_data;
};
/**

View File

@@ -32,6 +32,8 @@
#ifndef _DRM_DEBUGFS_H_
#define _DRM_DEBUGFS_H_
#include <linux/types.h>
#include <linux/seq_file.h>
/**
* struct drm_info_list - debugfs info list entry
*

View File

@@ -17,6 +17,7 @@ struct drm_vblank_crtc;
struct drm_sg_mem;
struct drm_local_map;
struct drm_vma_offset_manager;
struct drm_vram_mm;
struct drm_fb_helper;
struct inode;
@@ -286,6 +287,9 @@ struct drm_device {
/** @vma_offset_manager: GEM information */
struct drm_vma_offset_manager *vma_offset_manager;
/** @vram_mm: VRAM MM memory manager */
struct drm_vram_mm *vram_mm;
/**
* @switch_power_state:
*

View File

@@ -40,6 +40,7 @@
#define DATA_BLOCK_DISPLAY_INTERFACE 0x0f
#define DATA_BLOCK_STEREO_DISPLAY_INTERFACE 0x10
#define DATA_BLOCK_TILED_DISPLAY 0x12
#define DATA_BLOCK_CTA 0x81
#define DATA_BLOCK_VENDOR_SPECIFIC 0x7f
@@ -90,4 +91,13 @@ struct displayid_detailed_timing_block {
struct displayid_block base;
struct displayid_detailed_timings_1 timings[0];
};
#define for_each_displayid_db(displayid, block, idx, length) \
for ((block) = (struct displayid_block *)&(displayid)[idx]; \
(idx) + sizeof(struct displayid_block) <= (length) && \
(idx) + sizeof(struct displayid_block) + (block)->num_bytes <= (length) && \
(block)->num_bytes > 0; \
(idx) += (block)->num_bytes + sizeof(struct displayid_block), \
(block) = (struct displayid_block *)&(displayid)[idx])
#endif
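
A minimal usage sketch for the new bounds-checked iterator, assuming a caller that already has the raw DisplayID payload (function and variable names here are illustrative, not taken from the kernel):

static void sketch_walk_displayid(u8 *displayid, int length, int idx)
{
	struct displayid_block *block;

	for_each_displayid_db(displayid, block, idx, length) {
		/* each iteration yields one length-validated data block */
		if (block->tag == DATA_BLOCK_CTA)
			break;	/* e.g. hand off to the CTA-861 parser */
	}
}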

View File

@@ -249,6 +249,7 @@
#define DP_DSC_PEAK_THROUGHPUT 0x06B
# define DP_DSC_THROUGHPUT_MODE_0_MASK (0xf << 0)
# define DP_DSC_THROUGHPUT_MODE_0_SHIFT 0
# define DP_DSC_THROUGHPUT_MODE_0_UPSUPPORTED 0
# define DP_DSC_THROUGHPUT_MODE_0_340 (1 << 0)
# define DP_DSC_THROUGHPUT_MODE_0_400 (2 << 0)
# define DP_DSC_THROUGHPUT_MODE_0_450 (3 << 0)
@@ -263,8 +264,10 @@
# define DP_DSC_THROUGHPUT_MODE_0_900 (12 << 0)
# define DP_DSC_THROUGHPUT_MODE_0_950 (13 << 0)
# define DP_DSC_THROUGHPUT_MODE_0_1000 (14 << 0)
# define DP_DSC_THROUGHPUT_MODE_0_170 (15 << 0)
# define DP_DSC_THROUGHPUT_MODE_1_MASK (0xf << 4)
# define DP_DSC_THROUGHPUT_MODE_1_SHIFT 4
# define DP_DSC_THROUGHPUT_MODE_1_UPSUPPORTED 0
# define DP_DSC_THROUGHPUT_MODE_1_340 (1 << 4)
# define DP_DSC_THROUGHPUT_MODE_1_400 (2 << 4)
# define DP_DSC_THROUGHPUT_MODE_1_450 (3 << 4)
@@ -279,6 +282,7 @@
# define DP_DSC_THROUGHPUT_MODE_1_900 (12 << 4)
# define DP_DSC_THROUGHPUT_MODE_1_950 (13 << 4)
# define DP_DSC_THROUGHPUT_MODE_1_1000 (14 << 4)
# define DP_DSC_THROUGHPUT_MODE_1_170 (15 << 4)
#define DP_DSC_MAX_SLICE_WIDTH 0x06C
#define DP_DSC_MIN_SLICE_WIDTH_VALUE 2560
@@ -352,6 +356,11 @@
# define DP_FEC_CORR_BLK_ERROR_COUNT_CAP (1 << 2)
# define DP_FEC_BIT_ERROR_COUNT_CAP (1 << 3)
/* DP Extended DSC Capabilities */
#define DP_DSC_BRANCH_OVERALL_THROUGHPUT_0 0x0a0 /* DP 1.4a SCR */
#define DP_DSC_BRANCH_OVERALL_THROUGHPUT_1 0x0a1
#define DP_DSC_BRANCH_MAX_LINE_WIDTH 0x0a2
/* link configuration */
#define DP_LINK_BW_SET 0x100
# define DP_LINK_RATE_TABLE 0x00 /* eDP 1.4 */
@@ -1083,17 +1092,30 @@ struct dp_sdp_header {
#define EDP_SDP_HEADER_VALID_PAYLOAD_BYTES 0x1F
#define DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1 0x7F
struct edp_vsc_psr {
/**
* struct dp_sdp - DP secondary data packet
* @sdp_header: DP secondary data packet header
* @db: DP secondary data packet data blocks
* VSC SDP Payload for PSR
* db[0]: Stereo Interface
* db[1]: 0 - PSR State; 1 - Update RFB; 2 - CRC Valid
* db[2]: CRC value bits 7:0 of the R or Cr component
* db[3]: CRC value bits 15:8 of the R or Cr component
* db[4]: CRC value bits 7:0 of the G or Y component
* db[5]: CRC value bits 15:8 of the G or Y component
* db[6]: CRC value bits 7:0 of the B or Cb component
* db[7]: CRC value bits 15:8 of the B or Cb component
* db[8] - db[31]: Reserved
* VSC SDP Payload for Pixel Encoding/Colorimetry Format
* db[0] - db[15]: Reserved
* db[16]: Pixel Encoding and Colorimetry Formats
* db[17]: Dynamic Range and Component Bit Depth
* db[18]: Content Type
* db[19] - db[31]: Reserved
*/
struct dp_sdp {
struct dp_sdp_header sdp_header;
u8 DB0; /* Stereo Interface */
u8 DB1; /* 0 - PSR State; 1 - Update RFB; 2 - CRC Valid */
u8 DB2; /* CRC value bits 7:0 of the R or Cr component */
u8 DB3; /* CRC value bits 15:8 of the R or Cr component */
u8 DB4; /* CRC value bits 7:0 of the G or Y component */
u8 DB5; /* CRC value bits 15:8 of the G or Y component */
u8 DB6; /* CRC value bits 7:0 of the B or Cb component */
u8 DB7; /* CRC value bits 15:8 of the B or Cb component */
u8 DB8_31[24]; /* Reserved */
u8 db[32];
} __packed;
#define EDP_VSC_PSR_STATE_ACTIVE (1<<0)
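
As a hedged sketch of the new generic payload in use, a driver marking PSR active per the db[] layout documented above might do the following (the helper name is hypothetical; DP_SDP_VSC is the standard VSC packet type):

static void sketch_fill_psr_vsc_sdp(struct dp_sdp *sdp)
{
	memset(sdp, 0, sizeof(*sdp));
	sdp->sdp_header.HB1 = DP_SDP_VSC;	/* packet type: VSC */
	sdp->db[1] = EDP_VSC_PSR_STATE_ACTIVE;	/* db[1] carries the PSR state */
}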
@@ -1401,6 +1423,13 @@ enum drm_dp_quirk {
* driver still needs to implement proper handling for such devices.
*/
DP_DPCD_QUIRK_NO_PSR,
/**
* @DP_DPCD_QUIRK_NO_SINK_COUNT:
*
* The device does not set SINK_COUNT to a non-zero value.
* The driver should ignore SINK_COUNT during detection.
*/
DP_DPCD_QUIRK_NO_SINK_COUNT,
};
/**

View File

@@ -25,6 +25,7 @@
#include <linux/types.h>
#include <linux/hdmi.h>
#include <drm/drm_mode.h>
struct drm_device;
struct i2c_adapter;
@@ -176,21 +177,23 @@ struct detailed_timing {
#define DRM_EDID_INPUT_BLANK_TO_BLACK (1 << 4)
#define DRM_EDID_INPUT_VIDEO_LEVEL (3 << 5)
#define DRM_EDID_INPUT_DIGITAL (1 << 7)
#define DRM_EDID_DIGITAL_DEPTH_MASK (7 << 4)
#define DRM_EDID_DIGITAL_DEPTH_UNDEF (0 << 4)
#define DRM_EDID_DIGITAL_DEPTH_6 (1 << 4)
#define DRM_EDID_DIGITAL_DEPTH_8 (2 << 4)
#define DRM_EDID_DIGITAL_DEPTH_10 (3 << 4)
#define DRM_EDID_DIGITAL_DEPTH_12 (4 << 4)
#define DRM_EDID_DIGITAL_DEPTH_14 (5 << 4)
#define DRM_EDID_DIGITAL_DEPTH_16 (6 << 4)
#define DRM_EDID_DIGITAL_DEPTH_RSVD (7 << 4)
#define DRM_EDID_DIGITAL_TYPE_UNDEF (0)
#define DRM_EDID_DIGITAL_TYPE_DVI (1)
#define DRM_EDID_DIGITAL_TYPE_HDMI_A (2)
#define DRM_EDID_DIGITAL_TYPE_HDMI_B (3)
#define DRM_EDID_DIGITAL_TYPE_MDDI (4)
#define DRM_EDID_DIGITAL_TYPE_DP (5)
#define DRM_EDID_DIGITAL_DEPTH_MASK (7 << 4) /* 1.4 */
#define DRM_EDID_DIGITAL_DEPTH_UNDEF (0 << 4) /* 1.4 */
#define DRM_EDID_DIGITAL_DEPTH_6 (1 << 4) /* 1.4 */
#define DRM_EDID_DIGITAL_DEPTH_8 (2 << 4) /* 1.4 */
#define DRM_EDID_DIGITAL_DEPTH_10 (3 << 4) /* 1.4 */
#define DRM_EDID_DIGITAL_DEPTH_12 (4 << 4) /* 1.4 */
#define DRM_EDID_DIGITAL_DEPTH_14 (5 << 4) /* 1.4 */
#define DRM_EDID_DIGITAL_DEPTH_16 (6 << 4) /* 1.4 */
#define DRM_EDID_DIGITAL_DEPTH_RSVD (7 << 4) /* 1.4 */
#define DRM_EDID_DIGITAL_TYPE_MASK (7 << 0) /* 1.4 */
#define DRM_EDID_DIGITAL_TYPE_UNDEF (0 << 0) /* 1.4 */
#define DRM_EDID_DIGITAL_TYPE_DVI (1 << 0) /* 1.4 */
#define DRM_EDID_DIGITAL_TYPE_HDMI_A (2 << 0) /* 1.4 */
#define DRM_EDID_DIGITAL_TYPE_HDMI_B (3 << 0) /* 1.4 */
#define DRM_EDID_DIGITAL_TYPE_MDDI (4 << 0) /* 1.4 */
#define DRM_EDID_DIGITAL_TYPE_DP (5 << 0) /* 1.4 */
#define DRM_EDID_DIGITAL_DFP_1_X (1 << 0) /* 1.3 */
#define DRM_EDID_FEATURE_DEFAULT_GTF (1 << 0)
#define DRM_EDID_FEATURE_PREFERRED_TIMING (1 << 1)
@@ -370,6 +373,10 @@ drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
const struct drm_display_mode *mode,
enum hdmi_quantization_range rgb_quant_range);
int
drm_hdmi_infoframe_set_hdr_metadata(struct hdmi_drm_infoframe *frame,
const struct drm_connector_state *conn_state);
/**
* drm_eld_mnl - Get ELD monitor name length in bytes.
* @eld: pointer to an eld memory structure with mnl set

View File

@@ -43,17 +43,6 @@ enum mode_set_atomic {
ENTER_ATOMIC_MODE_SET,
};
struct drm_fb_offset {
int x, y;
};
struct drm_fb_helper_crtc {
struct drm_mode_set mode_set;
struct drm_display_mode *desired_mode;
int x, y;
int rotation;
};
/**
* struct drm_fb_helper_surface_size - describes fbdev size and scanout surface size
* @fb_width: fbdev width
@@ -104,18 +93,10 @@ struct drm_fb_helper_funcs {
struct drm_fb_helper_surface_size *sizes);
};
struct drm_fb_helper_connector {
struct drm_connector *connector;
};
/**
* struct drm_fb_helper - main structure to emulate fbdev on top of KMS
* @fb: Scanout framebuffer object
* @dev: DRM device
* @crtc_count: number of possible CRTCs
* @crtc_info: per-CRTC helper state (mode, x/y offset, etc)
* @connector_count: number of connected connectors
* @connector_info_alloc_count: size of connector_info
* @funcs: driver callbacks for fb helper
* @fbdev: emulated fbdev device info struct
* @pseudo_palette: fake palette of 16 colors
@@ -147,24 +128,6 @@ struct drm_fb_helper {
struct drm_framebuffer *fb;
struct drm_device *dev;
int crtc_count;
struct drm_fb_helper_crtc *crtc_info;
int connector_count;
int connector_info_alloc_count;
/**
* @sw_rotations:
* Bitmask of all rotations requested for panel-orientation which
* could not be handled in hardware. If only one bit is set
* fbdev->fbcon_rotate_hint gets set to the requested rotation.
*/
int sw_rotations;
/**
* @connector_info:
*
* Array of per-connector information. Do not iterate directly, but use
* drm_fb_helper_for_each_connector.
*/
struct drm_fb_helper_connector **connector_info;
const struct drm_fb_helper_funcs *funcs;
struct fb_info *fbdev;
u32 pseudo_palette[17];
@@ -304,18 +267,8 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd,
int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel);
int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper);
int drm_fb_helper_debug_enter(struct fb_info *info);
int drm_fb_helper_debug_leave(struct fb_info *info);
struct drm_display_mode *
drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector,
int width, int height);
struct drm_display_mode *
drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn);
int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector);
int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
struct drm_connector *connector);
int drm_fb_helper_fbdev_setup(struct drm_device *dev,
struct drm_fb_helper *fb_helper,
@@ -490,12 +443,6 @@ static inline int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper,
return 0;
}
static inline int
drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
{
return 0;
}
static inline int drm_fb_helper_debug_enter(struct fb_info *info)
{
return 0;
@@ -506,34 +453,6 @@ static inline int drm_fb_helper_debug_leave(struct fb_info *info)
return 0;
}
static inline struct drm_display_mode *
drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector,
int width, int height)
{
return NULL;
}
static inline struct drm_display_mode *
drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
int width, int height)
{
return NULL;
}
static inline int
drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper,
struct drm_connector *connector)
{
return 0;
}
static inline int
drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
struct drm_connector *connector)
{
return 0;
}
static inline int
drm_fb_helper_fbdev_setup(struct drm_device *dev,
struct drm_fb_helper *fb_helper,
@@ -575,6 +494,27 @@ drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
#endif
/* TODO: There's a todo entry to remove these three */
static inline int
drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
{
return 0;
}
static inline int
drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper,
struct drm_connector *connector)
{
return 0;
}
static inline int
drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
struct drm_connector *connector)
{
return 0;
}
/**
* drm_fb_helper_remove_conflicting_framebuffers - remove firmware-configured framebuffers
* @a: memory range, users of which are to be removed

View File

@@ -260,6 +260,50 @@ drm_format_info_is_yuv_sampling_444(const struct drm_format_info *info)
return info->is_yuv && info->hsub == 1 && info->vsub == 1;
}
/**
* drm_format_info_plane_width - width of the plane given the first plane
* @info: pixel format info
* @width: width of the first plane
* @plane: plane index
*
* Returns:
* The width of @plane, given that the width of the first plane is @width.
*/
static inline
int drm_format_info_plane_width(const struct drm_format_info *info, int width,
int plane)
{
if (!info || plane >= info->num_planes)
return 0;
if (plane == 0)
return width;
return width / info->hsub;
}
/**
* drm_format_info_plane_height - height of the plane given the first plane
* @info: pixel format info
* @height: height of the first plane
* @plane: plane index
*
* Returns:
* The height of @plane, given that the height of the first plane is @height.
*/
static inline
int drm_format_info_plane_height(const struct drm_format_info *info, int height,
int plane)
{
if (!info || plane >= info->num_planes)
return 0;
if (plane == 0)
return height;
return height / info->vsub;
}
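
As a worked example of the two helpers above (a sketch; DRM_FORMAT_NV12 and its 2x2 chroma subsampling come from drm_fourcc.h):

static void sketch_nv12_plane_sizes(void)
{
	const struct drm_format_info *info = drm_format_info(DRM_FORMAT_NV12);

	/* NV12 carries two planes with hsub = vsub = 2, so a 1920x1080
	 * buffer has a 960x540 chroma plane:
	 */
	WARN_ON(drm_format_info_plane_width(info, 1920, 1) != 960);
	WARN_ON(drm_format_info_plane_height(info, 1080, 1) != 540);
}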
const struct drm_format_info *__drm_format_info(u32 format);
const struct drm_format_info *drm_format_info(u32 format);
const struct drm_format_info *
@@ -268,12 +312,6 @@ drm_get_format_info(struct drm_device *dev,
uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth);
uint32_t drm_driver_legacy_fb_format(struct drm_device *dev,
uint32_t bpp, uint32_t depth);
int drm_format_num_planes(uint32_t format);
int drm_format_plane_cpp(uint32_t format, int plane);
int drm_format_horz_chroma_subsampling(uint32_t format);
int drm_format_vert_chroma_subsampling(uint32_t format);
int drm_format_plane_width(int width, uint32_t format, int plane);
int drm_format_plane_height(int height, uint32_t format, int plane);
unsigned int drm_format_info_block_width(const struct drm_format_info *info,
int plane);
unsigned int drm_format_info_block_height(const struct drm_format_info *info,

View File

@@ -87,6 +87,9 @@ struct drm_framebuffer_funcs {
* for more information as all the semantics and arguments have a one to
* one mapping on this function.
*
* Atomic drivers should use drm_atomic_helper_dirtyfb() to implement
* this hook.
*
* RETURNS:
*
* 0 on success or a negative error code on failure.

View File

@@ -401,9 +401,4 @@ int drm_gem_dumb_destroy(struct drm_file *file,
struct drm_device *dev,
uint32_t handle);
int drm_gem_pin(struct drm_gem_object *obj);
void drm_gem_unpin(struct drm_gem_object *obj);
void *drm_gem_vmap(struct drm_gem_object *obj);
void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr);
#endif /* __DRM_GEM_H__ */

View File

@@ -0,0 +1,153 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef DRM_GEM_VRAM_HELPER_H
#define DRM_GEM_VRAM_HELPER_H
#include <drm/drm_gem.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/kernel.h> /* for container_of() */
struct drm_mode_create_dumb;
struct drm_vram_mm_funcs;
struct filp;
struct vm_area_struct;
#define DRM_GEM_VRAM_PL_FLAG_VRAM TTM_PL_FLAG_VRAM
#define DRM_GEM_VRAM_PL_FLAG_SYSTEM TTM_PL_FLAG_SYSTEM
/*
* Buffer-object helpers
*/
/**
* struct drm_gem_vram_object - GEM object backed by VRAM
* @gem: GEM object
* @bo: TTM buffer object
* @kmap: Mapping information for @bo
* @placement: TTM placement information. Supported placements are \
%TTM_PL_VRAM and %TTM_PL_SYSTEM
* @placements: TTM placement information.
* @pin_count: Pin counter
*
* The type struct drm_gem_vram_object represents a GEM object that is
* backed by VRAM. It can be used for simple framebuffer devices with
* dedicated memory. The buffer object can be evicted to system memory if
* video memory becomes scarce.
*/
struct drm_gem_vram_object {
struct drm_gem_object gem;
struct ttm_buffer_object bo;
struct ttm_bo_kmap_obj kmap;
/* Supported placements are %TTM_PL_VRAM and %TTM_PL_SYSTEM */
struct ttm_placement placement;
struct ttm_place placements[2];
int pin_count;
};
/**
* Returns the container of type &struct drm_gem_vram_object
* for field bo.
* @bo: the VRAM buffer object
* Returns: The containing GEM VRAM object
*/
static inline struct drm_gem_vram_object *drm_gem_vram_of_bo(
struct ttm_buffer_object *bo)
{
return container_of(bo, struct drm_gem_vram_object, bo);
}
/**
* Returns the container of type &struct drm_gem_vram_object
* for field gem.
* @gem: the GEM object
* Returns: The containing GEM VRAM object
*/
static inline struct drm_gem_vram_object *drm_gem_vram_of_gem(
struct drm_gem_object *gem)
{
return container_of(gem, struct drm_gem_vram_object, gem);
}
struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
struct ttm_bo_device *bdev,
size_t size,
unsigned long pg_align,
bool interruptible);
void drm_gem_vram_put(struct drm_gem_vram_object *gbo);
u64 drm_gem_vram_mmap_offset(struct drm_gem_vram_object *gbo);
s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo);
int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag);
int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo);
void *drm_gem_vram_kmap(struct drm_gem_vram_object *gbo, bool map,
bool *is_iomem);
void drm_gem_vram_kunmap(struct drm_gem_vram_object *gbo);
int drm_gem_vram_fill_create_dumb(struct drm_file *file,
struct drm_device *dev,
struct ttm_bo_device *bdev,
unsigned long pg_align,
bool interruptible,
struct drm_mode_create_dumb *args);
/*
* Helpers for struct ttm_bo_driver
*/
void drm_gem_vram_bo_driver_evict_flags(struct ttm_buffer_object *bo,
struct ttm_placement *pl);
int drm_gem_vram_bo_driver_verify_access(struct ttm_buffer_object *bo,
struct file *filp);
extern const struct drm_vram_mm_funcs drm_gem_vram_mm_funcs;
/*
* Helpers for struct drm_driver
*/
void drm_gem_vram_driver_gem_free_object_unlocked(struct drm_gem_object *gem);
int drm_gem_vram_driver_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file,
struct drm_device *dev,
uint32_t handle, uint64_t *offset);
/**
* define DRM_GEM_VRAM_DRIVER - default callback functions for \
&struct drm_driver
*
* Drivers that use VRAM MM and GEM VRAM can use this macro to initialize
* &struct drm_driver with default functions.
*/
#define DRM_GEM_VRAM_DRIVER \
.gem_free_object_unlocked = \
drm_gem_vram_driver_gem_free_object_unlocked, \
.dumb_create = drm_gem_vram_driver_dumb_create, \
.dumb_map_offset = drm_gem_vram_driver_dumb_mmap_offset
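
A hedged sketch of the macro in use (driver name and feature flags are illustrative):

static struct drm_driver sketch_vram_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET,
	/* fills in the default .gem_free_object_unlocked, .dumb_create
	 * and .dumb_map_offset callbacks for VRAM-backed objects */
	DRM_GEM_VRAM_DRIVER,
};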
/*
* PRIME helpers for struct drm_driver
*/
int drm_gem_vram_driver_gem_prime_pin(struct drm_gem_object *obj);
void drm_gem_vram_driver_gem_prime_unpin(struct drm_gem_object *obj);
void *drm_gem_vram_driver_gem_prime_vmap(struct drm_gem_object *obj);
void drm_gem_vram_driver_gem_prime_vunmap(struct drm_gem_object *obj,
void *vaddr);
int drm_gem_vram_driver_gem_prime_mmap(struct drm_gem_object *obj,
struct vm_area_struct *vma);
#define DRM_GEM_VRAM_DRIVER_PRIME \
.gem_prime_export = drm_gem_prime_export, \
.gem_prime_import = drm_gem_prime_import, \
.gem_prime_pin = drm_gem_vram_driver_gem_prime_pin, \
.gem_prime_unpin = drm_gem_vram_driver_gem_prime_unpin, \
.gem_prime_vmap = drm_gem_vram_driver_gem_prime_vmap, \
.gem_prime_vunmap = drm_gem_vram_driver_gem_prime_vunmap, \
.gem_prime_mmap = drm_gem_vram_driver_gem_prime_mmap
#endif

View File

@@ -252,17 +252,44 @@ struct hdcp2_rep_stream_ready {
* host format and back
*/
static inline
u32 drm_hdcp2_seq_num_to_u32(u8 seq_num[HDCP_2_2_SEQ_NUM_LEN])
u32 drm_hdcp_be24_to_cpu(const u8 seq_num[HDCP_2_2_SEQ_NUM_LEN])
{
return (u32)(seq_num[2] | seq_num[1] << 8 | seq_num[0] << 16);
}
static inline
void drm_hdcp2_u32_to_seq_num(u8 seq_num[HDCP_2_2_SEQ_NUM_LEN], u32 val)
void drm_hdcp_cpu_to_be24(u8 seq_num[HDCP_2_2_SEQ_NUM_LEN], u32 val)
{
seq_num[0] = val >> 16;
seq_num[1] = val >> 8;
seq_num[2] = val;
}
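
For example, the two helpers round-trip a 24-bit sequence number through its big-endian wire format (a sketch; the function name is hypothetical):

static void sketch_seq_num_roundtrip(void)
{
	u8 seq_num[HDCP_2_2_SEQ_NUM_LEN];

	drm_hdcp_cpu_to_be24(seq_num, 0x123456);
	/* seq_num now holds { 0x12, 0x34, 0x56 }: most significant byte first */
	WARN_ON(drm_hdcp_be24_to_cpu(seq_num) != 0x123456);
}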
#define DRM_HDCP_SRM_GEN1_MAX_BYTES (5 * 1024)
#define DRM_HDCP_1_4_SRM_ID 0x8
#define DRM_HDCP_SRM_ID_MASK (0xF << 4)
#define DRM_HDCP_1_4_VRL_LENGTH_SIZE 3
#define DRM_HDCP_1_4_DCP_SIG_SIZE 40
#define DRM_HDCP_2_SRM_ID 0x9
#define DRM_HDCP_2_INDICATOR 0x1
#define DRM_HDCP_2_INDICATOR_MASK 0xF
#define DRM_HDCP_2_VRL_LENGTH_SIZE 3
#define DRM_HDCP_2_DCP_SIG_SIZE 384
#define DRM_HDCP_2_NO_OF_DEV_PLUS_RESERVED_SZ 4
#define DRM_HDCP_2_KSV_COUNT_2_LSBITS(byte) (((byte) & 0xC0) >> 6)
struct hdcp_srm_header {
u8 srm_id;
u8 reserved;
__be16 srm_version;
u8 srm_gen_no;
} __packed;
struct drm_device;
struct drm_connector;
bool drm_hdcp_check_ksvs_revoked(struct drm_device *dev,
u8 *ksvs, u32 ksv_count);
int drm_connector_attach_content_protection_property(
struct drm_connector *connector);
#endif

View File

@@ -1,11 +1,5 @@
#ifndef __DRM_DRM_LEGACY_H__
#define __DRM_DRM_LEGACY_H__
#include <drm/drm_auth.h>
#include <drm/drm_hashtab.h>
struct drm_device;
/*
* Legacy driver interfaces for the Direct Rendering Manager
*
@@ -39,6 +33,12 @@ struct drm_device;
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <drm/drm.h>
#include <drm/drm_auth.h>
#include <drm/drm_hashtab.h>
struct drm_device;
struct file;
/*
* Legacy Support for paleontologic DRM drivers

View File

@@ -836,9 +836,29 @@ struct drm_mode_config {
*/
struct drm_property *writeback_out_fence_ptr_property;
/**
* @hdr_output_metadata_property: Connector property containing hdr
* metadata. This will be provided by userspace compositors based
* on HDR content
*/
struct drm_property *hdr_output_metadata_property;
/**
* @content_protection_property: DRM ENUM property for content
* protection. See drm_connector_attach_content_protection_property().
*/
struct drm_property *content_protection_property;
/* dumb ioctl parameters */
uint32_t preferred_depth, prefer_shadow;
/**
* @prefer_shadow_fbdev:
*
* Hint to framebuffer emulation to prefer shadow-fb rendering.
*/
bool prefer_shadow_fbdev;
/**
* @quirk_addfb_prefer_xbgr_30bpp:
*

View File

@@ -537,7 +537,7 @@ void drm_connector_list_update(struct drm_connector *connector);
/* parsing cmdline modes */
bool
drm_mode_parse_command_line_for_connector(const char *mode_option,
struct drm_connector *connector,
const struct drm_connector *connector,
struct drm_cmdline_mode *mode);
struct drm_display_mode *
drm_mode_create_from_cmdline_mode(struct drm_device *dev,

View File

@@ -679,6 +679,52 @@ struct drm_encoder_helper_funcs {
enum drm_connector_status (*detect)(struct drm_encoder *encoder,
struct drm_connector *connector);
/**
* @atomic_disable:
*
* This callback should be used to disable the encoder. With the atomic
* drivers it is called before this encoder's CRTC has been shut off
* using their own &drm_crtc_helper_funcs.atomic_disable hook. If that
* sequence is too simple drivers can just add their own driver private
* encoder hooks and call them from CRTC's callback by looping over all
* encoders connected to it using for_each_encoder_on_crtc().
*
* This callback is a variant of @disable that provides the atomic state
* to the driver. If @atomic_disable is implemented, @disable is not
* called by the helpers.
*
* This hook is only used by atomic helpers. Atomic drivers don't need
* to implement it if there's no need to disable anything at the encoder
* level. To ensure that runtime PM handling (using either DPMS or the
* new "ACTIVE" property) works @atomic_disable must be the inverse of
* @atomic_enable.
*/
void (*atomic_disable)(struct drm_encoder *encoder,
struct drm_atomic_state *state);
/**
* @atomic_enable:
*
* This callback should be used to enable the encoder. It is called
* after this encoder's CRTC has been enabled using their own
* &drm_crtc_helper_funcs.atomic_enable hook. If that sequence is
* too simple drivers can just add their own driver private encoder
* hooks and call them from CRTC's callback by looping over all encoders
* connected to it using for_each_encoder_on_crtc().
*
* This callback is a variant of @enable that provides the atomic state
* to the driver. If @atomic_enable is implemented, @enable is not
* called by the helpers.
*
* This hook is only used by atomic helpers, it is the opposite of
* @atomic_disable. Atomic drivers don't need to implement it if there's
* no need to enable anything at the encoder level. To ensure that
* runtime PM handling works @atomic_enable must be the inverse of
* @atomic_disable.
*/
void (*atomic_enable)(struct drm_encoder *encoder,
struct drm_atomic_state *state);
/**
* @disable:
*
@@ -695,6 +741,9 @@ struct drm_encoder_helper_funcs {
* handling (using either DPMS or the new "ACTIVE" property) works
* @disable must be the inverse of @enable for atomic drivers.
*
* For atomic drivers also consider @atomic_disable and save yourself
* from having to read the NOTE below!
*
* NOTE:
*
* With legacy CRTC helpers there's a big semantic difference between
@@ -719,11 +768,11 @@ struct drm_encoder_helper_funcs {
* hooks and call them from CRTC's callback by looping over all encoders
* connected to it using for_each_encoder_on_crtc().
*
* This hook is used only by atomic helpers, for symmetry with @disable.
* Atomic drivers don't need to implement it if there's no need to
* enable anything at the encoder level. To ensure that runtime PM handling
* (using either DPMS or the new "ACTIVE" property) works
* @enable must be the inverse of @disable for atomic drivers.
* This hook is only used by atomic helpers, it is the opposite of
* @disable. Atomic drivers don't need to implement it if there's no
* need to enable anything at the encoder level. To ensure that
* runtime PM handling (using either DPMS or the new "ACTIVE" property)
* works @enable must be the inverse of @disable for atomic drivers.
*/
void (*enable)(struct drm_encoder *encoder);
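
A hedged sketch of a driver wiring up the new state-aware hooks instead of the legacy @enable/@disable pair (all names here are illustrative):

static void sketch_encoder_atomic_enable(struct drm_encoder *encoder,
					 struct drm_atomic_state *state)
{
	/* power up the encoder; the CRTC is already running at this point */
}

static void sketch_encoder_atomic_disable(struct drm_encoder *encoder,
					  struct drm_atomic_state *state)
{
	/* power down the encoder; called before the CRTC shuts off */
}

static const struct drm_encoder_helper_funcs sketch_encoder_helper_funcs = {
	.atomic_enable = sketch_encoder_atomic_enable,
	.atomic_disable = sketch_encoder_atomic_disable,
};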
@@ -979,7 +1028,7 @@ struct drm_connector_helper_funcs {
* deadlock.
*/
int (*atomic_check)(struct drm_connector *connector,
struct drm_connector_state *state);
struct drm_atomic_state *state);
/**
* @atomic_commit:

View File

@@ -69,7 +69,7 @@ struct drm_plane_state {
*
* Optional fence to wait for before scanning out @fb. The core atomic
* code will set this when userspace is using explicit fencing. Do not
* write this directly for a driver's implicit fence, use
* write this field directly for a driver's implicit fence, use
* drm_atomic_set_fence_for_plane() to ensure that an explicit fence is
* preserved.
*

View File

@@ -32,6 +32,8 @@
#include <linux/device.h>
#include <linux/debugfs.h>
#include <drm/drm.h>
/**
* DOC: print
*

View File

@@ -0,0 +1,20 @@
// SPDX-License-Identifier: MIT
/*
* Copyright (C) 2019 Google, Inc.
*
* Authors:
* Sean Paul <seanpaul@chromium.org>
*/
#ifndef DRM_SELF_REFRESH_HELPER_H_
#define DRM_SELF_REFRESH_HELPER_H_
struct drm_atomic_state;
struct drm_crtc;
void drm_self_refresh_helper_alter_state(struct drm_atomic_state *state);
int drm_self_refresh_helper_init(struct drm_crtc *crtc,
unsigned int entry_delay_ms);
void drm_self_refresh_helper_cleanup(struct drm_crtc *crtc);
#endif
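
A minimal sketch of opting a CRTC into self refresh, assuming a 1000 ms entry delay (the function name is illustrative); connectors must additionally set self_refresh_aware in their atomic_check:

static int sketch_setup_self_refresh(struct drm_crtc *crtc)
{
	/* enter self refresh after the CRTC has been idle for 1000 ms;
	 * exit is driven by the helpers on the next atomic commit */
	return drm_self_refresh_helper_init(crtc, 1000);
}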

View File

@@ -0,0 +1,102 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef DRM_VRAM_MM_HELPER_H
#define DRM_VRAM_MM_HELPER_H
#include <drm/ttm/ttm_bo_driver.h>
struct drm_device;
/**
* struct drm_vram_mm_funcs - Callback functions for &struct drm_vram_mm
* @evict_flags: Provides an implementation for struct \
&ttm_bo_driver.evict_flags
* @verify_access: Provides an implementation for \
struct &ttm_bo_driver.verify_access
*
* These callback functions integrate VRAM MM with TTM buffer objects. New
* functions can be added if necessary.
*/
struct drm_vram_mm_funcs {
void (*evict_flags)(struct ttm_buffer_object *bo,
struct ttm_placement *placement);
int (*verify_access)(struct ttm_buffer_object *bo, struct file *filp);
};
/**
* struct drm_vram_mm - An instance of VRAM MM
* @vram_base: Base address of the managed video memory
* @vram_size: Size of the managed video memory in bytes
* @bdev: The TTM BO device.
* @funcs: TTM BO functions
*
* The fields &struct drm_vram_mm.vram_base and
* &struct drm_vram_mm.vram_size are managed by VRAM MM, but are
* available for public read access. Use the field
* &struct drm_vram_mm.bdev to access the TTM BO device.
*/
struct drm_vram_mm {
uint64_t vram_base;
size_t vram_size;
struct ttm_bo_device bdev;
const struct drm_vram_mm_funcs *funcs;
};
/**
* drm_vram_mm_of_bdev() - \
Returns the container of type &struct drm_vram_mm for field bdev.
* @bdev: the TTM BO device
*
* Returns:
* The containing instance of &struct drm_vram_mm
*/
static inline struct drm_vram_mm *drm_vram_mm_of_bdev(
struct ttm_bo_device *bdev)
{
return container_of(bdev, struct drm_vram_mm, bdev);
}
int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
uint64_t vram_base, size_t vram_size,
const struct drm_vram_mm_funcs *funcs);
void drm_vram_mm_cleanup(struct drm_vram_mm *vmm);
int drm_vram_mm_mmap(struct file *filp, struct vm_area_struct *vma,
struct drm_vram_mm *vmm);
/*
* Helpers for integration with struct drm_device
*/
struct drm_vram_mm *drm_vram_helper_alloc_mm(
struct drm_device *dev, uint64_t vram_base, size_t vram_size,
const struct drm_vram_mm_funcs *funcs);
void drm_vram_helper_release_mm(struct drm_device *dev);
/*
* Helpers for &struct file_operations
*/
int drm_vram_mm_file_operations_mmap(
struct file *filp, struct vm_area_struct *vma);
/**
* define DRM_VRAM_MM_FILE_OPERATIONS - default callback functions for \
&struct file_operations
*
* Drivers that use VRAM MM can use this macro to initialize
* &struct file_operations with default functions.
*/
#define DRM_VRAM_MM_FILE_OPERATIONS \
.llseek = no_llseek, \
.read = drm_read, \
.poll = drm_poll, \
.unlocked_ioctl = drm_ioctl, \
.compat_ioctl = drm_compat_ioctl, \
.mmap = drm_vram_mm_file_operations_mmap, \
.open = drm_open, \
.release = drm_release \
#endif
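
A hedged sketch of the macro in use, combined with the usual DRM fops boilerplate (the variable name is illustrative):

static const struct file_operations sketch_vram_fops = {
	.owner = THIS_MODULE,
	/* expands to the default llseek/read/poll/ioctl/mmap/open/release */
	DRM_VRAM_MM_FILE_OPERATIONS
};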

View File

@@ -167,9 +167,6 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
* @sched: the scheduler instance on which this job is scheduled.
* @s_fence: contains the fences for the scheduling of job.
* @finish_cb: the callback for the finished fence.
* @finish_work: schedules the function @drm_sched_job_finish once the job has
* finished to remove the job from the
* @drm_gpu_scheduler.ring_mirror_list.
* @node: used to append this struct to the @drm_gpu_scheduler.ring_mirror_list.
* @id: a unique id assigned to each job scheduled on the scheduler.
* @karma: increment on every hang caused by this job. If this exceeds the hang
@@ -188,7 +185,6 @@ struct drm_sched_job {
struct drm_gpu_scheduler *sched;
struct drm_sched_fence *s_fence;
struct dma_fence_cb finish_cb;
struct work_struct finish_work;
struct list_head node;
uint64_t id;
atomic_t karma;
@@ -263,6 +259,7 @@ struct drm_sched_backend_ops {
* guilty and it will be considered for scheduling further.
* @num_jobs: the number of jobs in queue in the scheduler
* @ready: marks if the underlying HW is ready to work
* @free_guilty: A hint to the timeout handler to free the guilty job.
*
* One scheduler is implemented for each hardware ring.
*/
@@ -283,6 +280,7 @@ struct drm_gpu_scheduler {
int hang_limit;
atomic_t num_jobs;
bool ready;
bool free_guilty;
};
int drm_sched_init(struct drm_gpu_scheduler *sched,
@@ -296,7 +294,7 @@ int drm_sched_job_init(struct drm_sched_job *job,
void *owner);
void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_increase_karma(struct drm_sched_job *bad);

View File

@@ -559,7 +559,6 @@
#define INTEL_ICL_PORT_F_IDS(info) \
INTEL_VGA_DEVICE(0x8A50, info), \
INTEL_VGA_DEVICE(0x8A5C, info), \
INTEL_VGA_DEVICE(0x8A5D, info), \
INTEL_VGA_DEVICE(0x8A59, info), \
INTEL_VGA_DEVICE(0x8A58, info), \
INTEL_VGA_DEVICE(0x8A52, info), \
@@ -573,7 +572,8 @@
#define INTEL_ICL_11_IDS(info) \
INTEL_ICL_PORT_F_IDS(info), \
INTEL_VGA_DEVICE(0x8A51, info)
INTEL_VGA_DEVICE(0x8A51, info), \
INTEL_VGA_DEVICE(0x8A5D, info)
/* EHL */
#define INTEL_EHL_IDS(info) \

View File

@@ -767,11 +767,12 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
*/
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
spin_lock(&bo->bdev->glob->lru_lock);
spin_lock(&bo->bdev->glob->lru_lock);
if (list_empty(&bo->lru))
ttm_bo_add_to_lru(bo);
spin_unlock(&bo->bdev->glob->lru_lock);
}
else
ttm_bo_move_to_lru_tail(bo, NULL);
spin_unlock(&bo->bdev->glob->lru_lock);
reservation_object_unlock(bo->resv);
}

View File

@@ -70,6 +70,7 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
* @list: thread private list of ttm_validate_buffer structs.
* @intr: should the wait be interruptible
* @dups: [out] optional list of duplicates.
* @del_lru: true if BOs should be removed from the LRU.
*
* Tries to reserve bos pointed to by the list entries for validation.
* If the function returns 0, all buffers are marked as "unfenced",
@@ -98,7 +99,7 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
struct list_head *list, bool intr,
struct list_head *dups);
struct list_head *dups, bool del_lru);
/**
* function ttm_eu_fence_buffer_objects.

View File

@@ -187,6 +187,7 @@
#define CLK_MIPI_HSI 349 /* Exynos4210 only */
#define CLK_PIXELASYNCM0 351
#define CLK_PIXELASYNCM1 352
#define CLK_ASYNC_G3D 353 /* Exynos4x12 only */
#define CLK_PWM_ISP_SCLK 379 /* Exynos4x12 only */
#define CLK_SPI0_ISP_SCLK 380 /* Exynos4x12 only */
#define CLK_SPI1_ISP_SCLK 381 /* Exynos4x12 only */

View File

@@ -60,6 +60,7 @@
#define CLK_MAU_EPLL 159
#define CLK_SCLK_HSIC_12M 160
#define CLK_SCLK_MPHY_IXTAL24 161
#define CLK_SCLK_BPLL 162
/* gate clocks */
#define CLK_UART0 257
@@ -195,6 +196,16 @@
#define CLK_ACLK432_CAM 518
#define CLK_ACLK_FL1550_CAM 519
#define CLK_ACLK550_CAM 520
#define CLK_CLKM_PHY0 521
#define CLK_CLKM_PHY1 522
#define CLK_ACLK_PPMU_DREX0_0 523
#define CLK_ACLK_PPMU_DREX0_1 524
#define CLK_ACLK_PPMU_DREX1_0 525
#define CLK_ACLK_PPMU_DREX1_1 526
#define CLK_PCLK_PPMU_DREX0_0 527
#define CLK_PCLK_PPMU_DREX0_1 528
#define CLK_PCLK_PPMU_DREX1_0 529
#define CLK_PCLK_PPMU_DREX1_1 530
/* mux clocks */
#define CLK_MOUT_HDMI 640
@@ -217,6 +228,8 @@
#define CLK_MOUT_EPLL 657
#define CLK_MOUT_MAU_EPLL 658
#define CLK_MOUT_USER_MAU_EPLL 659
#define CLK_MOUT_SCLK_SPLL 660
#define CLK_MOUT_MX_MSPLL_CCORE_PHY 661
/* divider clocks */
#define CLK_DOUT_PIXEL 768
@@ -248,8 +261,11 @@
#define CLK_DOUT_CCLK_DREX0 794
#define CLK_DOUT_CLK2X_PHY0 795
#define CLK_DOUT_PCLK_CORE_MEM 796
#define CLK_FF_DOUT_SPLL2 797
#define CLK_DOUT_PCLK_DREX0 798
#define CLK_DOUT_PCLK_DREX1 799
/* must be greater than maximal clock id */
#define CLK_NR_CLKS 797
#define CLK_NR_CLKS 800
#endif /* _DT_BINDINGS_CLOCK_EXYNOS_5420_H */

View File

@@ -136,5 +136,6 @@
#define CLKID_VDEC_1 204
#define CLKID_VDEC_HEVC 207
#define CLKID_VDEC_HEVCF 210
#define CLKID_TS 212
#endif /* __G12A_CLKC_H */

View File

@@ -239,6 +239,15 @@
#define IMX8MM_CLK_NAND_USDHC_BUS_RAWNAND_CLK 222
#define IMX8MM_CLK_END 223
#define IMX8MM_CLK_GPIO1_ROOT 223
#define IMX8MM_CLK_GPIO2_ROOT 224
#define IMX8MM_CLK_GPIO3_ROOT 225
#define IMX8MM_CLK_GPIO4_ROOT 226
#define IMX8MM_CLK_GPIO5_ROOT 227
#define IMX8MM_CLK_SNVS_ROOT 228
#define IMX8MM_CLK_GIC 229
#define IMX8MM_CLK_END 230
#endif

View File

@@ -400,5 +400,8 @@
#define IMX8MQ_CLK_GPIO4_ROOT 262
#define IMX8MQ_CLK_GPIO5_ROOT 263
#define IMX8MQ_CLK_END 264
#define IMX8MQ_CLK_SNVS_ROOT 264
#define IMX8MQ_CLK_GIC 265
#define IMX8MQ_CLK_END 266
#endif /* __DT_BINDINGS_CLOCK_IMX8MQ_H */

View File

@@ -112,5 +112,8 @@
#define CLKID_VDEC_HCODEC 199
#define CLKID_VDEC_2 202
#define CLKID_VDEC_HEVC 206
#define CLKID_CTS_AMCLK 209
#define CLKID_CTS_MCLK_I958 212
#define CLKID_CTS_I958 213
#endif /* __MESON8B_CLKC_H */

View File

@@ -208,4 +208,21 @@
#define CLK_TOP_MSDC2_INFRA 176
#define CLK_TOP_NR_CLK 177
/* AUDSYS */
#define CLK_AUD_AFE 0
#define CLK_AUD_I2S 1
#define CLK_AUD_22M 2
#define CLK_AUD_24M 3
#define CLK_AUD_INTDIR 4
#define CLK_AUD_APLL2_TUNER 5
#define CLK_AUD_APLL_TUNER 6
#define CLK_AUD_HDMI 7
#define CLK_AUD_SPDF 8
#define CLK_AUD_ADC 9
#define CLK_AUD_DAC 10
#define CLK_AUD_DAC_PREDIS 11
#define CLK_AUD_TML 12
#define CLK_AUD_NR_CLK 13
#endif /* _DT_BINDINGS_CLK_MT8516_H */

View File

@@ -166,5 +166,12 @@
#define GCC_PCIEPHY_0_PHY_BCR 12
#define GCC_EMAC_BCR 13
#define GCC_CDSP_RESTART 14
#define GCC_PCIE_0_AXI_MASTER_STICKY_ARES 15
#define GCC_PCIE_0_AHB_ARES 16
#define GCC_PCIE_0_AXI_SLAVE_ARES 17
#define GCC_PCIE_0_AXI_MASTER_ARES 18
#define GCC_PCIE_0_CORE_STICKY_ARES 19
#define GCC_PCIE_0_SLEEP_ARES 20
#define GCC_PCIE_0_PIPE_ARES 21
#endif

View File

@@ -0,0 +1,29 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2019, Jeffrey Hugo
*/
#ifndef _DT_BINDINGS_CLK_MSM_GPUCC_8998_H
#define _DT_BINDINGS_CLK_MSM_GPUCC_8998_H
#define GPUPLL0 0
#define GPUPLL0_OUT_EVEN 1
#define RBCPR_CLK_SRC 2
#define GFX3D_CLK_SRC 3
#define RBBMTIMER_CLK_SRC 4
#define GFX3D_ISENSE_CLK_SRC 5
#define RBCPR_CLK 6
#define GFX3D_CLK 7
#define RBBMTIMER_CLK 8
#define GFX3D_ISENSE_CLK 9
#define GPUCC_CXO_CLK 10
#define GPU_CX_BCR 0
#define RBCPR_BCR 1
#define GPU_GX_BCR 2
#define GPU_ISENSE_BCR 3
#define GPU_CX_GDSC 1
#define GPU_GX_GDSC 2
#endif

View File

@@ -64,6 +64,7 @@
#define SCLK_WIFI 141
#define SCLK_OTGPHY0 142
#define SCLK_OTGPHY1 143
#define SCLK_HDMI_PHY 144
/* dclk gates */
#define DCLK_VOP 190

View File

@@ -164,6 +164,7 @@
#define PCLK_DCF 233
#define PCLK_SARADC 234
#define PCLK_ACODECPHY 235
#define PCLK_WDT 236
/* hclk gates */
#define HCLK_PERI 308

View File

@@ -79,6 +79,8 @@
#define STRATIX10_USB_CLK 59
#define STRATIX10_SPI_M_CLK 60
#define STRATIX10_NAND_CLK 61
#define STRATIX10_NUM_CLKS 62
#define STRATIX10_NAND_X_CLK 62
#define STRATIX10_NAND_ECC_CLK 63
#define STRATIX10_NUM_CLKS 64
#endif /* __STRATIX10_CLOCK_H */

View File

@@ -41,34 +41,6 @@
#define TEGRA186_MAIN_GPIO(port, offset) \
((TEGRA186_MAIN_GPIO_PORT_##port * 8) + offset)
/* need to keep these for backwards-compatibility */
#define TEGRA_MAIN_GPIO_PORT_A 0
#define TEGRA_MAIN_GPIO_PORT_B 1
#define TEGRA_MAIN_GPIO_PORT_C 2
#define TEGRA_MAIN_GPIO_PORT_D 3
#define TEGRA_MAIN_GPIO_PORT_E 4
#define TEGRA_MAIN_GPIO_PORT_F 5
#define TEGRA_MAIN_GPIO_PORT_G 6
#define TEGRA_MAIN_GPIO_PORT_H 7
#define TEGRA_MAIN_GPIO_PORT_I 8
#define TEGRA_MAIN_GPIO_PORT_J 9
#define TEGRA_MAIN_GPIO_PORT_K 10
#define TEGRA_MAIN_GPIO_PORT_L 11
#define TEGRA_MAIN_GPIO_PORT_M 12
#define TEGRA_MAIN_GPIO_PORT_N 13
#define TEGRA_MAIN_GPIO_PORT_O 14
#define TEGRA_MAIN_GPIO_PORT_P 15
#define TEGRA_MAIN_GPIO_PORT_Q 16
#define TEGRA_MAIN_GPIO_PORT_R 17
#define TEGRA_MAIN_GPIO_PORT_T 18
#define TEGRA_MAIN_GPIO_PORT_X 19
#define TEGRA_MAIN_GPIO_PORT_Y 20
#define TEGRA_MAIN_GPIO_PORT_BB 21
#define TEGRA_MAIN_GPIO_PORT_CC 22
#define TEGRA_MAIN_GPIO(port, offset) \
((TEGRA_MAIN_GPIO_PORT_##port * 8) + offset)
/* GPIOs implemented by AON GPIO controller */
#define TEGRA186_AON_GPIO_PORT_S 0
#define TEGRA186_AON_GPIO_PORT_U 1
@@ -82,17 +54,4 @@
#define TEGRA186_AON_GPIO(port, offset) \
((TEGRA186_AON_GPIO_PORT_##port * 8) + offset)
/* need to keep these for backwards-compatibility */
#define TEGRA_AON_GPIO_PORT_S 0
#define TEGRA_AON_GPIO_PORT_U 1
#define TEGRA_AON_GPIO_PORT_V 2
#define TEGRA_AON_GPIO_PORT_W 3
#define TEGRA_AON_GPIO_PORT_Z 4
#define TEGRA_AON_GPIO_PORT_AA 5
#define TEGRA_AON_GPIO_PORT_EE 6
#define TEGRA_AON_GPIO_PORT_FF 7
#define TEGRA_AON_GPIO(port, offset) \
((TEGRA_AON_GPIO_PORT_##port * 8) + offset)
#endif

View File

@@ -48,4 +48,6 @@
#define DP83867_CLK_O_SEL_CHN_C_TCLK 0xA
#define DP83867_CLK_O_SEL_CHN_D_TCLK 0xB
#define DP83867_CLK_O_SEL_REF_CLK 0xC
/* Special flag to indicate clock should be off */
#define DP83867_CLK_O_SEL_OFF 0xFFFFFFFF
#endif

View File

@@ -0,0 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Linaro Ltd. */
#ifndef __DT_BINDINGS_POWER_QCOM_AOSS_QMP_H
#define __DT_BINDINGS_POWER_QCOM_AOSS_QMP_H
#define AOSS_QMP_LS_CDSP 0
#define AOSS_QMP_LS_LPASS 1
#define AOSS_QMP_LS_MODEM 2
#define AOSS_QMP_LS_SLPI 3
#define AOSS_QMP_LS_SPSS 4
#define AOSS_QMP_LS_VENUS 5
#endif

View File

@@ -36,4 +36,38 @@
#define MSM8996_VDDSSCX 5
#define MSM8996_VDDSSCX_VFC 6
/* MSM8998 Power Domain Indexes */
#define MSM8998_VDDCX 0
#define MSM8998_VDDCX_AO 1
#define MSM8998_VDDCX_VFL 2
#define MSM8998_VDDMX 3
#define MSM8998_VDDMX_AO 4
#define MSM8998_VDDMX_VFL 5
#define MSM8998_SSCCX 6
#define MSM8998_SSCCX_VFL 7
#define MSM8998_SSCMX 8
#define MSM8998_SSCMX_VFL 9
/* QCS404 Power Domains */
#define QCS404_VDDMX 0
#define QCS404_VDDMX_AO 1
#define QCS404_VDDMX_VFL 2
#define QCS404_LPICX 3
#define QCS404_LPICX_VFL 4
#define QCS404_LPIMX 5
#define QCS404_LPIMX_VFL 6
/* RPM SMD Power Domain performance levels */
#define RPM_SMD_LEVEL_RETENTION 16
#define RPM_SMD_LEVEL_RETENTION_PLUS 32
#define RPM_SMD_LEVEL_MIN_SVS 48
#define RPM_SMD_LEVEL_LOW_SVS 64
#define RPM_SMD_LEVEL_SVS 128
#define RPM_SMD_LEVEL_SVS_PLUS 192
#define RPM_SMD_LEVEL_NOM 256
#define RPM_SMD_LEVEL_NOM_PLUS 320
#define RPM_SMD_LEVEL_TURBO 384
#define RPM_SMD_LEVEL_TURBO_NO_CPR 416
#define RPM_SMD_LEVEL_BINNING 512
#endif

View File

@@ -0,0 +1,51 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 2018 Bitmain Ltd.
* Copyright (c) 2019 Linaro Ltd.
*/
#ifndef _DT_BINDINGS_BM1880_RESET_H
#define _DT_BINDINGS_BM1880_RESET_H
#define BM1880_RST_MAIN_AP 0
#define BM1880_RST_SECOND_AP 1
#define BM1880_RST_DDR 2
#define BM1880_RST_VIDEO 3
#define BM1880_RST_JPEG 4
#define BM1880_RST_VPP 5
#define BM1880_RST_GDMA 6
#define BM1880_RST_AXI_SRAM 7
#define BM1880_RST_TPU 8
#define BM1880_RST_USB 9
#define BM1880_RST_ETH0 10
#define BM1880_RST_ETH1 11
#define BM1880_RST_NAND 12
#define BM1880_RST_EMMC 13
#define BM1880_RST_SD 14
#define BM1880_RST_SDMA 15
#define BM1880_RST_I2S0 16
#define BM1880_RST_I2S1 17
#define BM1880_RST_UART0_1_CLK 18
#define BM1880_RST_UART0_1_ACLK 19
#define BM1880_RST_UART2_3_CLK 20
#define BM1880_RST_UART2_3_ACLK 21
#define BM1880_RST_MINER 22
#define BM1880_RST_I2C0 23
#define BM1880_RST_I2C1 24
#define BM1880_RST_I2C2 25
#define BM1880_RST_I2C3 26
#define BM1880_RST_I2C4 27
#define BM1880_RST_PWM0 28
#define BM1880_RST_PWM1 29
#define BM1880_RST_PWM2 30
#define BM1880_RST_PWM3 31
#define BM1880_RST_SPI 32
#define BM1880_RST_GPIO0 33
#define BM1880_RST_GPIO1 34
#define BM1880_RST_GPIO2 35
#define BM1880_RST_EFUSE 36
#define BM1880_RST_WDT 37
#define BM1880_RST_AHB_ROM 38
#define BM1880_RST_SPIC 39
#endif /* _DT_BINDINGS_BM1880_RESET_H */

View File

@@ -0,0 +1,25 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Device Tree defines for Madera codecs
*
* Copyright (C) 2016-2017 Cirrus Logic, Inc. and
* Cirrus Logic International Semiconductor Ltd.
*/
#ifndef DT_BINDINGS_SOUND_MADERA_H
#define DT_BINDINGS_SOUND_MADERA_H
#define MADERA_INMODE_DIFF 0
#define MADERA_INMODE_SE 1
#define MADERA_INMODE_DMIC 2
#define MADERA_DMIC_REF_MICVDD 0
#define MADERA_DMIC_REF_MICBIAS1 1
#define MADERA_DMIC_REF_MICBIAS2 2
#define MADERA_DMIC_REF_MICBIAS3 3
#define CS47L35_DMIC_REF_MICBIAS1B 1
#define CS47L35_DMIC_REF_MICBIAS2A 2
#define CS47L35_DMIC_REF_MICBIAS2B 3
#endif

View File

@@ -0,0 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __DT_MESON_G12A_TOHDMITX_H
#define __DT_MESON_G12A_TOHDMITX_H
#define TOHDMITX_I2S_IN_A 0
#define TOHDMITX_I2S_IN_B 1
#define TOHDMITX_I2S_IN_C 2
#define TOHDMITX_I2S_OUT 3
#define TOHDMITX_SPDIF_IN_A 4
#define TOHDMITX_SPDIF_IN_B 5
#define TOHDMITX_SPDIF_OUT 6
#endif /* __DT_MESON_G12A_TOHDMITX_H */

View File

@@ -14,6 +14,7 @@
* Authorisation record for request_key().
*/
struct request_key_auth {
struct rcu_head rcu;
struct key *target_key;
struct key *dest_keyring;
const struct cred *cred;

View File

@@ -11,18 +11,19 @@
#include <asm/perf_event.h>
#define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1)
#define ARMV8_PMU_MAX_COUNTER_PAIRS ((ARMV8_PMU_MAX_COUNTERS + 1) >> 1)
#ifdef CONFIG_KVM_ARM_PMU
struct kvm_pmc {
u8 idx; /* index into the pmu->pmc array */
struct perf_event *perf_event;
u64 bitmask;
};
struct kvm_pmu {
int irq_num;
struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
DECLARE_BITMAP(chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
bool ready;
bool created;
bool irq_level;
@@ -33,10 +34,11 @@ struct kvm_pmu {
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
@@ -70,10 +72,11 @@ static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
return 0;
}
static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)

View File

@@ -350,6 +350,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
void kvm_vgic_load(struct kvm_vcpu *vcpu);
void kvm_vgic_put(struct kvm_vcpu *vcpu);
void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu);
#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
#define vgic_initialized(k) ((k)->arch.vgic.initialized)

View File

@@ -10,6 +10,7 @@
#include <linux/errno.h>
#include <linux/ioport.h> /* for struct resource */
#include <linux/irqdomain.h>
#include <linux/resource_ext.h>
#include <linux/device.h>
#include <linux/property.h>
@@ -314,10 +315,19 @@ int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi);
void acpi_set_irq_model(enum acpi_irq_model_id model,
struct fwnode_handle *fwnode);
struct irq_domain *acpi_irq_create_hierarchy(unsigned int flags,
unsigned int size,
struct fwnode_handle *fwnode,
const struct irq_domain_ops *ops,
void *host_data);
#ifdef CONFIG_X86_IO_APIC
extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity);
#else
#define acpi_get_override_irq(gsi, trigger, polarity) (-1)
static inline int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity)
{
return -1;
}
#endif
/*
* This function undoes the effect of one call to acpi_register_gsi().
@@ -367,6 +377,7 @@ extern acpi_status wmi_install_notify_handler(const char *guid,
extern acpi_status wmi_remove_notify_handler(const char *guid);
extern acpi_status wmi_get_event_data(u32 event, struct acpi_buffer *out);
extern bool wmi_has_guid(const char *guid);
extern char *wmi_get_acpi_device_uid(const char *guid);
#endif /* CONFIG_ACPI_WMI */
@@ -913,31 +924,21 @@ static inline int acpi_dev_pm_attach(struct device *dev, bool power_on)
#endif
#if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP)
int acpi_dev_suspend_late(struct device *dev);
int acpi_subsys_prepare(struct device *dev);
void acpi_subsys_complete(struct device *dev);
int acpi_subsys_suspend_late(struct device *dev);
int acpi_subsys_suspend_noirq(struct device *dev);
int acpi_subsys_resume_noirq(struct device *dev);
int acpi_subsys_resume_early(struct device *dev);
int acpi_subsys_suspend(struct device *dev);
int acpi_subsys_freeze(struct device *dev);
int acpi_subsys_freeze_late(struct device *dev);
int acpi_subsys_freeze_noirq(struct device *dev);
int acpi_subsys_thaw_noirq(struct device *dev);
int acpi_subsys_poweroff(struct device *dev);
#else
static inline int acpi_dev_resume_early(struct device *dev) { return 0; }
static inline int acpi_subsys_prepare(struct device *dev) { return 0; }
static inline void acpi_subsys_complete(struct device *dev) {}
static inline int acpi_subsys_suspend_late(struct device *dev) { return 0; }
static inline int acpi_subsys_suspend_noirq(struct device *dev) { return 0; }
static inline int acpi_subsys_resume_noirq(struct device *dev) { return 0; }
static inline int acpi_subsys_resume_early(struct device *dev) { return 0; }
static inline int acpi_subsys_suspend(struct device *dev) { return 0; }
static inline int acpi_subsys_freeze(struct device *dev) { return 0; }
static inline int acpi_subsys_freeze_late(struct device *dev) { return 0; }
static inline int acpi_subsys_freeze_noirq(struct device *dev) { return 0; }
static inline int acpi_subsys_thaw_noirq(struct device *dev) { return 0; }
static inline int acpi_subsys_poweroff(struct device *dev) { return 0; }
#endif
#ifdef CONFIG_ACPI
@@ -1303,6 +1304,7 @@ static inline int lpit_read_residency_count_address(u64 *address)
#ifdef CONFIG_ACPI_PPTT
int find_acpi_cpu_topology(unsigned int cpu, int level);
int find_acpi_cpu_topology_package(unsigned int cpu);
int find_acpi_cpu_topology_hetero_id(unsigned int cpu);
int find_acpi_cpu_cache_topology(unsigned int cpu, int level);
#else
static inline int find_acpi_cpu_topology(unsigned int cpu, int level)
@@ -1313,6 +1315,10 @@ static inline int find_acpi_cpu_topology_package(unsigned int cpu)
{
return -EINVAL;
}
static inline int find_acpi_cpu_topology_hetero_id(unsigned int cpu)
{
return -EINVAL;
}
static inline int find_acpi_cpu_cache_topology(unsigned int cpu, int level)
{
return -EINVAL;

View File

@@ -18,7 +18,7 @@ DECLARE_PER_CPU(unsigned long, cpu_scale);
struct sched_domain;
static inline
unsigned long topology_get_cpu_scale(struct sched_domain *sd, int cpu)
unsigned long topology_get_cpu_scale(int cpu)
{
return per_cpu(cpu_scale, cpu);
}

View File

@@ -182,6 +182,9 @@ static inline unsigned int audit_get_sessionid(struct task_struct *tsk)
}
extern u32 audit_enabled;
extern int audit_signal_info(int sig, struct task_struct *t);
#else /* CONFIG_AUDIT */
static inline __printf(4, 5)
void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type,
@@ -235,6 +238,12 @@ static inline unsigned int audit_get_sessionid(struct task_struct *tsk)
}
#define audit_enabled AUDIT_OFF
static inline int audit_signal_info(int sig, struct task_struct *t)
{
return 0;
}
#endif /* CONFIG_AUDIT */
#ifdef CONFIG_AUDIT_COMPAT_GENERIC

View File

@@ -61,12 +61,14 @@ enum virtchnl_status_code {
#define VIRTCHNL_ERR_PARAM VIRTCHNL_STATUS_ERR_PARAM
#define VIRTCHNL_STATUS_NOT_SUPPORTED VIRTCHNL_STATUS_ERR_NOT_SUPPORTED
#define VIRTCHNL_LINK_SPEED_2_5GB_SHIFT 0x0
#define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1
#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2
#define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3
#define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4
#define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5
#define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6
#define VIRTCHNL_LINK_SPEED_5GB_SHIFT 0x7
enum virtchnl_link_speed {
VIRTCHNL_LINK_SPEED_UNKNOWN = 0,
@@ -76,6 +78,8 @@ enum virtchnl_link_speed {
VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT),
VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT),
VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT),
VIRTCHNL_LINK_SPEED_2_5GB = BIT(VIRTCHNL_LINK_SPEED_2_5GB_SHIFT),
VIRTCHNL_LINK_SPEED_5GB = BIT(VIRTCHNL_LINK_SPEED_5GB_SHIFT),
};
/* for hsplit_0 field of Rx HMC context */

View File

@@ -203,7 +203,6 @@ struct backing_dev_info {
#ifdef CONFIG_DEBUG_FS
struct dentry *debug_dir;
struct dentry *debug_stats;
#endif
};

View File

@@ -48,6 +48,7 @@ extern spinlock_t bdi_lock;
extern struct list_head bdi_list;
extern struct workqueue_struct *bdi_wq;
extern struct workqueue_struct *bdi_async_bio_wq;
static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{

View File

@@ -64,6 +64,10 @@ extern struct page *balloon_page_alloc(void);
extern void balloon_page_enqueue(struct balloon_dev_info *b_dev_info,
struct page *page);
extern struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info);
extern size_t balloon_page_list_enqueue(struct balloon_dev_info *b_dev_info,
struct list_head *pages);
extern size_t balloon_page_list_dequeue(struct balloon_dev_info *b_dev_info,
struct list_head *pages, size_t n_req_pages);
static inline void balloon_devinfo_init(struct balloon_dev_info *balloon)
{

View File

@@ -102,9 +102,23 @@ static inline void *bio_data(struct bio *bio)
return NULL;
}
static inline bool bio_full(struct bio *bio)
/**
* bio_full - check if the bio is full
* @bio: bio to check
* @len: length of one segment to be added
*
* Return true if @bio is full and one segment with @len bytes can't be
* added to the bio, otherwise return false.
*/
static inline bool bio_full(struct bio *bio, unsigned len)
{
return bio->bi_vcnt >= bio->bi_max_vecs;
if (bio->bi_vcnt >= bio->bi_max_vecs)
return true;
if (bio->bi_iter.bi_size > UINT_MAX - len)
return true;
return false;
}
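
Since bio_full() now also guards against bi_iter.bi_size overflowing UINT_MAX, add-page paths can rely on a single check. A minimal sketch, assuming kernel context; try_add_segment() is a hypothetical helper, while bio_full() and __bio_add_page() are the interfaces shown in this diff:

static bool try_add_segment(struct bio *bio, struct page *page,
			    unsigned int len, unsigned int off)
{
	if (bio_full(bio, len))		/* no vec slot, or bi_size would overflow */
		return false;
	__bio_add_page(bio, page, len, off);	/* caller has verified there is room */
	return true;
}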
static inline bool bio_next_segment(const struct bio *bio,
@@ -408,7 +422,6 @@ static inline void bio_wouldblock_error(struct bio *bio)
}
struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);
extern int submit_bio_wait(struct bio *bio);
extern void bio_advance(struct bio *, unsigned);
@@ -427,6 +440,7 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page,
void __bio_add_page(struct bio *bio, struct page *page,
unsigned int len, unsigned int off);
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
void bio_release_pages(struct bio *bio, bool mark_dirty);
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
struct iov_iter *, gfp_t);
@@ -444,17 +458,6 @@ void generic_end_io_acct(struct request_queue *q, int op,
struct hd_struct *part,
unsigned long start_time);
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void bio_flush_dcache_pages(struct bio *bi);
#else
static inline void bio_flush_dcache_pages(struct bio *bi)
{
}
#endif
extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
struct bio *src, struct bvec_iter *src_iter);
extern void bio_copy_data(struct bio *dst, struct bio *src);

View File

@@ -1,13 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_BITS_H
#define __LINUX_BITS_H
#include <linux/const.h>
#include <asm/bitsperlong.h>
#define BIT(nr) (1UL << (nr))
#define BIT_ULL(nr) (1ULL << (nr))
#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
#define BIT(nr) (UL(1) << (nr))
#define BIT_ULL(nr) (ULL(1) << (nr))
#define BIT_MASK(nr) (UL(1) << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG))
#define BIT_ULL_MASK(nr) (ULL(1) << ((nr) % BITS_PER_LONG_LONG))
#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
#define BITS_PER_BYTE 8
@@ -17,10 +19,11 @@
* GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
*/
#define GENMASK(h, l) \
(((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
(((~UL(0)) - (UL(1) << (l)) + 1) & \
(~UL(0) >> (BITS_PER_LONG - 1 - (h))))
#define GENMASK_ULL(h, l) \
(((~0ULL) - (1ULL << (l)) + 1) & \
(~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
(((~ULL(0)) - (ULL(1) << (l)) + 1) & \
(~ULL(0) >> (BITS_PER_LONG_LONG - 1 - (h))))
#endif /* __LINUX_BITS_H */
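
The UL()/ULL() wrappers from <linux/const.h> expand to plain suffixed constants in C, so the generated masks are unchanged. A standalone sanity check of the GENMASK_ULL(39, 21) example from the comment above (userspace approximation with the suffixes written out; MY_GENMASK_ULL is a local stand-in):

#include <assert.h>

#define MY_GENMASK_ULL(h, l) \
	(((~0ULL) - (1ULL << (l)) + 1) & (~0ULL >> (64 - 1 - (h))))

int main(void)
{
	/* bits 21..39 set, matching the comment's example */
	assert(MY_GENMASK_ULL(39, 21) == 0x000000ffffe00000ULL);
	return 0;
}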

View File

@@ -63,19 +63,17 @@ struct blkcg {
/*
* blkg_[rw]stat->aux_cnt is excluded for local stats but included for
* recursive. Used to carry stats of dead children, and, for blkg_rwstat,
* to carry result values from read and sum operations.
* recursive. Used to carry stats of dead children.
*/
struct blkg_stat {
struct percpu_counter cpu_cnt;
atomic64_t aux_cnt;
};
struct blkg_rwstat {
struct percpu_counter cpu_cnt[BLKG_RWSTAT_NR];
atomic64_t aux_cnt[BLKG_RWSTAT_NR];
};
struct blkg_rwstat_sample {
u64 cnt[BLKG_RWSTAT_NR];
};
/*
* A blkcg_gq (blkg) is association between a block cgroup (blkcg) and a
* request_queue (q). This is used by blkcg policies which need to track
@@ -134,13 +132,17 @@ struct blkcg_gq {
struct blkg_policy_data *pd[BLKCG_MAX_POLS];
struct rcu_head rcu_head;
spinlock_t async_bio_lock;
struct bio_list async_bios;
struct work_struct async_bio_work;
atomic_t use_delay;
atomic64_t delay_nsec;
atomic64_t delay_start;
u64 last_delay;
int last_use;
struct rcu_head rcu_head;
};
typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
@@ -179,6 +181,7 @@ struct blkcg_policy {
extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;
extern bool blkcg_debug_stats;
struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
struct request_queue *q, bool update_hint);
@@ -198,6 +201,13 @@ int blkcg_activate_policy(struct request_queue *q,
void blkcg_deactivate_policy(struct request_queue *q,
const struct blkcg_policy *pol);
static inline u64 blkg_rwstat_read_counter(struct blkg_rwstat *rwstat,
unsigned int idx)
{
return atomic64_read(&rwstat->aux_cnt[idx]) +
percpu_counter_sum_positive(&rwstat->cpu_cnt[idx]);
}
const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
u64 (*prfill)(struct seq_file *,
@@ -206,8 +216,7 @@ void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
const struct blkg_rwstat_sample *rwstat);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
int off);
int blkg_print_stat_bytes(struct seq_file *sf, void *v);
@@ -215,10 +224,8 @@ int blkg_print_stat_ios(struct seq_file *sf, void *v);
int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v);
int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v);
u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
struct blkcg_policy *pol, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
struct blkcg_policy *pol, int off);
void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
int off, struct blkg_rwstat_sample *sum);
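
With blkg_rwstat gone from the read path, recursive sums are returned through a caller-provided blkg_rwstat_sample. A minimal caller sketch (demo_rwstat_total_recursive is hypothetical; the declarations above are from this diff):

static u64 demo_rwstat_total_recursive(struct blkcg_gq *blkg,
				       struct blkcg_policy *pol, int off)
{
	struct blkg_rwstat_sample sum = { };

	/* new style: the sum comes back through an out parameter */
	blkg_rwstat_recursive_sum(blkg, pol, off, &sum);
	return sum.cnt[BLKG_RWSTAT_READ] + sum.cnt[BLKG_RWSTAT_WRITE];
}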
struct blkg_conf_ctx {
struct gendisk *disk;
@@ -569,69 +576,6 @@ static inline void blkg_put(struct blkcg_gq *blkg)
if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \
(p_blkg)->q, false)))
static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp)
{
int ret;
ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
if (ret)
return ret;
atomic64_set(&stat->aux_cnt, 0);
return 0;
}
static inline void blkg_stat_exit(struct blkg_stat *stat)
{
percpu_counter_destroy(&stat->cpu_cnt);
}
/**
* blkg_stat_add - add a value to a blkg_stat
* @stat: target blkg_stat
* @val: value to add
*
* Add @val to @stat. The caller must ensure that IRQs on the same CPU
* don't re-enter this function for the same counter.
*/
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
}
/**
* blkg_stat_read - read the current value of a blkg_stat
* @stat: blkg_stat to read
*/
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
return percpu_counter_sum_positive(&stat->cpu_cnt);
}
/**
* blkg_stat_reset - reset a blkg_stat
* @stat: blkg_stat to reset
*/
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
percpu_counter_set(&stat->cpu_cnt, 0);
atomic64_set(&stat->aux_cnt, 0);
}
/**
* blkg_stat_add_aux - add a blkg_stat into another's aux count
* @to: the destination blkg_stat
* @from: the source
*
* Add @from's count including the aux one to @to's aux count.
*/
static inline void blkg_stat_add_aux(struct blkg_stat *to,
struct blkg_stat *from)
{
atomic64_add(blkg_stat_read(from) + atomic64_read(&from->aux_cnt),
&to->aux_cnt);
}
static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
{
int i, ret;
@@ -693,15 +637,14 @@ static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
*
* Read the current snapshot of @rwstat and return it in @result.
*/
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
static inline void blkg_rwstat_read(struct blkg_rwstat *rwstat,
struct blkg_rwstat_sample *result)
{
struct blkg_rwstat result;
int i;
for (i = 0; i < BLKG_RWSTAT_NR; i++)
atomic64_set(&result.aux_cnt[i],
percpu_counter_sum_positive(&rwstat->cpu_cnt[i]));
return result;
result->cnt[i] =
percpu_counter_sum_positive(&rwstat->cpu_cnt[i]);
}
/**
@@ -714,10 +657,10 @@ static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
*/
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);
struct blkg_rwstat_sample tmp = { };
return atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
blkg_rwstat_read(rwstat, &tmp);
return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}
/**
@@ -763,6 +706,15 @@ static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg
struct bio *bio) { return false; }
#endif
bool __blkcg_punt_bio_submit(struct bio *bio);
static inline bool blkcg_punt_bio_submit(struct bio *bio)
{
if (bio->bi_opf & REQ_CGROUP_PUNT)
return __blkcg_punt_bio_submit(bio);
else
return false;
}
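
A minimal sketch of the issuing side, assuming kernel context: a shared kthread that must submit a bio on behalf of a cgroup tags it with REQ_CGROUP_PUNT, and blkcg_punt_bio_submit() then redirects the actual issue to the per-blkcg worker (demo_punt_issue is hypothetical):

static void demo_punt_issue(struct bio *bio)
{
	/* ask submit_bio() to punt the issue to the per-blkcg work item */
	bio->bi_opf |= REQ_CGROUP_PUNT;
	submit_bio(bio);
}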
static inline void blkcg_bio_issue_init(struct bio *bio)
{
@@ -910,6 +862,7 @@ static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }
static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; }
static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline bool blkcg_bio_issue_check(struct request_queue *q,
struct bio *bio) { return true; }

View File

@@ -306,7 +306,7 @@ void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs
bool blk_mq_complete_request(struct request *rq);
void blk_mq_complete_request_sync(struct request *rq);
bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
struct bio *bio);
struct bio *bio, unsigned int nr_segs);
bool blk_mq_queue_stopped(struct request_queue *q);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);

View File

@@ -154,11 +154,6 @@ struct bio {
blk_status_t bi_status;
u8 bi_partno;
/* Number of segments in this BIO after
* physical address coalescing is performed.
*/
unsigned int bi_phys_segments;
struct bvec_iter bi_iter;
atomic_t __bi_remaining;
@@ -210,7 +205,6 @@ struct bio {
*/
enum {
BIO_NO_PAGE_REF, /* don't put release vec pages */
BIO_SEG_VALID, /* bi_phys_segments valid */
BIO_CLONED, /* doesn't own data */
BIO_BOUNCED, /* bio is a bounce bio */
BIO_USER_MAPPED, /* contains user pages */
@@ -317,6 +311,15 @@ enum req_flag_bits {
__REQ_RAHEAD, /* read ahead, can fail anytime */
__REQ_BACKGROUND, /* background IO */
__REQ_NOWAIT, /* Don't wait if request will block */
__REQ_NOWAIT_INLINE, /* Return would-block error inline */
/*
* When a shared kthread needs to issue a bio for a cgroup, doing
* so synchronously can lead to priority inversions as the kthread
* can be trapped waiting for that cgroup. CGROUP_PUNT flag makes
* submit_bio() punt the actual issuing to a dedicated per-blkcg
* work item to avoid such priority inversions.
*/
__REQ_CGROUP_PUNT,
/* command specific flags for REQ_OP_WRITE_ZEROES: */
__REQ_NOUNMAP, /* do not free blocks when zeroing */
@@ -343,6 +346,9 @@ enum req_flag_bits {
#define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT (1ULL << __REQ_NOWAIT)
#define REQ_NOWAIT_INLINE (1ULL << __REQ_NOWAIT_INLINE)
#define REQ_CGROUP_PUNT (1ULL << __REQ_CGROUP_PUNT)
#define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP)
#define REQ_HIPRI (1ULL << __REQ_HIPRI)
@@ -414,12 +420,13 @@ static inline int op_stat_group(unsigned int op)
typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE -1U
#define BLK_QC_T_EAGAIN -2U
#define BLK_QC_T_SHIFT 16
#define BLK_QC_T_INTERNAL (1U << 31)
static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
return cookie != BLK_QC_T_NONE;
return cookie != BLK_QC_T_NONE && cookie != BLK_QC_T_EAGAIN;
}
static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)

View File

@@ -137,11 +137,11 @@ struct request {
unsigned int cmd_flags; /* op and common flags */
req_flags_t rq_flags;
int tag;
int internal_tag;
/* the following two fields are internal, NEVER access directly */
unsigned int __data_len; /* total data len */
int tag;
sector_t __sector; /* sector cursor */
struct bio *bio;
@@ -344,10 +344,15 @@ struct queue_limits {
#ifdef CONFIG_BLK_DEV_ZONED
/*
* Maximum number of zones to report with a single report zones command.
*/
#define BLK_ZONED_REPORT_MAX_ZONES 8192U
extern unsigned int blkdev_nr_zones(struct block_device *bdev);
extern int blkdev_report_zones(struct block_device *bdev,
sector_t sector, struct blk_zone *zones,
unsigned int *nr_zones, gfp_t gfp_mask);
unsigned int *nr_zones);
extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors,
sector_t nr_sectors, gfp_t gfp_mask);
extern int blk_revalidate_disk_zones(struct gendisk *disk);
@@ -681,7 +686,7 @@ static inline bool blk_queue_is_zoned(struct request_queue *q)
}
}
static inline unsigned int blk_queue_zone_sectors(struct request_queue *q)
static inline sector_t blk_queue_zone_sectors(struct request_queue *q)
{
return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
}
@@ -828,7 +833,6 @@ extern void blk_unregister_queue(struct gendisk *disk);
extern blk_qc_t generic_make_request(struct bio *bio);
extern blk_qc_t direct_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_init_request_from_bio(struct request *req, struct bio *bio);
extern void blk_put_request(struct request *);
extern struct request *blk_get_request(struct request_queue *, unsigned int op,
blk_mq_req_flags_t flags);
@@ -842,7 +846,6 @@ extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
struct request *rq);
extern int blk_rq_append_bio(struct request *rq, struct bio **bio);
extern void blk_queue_split(struct request_queue *, struct bio **);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
unsigned int, void __user *);
@@ -867,6 +870,9 @@ extern void blk_execute_rq(struct request_queue *, struct gendisk *,
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
struct request *, int, rq_end_io_fn *);
/* Helper to convert REQ_OP_XXX to its string format XXX */
extern const char *blk_op_str(unsigned int op);
int blk_status_to_errno(blk_status_t status);
blk_status_t errno_to_blk_status(int errno);
@@ -1026,21 +1032,9 @@ void blk_steal_bios(struct bio_list *list, struct request *rq);
*
* blk_update_request() completes the given number of bytes and updates
* the request without completing it.
*
* blk_end_request() and friends. __blk_end_request() must be called
* with the request queue spinlock acquired.
*
* Several drivers define their own end_request and call
* blk_end_request() for parts of the original function.
* This prevents code duplication in drivers.
*/
extern bool blk_update_request(struct request *rq, blk_status_t error,
unsigned int nr_bytes);
extern void blk_end_request_all(struct request *rq, blk_status_t error);
extern bool __blk_end_request(struct request *rq, blk_status_t error,
unsigned int nr_bytes);
extern void __blk_end_request_all(struct request *rq, blk_status_t error);
extern bool __blk_end_request_cur(struct request *rq, blk_status_t error);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
@@ -1429,7 +1423,7 @@ static inline bool bdev_is_zoned(struct block_device *bdev)
return false;
}
static inline unsigned int bdev_zone_sectors(struct block_device *bdev)
static inline sector_t bdev_zone_sectors(struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);
@@ -1684,8 +1678,7 @@ struct block_device_operations {
/* this callback is with swap_lock and sometimes page table lock held */
void (*swap_slot_free_notify) (struct block_device *, unsigned long);
int (*report_zones)(struct gendisk *, sector_t sector,
struct blk_zone *zones, unsigned int *nr_zones,
gfp_t gfp_mask);
struct blk_zone *zones, unsigned int *nr_zones);
struct module *owner;
const struct pr_ops *pr_ops;
};

View File

@@ -6,6 +6,7 @@
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/percpu-refcount.h>
#include <linux/rbtree.h>
#include <uapi/linux/bpf.h>
@@ -71,11 +72,17 @@ struct cgroup_bpf {
u32 flags[MAX_BPF_ATTACH_TYPE];
/* temp storage for effective prog array used by prog_attach/detach */
struct bpf_prog_array __rcu *inactive;
struct bpf_prog_array *inactive;
/* reference counter used to detach bpf programs after cgroup removal */
struct percpu_ref refcnt;
/* cgroup_bpf is released using a work queue */
struct work_struct release_work;
};
void cgroup_bpf_put(struct cgroup *cgrp);
int cgroup_bpf_inherit(struct cgroup *cgrp);
void cgroup_bpf_offline(struct cgroup *cgrp);
int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
enum bpf_attach_type type, u32 flags);
@@ -117,6 +124,14 @@ int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
loff_t *ppos, void **new_buf,
enum bpf_attach_type type);
int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
int *optname, char __user *optval,
int *optlen, char **kernel_optval);
int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
int optname, char __user *optval,
int __user *optlen, int max_optlen,
int retval);
static inline enum bpf_cgroup_storage_type cgroup_storage_type(
struct bpf_map *map)
{
@@ -279,6 +294,38 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
__ret; \
})
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
kernel_optval) \
({ \
int __ret = 0; \
if (cgroup_bpf_enabled) \
__ret = __cgroup_bpf_run_filter_setsockopt(sock, level, \
optname, optval, \
optlen, \
kernel_optval); \
__ret; \
})
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) \
({ \
int __ret = 0; \
if (cgroup_bpf_enabled) \
get_user(__ret, optlen); \
__ret; \
})
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen, \
max_optlen, retval) \
({ \
int __ret = retval; \
if (cgroup_bpf_enabled) \
__ret = __cgroup_bpf_run_filter_getsockopt(sock, level, \
optname, optval, \
optlen, max_optlen, \
retval); \
__ret; \
})
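
A simplified sketch of how a setsockopt path might consult the new hook; demo_setsockopt_path is hypothetical and the real syscall code does more, but the calling convention (pointers for level/optname/optlen, and a kernel_optval buffer that replaces the user value when a program rewrites it) follows the macro above:

static int demo_setsockopt_path(struct sock *sk, int level, int optname,
				char __user *optval, int optlen)
{
	char *kernel_optval = NULL;
	int err;

	err = BPF_CGROUP_RUN_PROG_SETSOCKOPT(sk, &level, &optname, optval,
					     &optlen, &kernel_optval);
	if (err < 0)
		return err;

	/* when a prog rewrote the value, kernel_optval replaces optval */
	kfree(kernel_optval);
	return 0;
}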
int cgroup_bpf_prog_attach(const union bpf_attr *attr,
enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
@@ -289,8 +336,8 @@ int cgroup_bpf_prog_query(const union bpf_attr *attr,
struct bpf_prog;
struct cgroup_bpf {};
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
static inline void cgroup_bpf_offline(struct cgroup *cgrp) {}
static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
enum bpf_prog_type ptype,
@@ -350,6 +397,11 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos,nbuf) ({ 0; })
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
optlen, max_optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
kernel_optval) ({ 0; })
#define for_each_cgroup_storage_type(stype) for (; false; )

View File

@@ -63,6 +63,11 @@ struct bpf_map_ops {
u64 imm, u32 *off);
};
struct bpf_map_memory {
u32 pages;
struct user_struct *user;
};
struct bpf_map {
/* The first two cachelines with read-mostly members of which some
* are also accessed in fast-path (e.g. ops, max_entries).
@@ -83,7 +88,7 @@ struct bpf_map {
u32 btf_key_type_id;
u32 btf_value_type_id;
struct btf *btf;
u32 pages;
struct bpf_map_memory memory;
bool unpriv_array;
bool frozen; /* write-once */
/* 48 bytes hole */
@@ -91,8 +96,7 @@ struct bpf_map {
/* The 3rd and 4th cacheline with misc members to avoid false sharing
* particularly with refcounting.
*/
struct user_struct *user ____cacheline_aligned;
atomic_t refcnt;
atomic_t refcnt ____cacheline_aligned;
atomic_t usercnt;
struct work_struct work;
char name[BPF_OBJ_NAME_LEN];
@@ -273,6 +277,7 @@ enum bpf_reg_type {
PTR_TO_TCP_SOCK, /* reg points to struct tcp_sock */
PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */
PTR_TO_TP_BUFFER, /* reg points to a writable raw tp's buffer */
PTR_TO_XDP_SOCK, /* reg points to struct xdp_sock */
};
/* The information passed from prog-specific *_is_valid_access
@@ -367,6 +372,7 @@ struct bpf_prog_aux {
u32 id;
u32 func_cnt; /* used by non-func prog as the number of func progs */
u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
bool verifier_zext; /* Zero extensions have been inserted by verifier. */
bool offload_requested;
struct bpf_prog **func;
void *jit_data; /* JIT specific data. arch dependent */
@@ -510,17 +516,18 @@ struct bpf_prog_array {
};
struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
void bpf_prog_array_free(struct bpf_prog_array __rcu *progs);
int bpf_prog_array_length(struct bpf_prog_array __rcu *progs);
int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
void bpf_prog_array_free(struct bpf_prog_array *progs);
int bpf_prog_array_length(struct bpf_prog_array *progs);
bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
__u32 __user *prog_ids, u32 cnt);
void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs,
void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
struct bpf_prog *old_prog);
int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
int bpf_prog_array_copy_info(struct bpf_prog_array *array,
u32 *prog_ids, u32 request_cnt,
u32 *prog_cnt);
int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
int bpf_prog_array_copy(struct bpf_prog_array *old_array,
struct bpf_prog *exclude_prog,
struct bpf_prog *include_prog,
struct bpf_prog_array **new_array);
@@ -548,6 +555,56 @@ _out: \
_ret; \
})
/* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs
* so BPF programs can request cwr for TCP packets.
*
* Current cgroup skb programs can only return 0 or 1 (0 to drop the
* packet, 1 to keep it). This macro changes the behavior so the low order bit
* indicates whether the packet should be dropped (0) or not (1)
* and the next bit is a congestion notification bit. This could be
* used by TCP to call tcp_enter_cwr()
*
* Hence, new allowed return values of CGROUP EGRESS BPF programs are:
* 0: drop packet
* 1: keep packet
* 2: drop packet and cn
* 3: keep packet and cn
*
* This macro then converts it to one of the NET_XMIT values or an error
* code that is then interpreted as drop packet (and no cn):
* 0: NET_XMIT_SUCCESS skb should be transmitted
* 1: NET_XMIT_DROP skb should be dropped and cn
* 2: NET_XMIT_CN skb should be transmitted and cn
* 3: -EPERM skb should be dropped
*/
#define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func) \
({ \
struct bpf_prog_array_item *_item; \
struct bpf_prog *_prog; \
struct bpf_prog_array *_array; \
u32 ret; \
u32 _ret = 1; \
u32 _cn = 0; \
preempt_disable(); \
rcu_read_lock(); \
_array = rcu_dereference(array); \
_item = &_array->items[0]; \
while ((_prog = READ_ONCE(_item->prog))) { \
bpf_cgroup_storage_set(_item->cgroup_storage); \
ret = func(_prog, ctx); \
_ret &= (ret & 1); \
_cn |= (ret & 2); \
_item++; \
} \
rcu_read_unlock(); \
preempt_enable(); \
if (_ret) \
_ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS); \
else \
_ret = (_cn ? NET_XMIT_DROP : -EPERM); \
_ret; \
})
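
A minimal sketch of an egress cgroup/skb program using the extended return codes described above; the program and its threshold are hypothetical, and SEC() plus the bpf_helpers.h header are assumed from libbpf:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>	/* assumed libbpf header providing SEC() */

SEC("cgroup_skb/egress")
int egress_cn(struct __sk_buff *skb)
{
	if (skb->len > 1400)	/* arbitrary illustration threshold */
		return 3;	/* keep packet and set congestion notification */
	return 1;		/* keep packet */
}

char _license[] SEC("license") = "GPL";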
#define BPF_PROG_RUN_ARRAY(array, ctx, func) \
__BPF_PROG_RUN_ARRAY(array, ctx, func, false)
@@ -592,9 +649,12 @@ struct bpf_map *__bpf_map_get(struct fd f);
struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
int bpf_map_precharge_memlock(u32 pages);
int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
int bpf_map_charge_init(struct bpf_map_memory *mem, size_t size);
void bpf_map_charge_finish(struct bpf_map_memory *mem);
void bpf_map_charge_move(struct bpf_map_memory *dst,
struct bpf_map_memory *src);
void *bpf_map_area_alloc(size_t size, int numa_node);
void bpf_map_area_free(void *base);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
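
A condensed sketch of the new charge lifecycle when creating a map: charge into a local bpf_map_memory first, move it into the map on success, or release it on failure. demo_map_alloc is hypothetical; bpf_map_charge_init/finish/move and bpf_map_area_alloc are the interfaces declared above:

static struct bpf_map *demo_map_alloc(size_t charge_size)
{
	struct bpf_map_memory mem;
	struct bpf_map *map;
	int err;

	err = bpf_map_charge_init(&mem, charge_size);	/* charge memlock up front */
	if (err)
		return ERR_PTR(err);

	map = bpf_map_area_alloc(sizeof(*map), NUMA_NO_NODE);
	if (!map) {
		bpf_map_charge_finish(&mem);	/* drop the charge on failure */
		return ERR_PTR(-ENOMEM);
	}

	bpf_map_charge_move(&map->memory, &mem);	/* map now owns the charge */
	return map;
}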
@@ -992,6 +1052,7 @@ extern const struct bpf_func_proto bpf_spin_unlock_proto;
extern const struct bpf_func_proto bpf_get_local_storage_proto;
extern const struct bpf_func_proto bpf_strtol_proto;
extern const struct bpf_func_proto bpf_strtoul_proto;
extern const struct bpf_func_proto bpf_tcp_sock_proto;
/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
@@ -1040,6 +1101,15 @@ u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
struct bpf_insn *insn_buf,
struct bpf_prog *prog,
u32 *target_size);
bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
struct bpf_insn_access_aux *info);
u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
const struct bpf_insn *si,
struct bpf_insn *insn_buf,
struct bpf_prog *prog,
u32 *target_size);
#else
static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
enum bpf_access_type type,
@@ -1056,6 +1126,21 @@ static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
{
return 0;
}
static inline bool bpf_xdp_sock_is_valid_access(int off, int size,
enum bpf_access_type type,
struct bpf_insn_access_aux *info)
{
return false;
}
static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
const struct bpf_insn *si,
struct bpf_insn *insn_buf,
struct bpf_prog *prog,
u32 *target_size)
{
return 0;
}
#endif /* CONFIG_INET */
#endif /* _LINUX_BPF_H */

View File

@@ -30,6 +30,7 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, raw_tracepoint_writable)
#ifdef CONFIG_CGROUP_BPF
BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_DEVICE, cg_dev)
BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SYSCTL, cg_sysctl)
BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCKOPT, cg_sockopt)
#endif
#ifdef CONFIG_BPF_LIRC_MODE2
BPF_PROG_TYPE(BPF_PROG_TYPE_LIRC_MODE2, lirc_mode2)

Some files were not shown because too many files have changed in this diff.