Merge branch 'for-5.13/surface-system-aggregator-intergration' into for-linus

- Surface Aggregator Module support from Maximilian Luz
This commit is contained in:
Jiri Kosina
2021-04-29 21:45:19 +02:00
1834 changed files with 64532 additions and 31678 deletions

View File

@@ -50,6 +50,7 @@ mandatory-y += sections.h
mandatory-y += serial.h
mandatory-y += shmparam.h
mandatory-y += simd.h
mandatory-y += softirq_stack.h
mandatory-y += switch_to.h
mandatory-y += timex.h
mandatory-y += tlbflush.h

View File

@@ -0,0 +1,52 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_GENERIC_NUMA_H
#define __ASM_GENERIC_NUMA_H
#ifdef CONFIG_NUMA
#define NR_NODE_MEMBLKS (MAX_NUMNODES * 2)
int __node_distance(int from, int to);
#define node_distance(a, b) __node_distance(a, b)
extern nodemask_t numa_nodes_parsed __initdata;
extern bool numa_off;
/* Mappings between node number and cpus on that node. */
extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
void numa_clear_node(unsigned int cpu);
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
const struct cpumask *cpumask_of_node(int node);
#else
/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
static inline const struct cpumask *cpumask_of_node(int node)
{
if (node == NUMA_NO_NODE)
return cpu_all_mask;
return node_to_cpumask_map[node];
}
#endif
void __init arch_numa_init(void);
int __init numa_add_memblk(int nodeid, u64 start, u64 end);
void __init numa_set_distance(int from, int to, int distance);
void __init numa_free_distance(void);
void __init early_map_cpu_to_node(unsigned int cpu, int nid);
void numa_store_cpu_info(unsigned int cpu);
void numa_add_cpu(unsigned int cpu);
void numa_remove_cpu(unsigned int cpu);
#else /* CONFIG_NUMA */
static inline void numa_store_cpu_info(unsigned int cpu) { }
static inline void numa_add_cpu(unsigned int cpu) { }
static inline void numa_remove_cpu(unsigned int cpu) { }
static inline void arch_numa_init(void) { }
static inline void early_map_cpu_to_node(unsigned int cpu, int nid) { }
#endif /* CONFIG_NUMA */
#endif /* __ASM_GENERIC_NUMA_H */

View File

@@ -0,0 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef __ASM_GENERIC_SOFTIRQ_STACK_H
#define __ASM_GENERIC_SOFTIRQ_STACK_H
#ifdef CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK
void do_softirq_own_stack(void);
#else
static inline void do_softirq_own_stack(void)
{
__do_softirq();
}
#endif
#endif

View File

@@ -98,7 +98,7 @@
*/
#if defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || defined(CONFIG_LTO_CLANG)
#define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..L* .data..compoundliteral* .data.$__unnamed_* .data.$Lubsan_*
#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..L* .data..compoundliteral* .data.$__unnamed_* .data.$L*
#define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]*
#define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]* .rodata..L*
#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* .bss..compoundliteral*
@@ -803,8 +803,13 @@
/* DWARF 4 */ \
.debug_types 0 : { *(.debug_types) } \
/* DWARF 5 */ \
.debug_addr 0 : { *(.debug_addr) } \
.debug_line_str 0 : { *(.debug_line_str) } \
.debug_loclists 0 : { *(.debug_loclists) } \
.debug_macro 0 : { *(.debug_macro) } \
.debug_addr 0 : { *(.debug_addr) }
.debug_names 0 : { *(.debug_names) } \
.debug_rnglists 0 : { *(.debug_rnglists) } \
.debug_str_offsets 0 : { *(.debug_str_offsets) }
/* Stabs debugging sections. */
#define STABS_DEBUG \
@@ -963,12 +968,13 @@
#endif
/*
* Clang's -fsanitize=kernel-address and -fsanitize=thread produce
* unwanted sections (.eh_frame and .init_array.*), but
* CONFIG_CONSTRUCTORS wants to keep any .init_array.* sections.
* Clang's -fprofile-arcs, -fsanitize=kernel-address, and
* -fsanitize=thread produce unwanted sections (.eh_frame
* and .init_array.*), but CONFIG_CONSTRUCTORS wants to
* keep any .init_array.* sections.
* https://bugs.llvm.org/show_bug.cgi?id=46478
*/
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KCSAN)
#if defined(CONFIG_GCOV_KERNEL) || defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KCSAN)
# ifdef CONFIG_CONSTRUCTORS
# define SANITIZER_DISCARDS \
*(.eh_frame)

View File

@@ -12,7 +12,6 @@
#include <linux/keyctl.h>
#include <linux/oid_registry.h>
#include <crypto/akcipher.h>
/*
* Cryptographic data for the public-key subtype of the asymmetric key type.

View File

@@ -399,6 +399,9 @@ void drm_event_cancel_free(struct drm_device *dev,
struct drm_pending_event *p);
void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e);
void drm_send_event(struct drm_device *dev, struct drm_pending_event *e);
void drm_send_event_timestamp_locked(struct drm_device *dev,
struct drm_pending_event *e,
ktime_t timestamp);
struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags);

View File

@@ -9,7 +9,6 @@
/*
* Kendryte K210 SoC clock identifiers (arbitrary values).
*/
#define K210_CLK_ACLK 0
#define K210_CLK_CPU 0
#define K210_CLK_SRAM0 1
#define K210_CLK_SRAM1 2

View File

@@ -0,0 +1,103 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* This header provides the constants of the standard Chrome OS key matrix
* for cros-ec keyboard-controller bindings.
*
* Copyright (c) 2021 Google, Inc
*/
#ifndef _CROS_EC_KEYBOARD_H
#define _CROS_EC_KEYBOARD_H
#define CROS_STD_TOP_ROW_KEYMAP \
MATRIX_KEY(0x00, 0x02, KEY_F1) \
MATRIX_KEY(0x03, 0x02, KEY_F2) \
MATRIX_KEY(0x02, 0x02, KEY_F3) \
MATRIX_KEY(0x01, 0x02, KEY_F4) \
MATRIX_KEY(0x03, 0x04, KEY_F5) \
MATRIX_KEY(0x02, 0x04, KEY_F6) \
MATRIX_KEY(0x01, 0x04, KEY_F7) \
MATRIX_KEY(0x02, 0x09, KEY_F8) \
MATRIX_KEY(0x01, 0x09, KEY_F9) \
MATRIX_KEY(0x00, 0x04, KEY_F10)
#define CROS_STD_MAIN_KEYMAP \
MATRIX_KEY(0x00, 0x01, KEY_LEFTMETA) \
MATRIX_KEY(0x00, 0x03, KEY_B) \
MATRIX_KEY(0x00, 0x05, KEY_RO) \
MATRIX_KEY(0x00, 0x06, KEY_N) \
MATRIX_KEY(0x00, 0x08, KEY_EQUAL) \
MATRIX_KEY(0x00, 0x0a, KEY_RIGHTALT) \
MATRIX_KEY(0x01, 0x01, KEY_ESC) \
MATRIX_KEY(0x01, 0x03, KEY_G) \
MATRIX_KEY(0x01, 0x06, KEY_H) \
MATRIX_KEY(0x01, 0x08, KEY_APOSTROPHE) \
MATRIX_KEY(0x01, 0x0b, KEY_BACKSPACE) \
MATRIX_KEY(0x01, 0x0c, KEY_HENKAN) \
\
MATRIX_KEY(0x02, 0x00, KEY_LEFTCTRL) \
MATRIX_KEY(0x02, 0x01, KEY_TAB) \
MATRIX_KEY(0x02, 0x03, KEY_T) \
MATRIX_KEY(0x02, 0x05, KEY_RIGHTBRACE) \
MATRIX_KEY(0x02, 0x06, KEY_Y) \
MATRIX_KEY(0x02, 0x07, KEY_102ND) \
MATRIX_KEY(0x02, 0x08, KEY_LEFTBRACE) \
MATRIX_KEY(0x02, 0x0a, KEY_YEN) \
\
MATRIX_KEY(0x03, 0x00, KEY_LEFTMETA) \
MATRIX_KEY(0x03, 0x01, KEY_GRAVE) \
MATRIX_KEY(0x03, 0x03, KEY_5) \
MATRIX_KEY(0x03, 0x06, KEY_6) \
MATRIX_KEY(0x03, 0x08, KEY_MINUS) \
MATRIX_KEY(0x03, 0x09, KEY_SLEEP) \
MATRIX_KEY(0x03, 0x0b, KEY_BACKSLASH) \
MATRIX_KEY(0x03, 0x0c, KEY_MUHENKAN) \
\
MATRIX_KEY(0x04, 0x00, KEY_RIGHTCTRL) \
MATRIX_KEY(0x04, 0x01, KEY_A) \
MATRIX_KEY(0x04, 0x02, KEY_D) \
MATRIX_KEY(0x04, 0x03, KEY_F) \
MATRIX_KEY(0x04, 0x04, KEY_S) \
MATRIX_KEY(0x04, 0x05, KEY_K) \
MATRIX_KEY(0x04, 0x06, KEY_J) \
MATRIX_KEY(0x04, 0x08, KEY_SEMICOLON) \
MATRIX_KEY(0x04, 0x09, KEY_L) \
MATRIX_KEY(0x04, 0x0a, KEY_BACKSLASH) \
MATRIX_KEY(0x04, 0x0b, KEY_ENTER) \
\
MATRIX_KEY(0x05, 0x01, KEY_Z) \
MATRIX_KEY(0x05, 0x02, KEY_C) \
MATRIX_KEY(0x05, 0x03, KEY_V) \
MATRIX_KEY(0x05, 0x04, KEY_X) \
MATRIX_KEY(0x05, 0x05, KEY_COMMA) \
MATRIX_KEY(0x05, 0x06, KEY_M) \
MATRIX_KEY(0x05, 0x07, KEY_LEFTSHIFT) \
MATRIX_KEY(0x05, 0x08, KEY_SLASH) \
MATRIX_KEY(0x05, 0x09, KEY_DOT) \
MATRIX_KEY(0x05, 0x0b, KEY_SPACE) \
\
MATRIX_KEY(0x06, 0x01, KEY_1) \
MATRIX_KEY(0x06, 0x02, KEY_3) \
MATRIX_KEY(0x06, 0x03, KEY_4) \
MATRIX_KEY(0x06, 0x04, KEY_2) \
MATRIX_KEY(0x06, 0x05, KEY_8) \
MATRIX_KEY(0x06, 0x06, KEY_7) \
MATRIX_KEY(0x06, 0x08, KEY_0) \
MATRIX_KEY(0x06, 0x09, KEY_9) \
MATRIX_KEY(0x06, 0x0a, KEY_LEFTALT) \
MATRIX_KEY(0x06, 0x0b, KEY_DOWN) \
MATRIX_KEY(0x06, 0x0c, KEY_RIGHT) \
\
MATRIX_KEY(0x07, 0x01, KEY_Q) \
MATRIX_KEY(0x07, 0x02, KEY_E) \
MATRIX_KEY(0x07, 0x03, KEY_R) \
MATRIX_KEY(0x07, 0x04, KEY_W) \
MATRIX_KEY(0x07, 0x05, KEY_I) \
MATRIX_KEY(0x07, 0x06, KEY_U) \
MATRIX_KEY(0x07, 0x07, KEY_RIGHTSHIFT) \
MATRIX_KEY(0x07, 0x08, KEY_P) \
MATRIX_KEY(0x07, 0x09, KEY_O) \
MATRIX_KEY(0x07, 0x0b, KEY_UP) \
MATRIX_KEY(0x07, 0x0c, KEY_LEFT)
#endif /* _CROS_EC_KEYBOARD_H */

View File

@@ -0,0 +1,105 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Qualcomm interconnect IDs
*
* Copyright (c) 2020, Linaro Ltd.
* Author: Jun Nie <jun.nie@linaro.org>
*/
#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_MSM8939_H
#define __DT_BINDINGS_INTERCONNECT_QCOM_MSM8939_H
#define BIMC_SNOC_SLV 0
#define MASTER_QDSS_BAM 1
#define MASTER_QDSS_ETR 2
#define MASTER_SNOC_CFG 3
#define PCNOC_SNOC_SLV 4
#define SLAVE_APSS 5
#define SLAVE_CATS_128 6
#define SLAVE_OCMEM_64 7
#define SLAVE_IMEM 8
#define SLAVE_QDSS_STM 9
#define SLAVE_SRVC_SNOC 10
#define SNOC_BIMC_0_MAS 11
#define SNOC_BIMC_1_MAS 12
#define SNOC_BIMC_2_MAS 13
#define SNOC_INT_0 14
#define SNOC_INT_1 15
#define SNOC_INT_BIMC 16
#define SNOC_PCNOC_MAS 17
#define SNOC_QDSS_INT 18
#define MASTER_VIDEO_P0 0
#define MASTER_JPEG 1
#define MASTER_VFE 2
#define MASTER_MDP_PORT0 3
#define MASTER_MDP_PORT1 4
#define MASTER_CPP 5
#define SNOC_MM_INT_0 6
#define SNOC_MM_INT_1 7
#define SNOC_MM_INT_2 8
#define BIMC_SNOC_MAS 0
#define MASTER_AMPSS_M0 1
#define MASTER_GRAPHICS_3D 2
#define MASTER_TCU0 3
#define SLAVE_AMPSS_L2 4
#define SLAVE_EBI_CH0 5
#define SNOC_BIMC_0_SLV 6
#define SNOC_BIMC_1_SLV 7
#define SNOC_BIMC_2_SLV 8
#define MASTER_BLSP_1 0
#define MASTER_DEHR 1
#define MASTER_LPASS 2
#define MASTER_CRYPTO_CORE0 3
#define MASTER_SDCC_1 4
#define MASTER_SDCC_2 5
#define MASTER_SPDM 6
#define MASTER_USB_HS1 7
#define MASTER_USB_HS2 8
#define PCNOC_INT_0 9
#define PCNOC_INT_1 10
#define PCNOC_MAS_0 11
#define PCNOC_MAS_1 12
#define PCNOC_SLV_0 13
#define PCNOC_SLV_1 14
#define PCNOC_SLV_2 15
#define PCNOC_SLV_3 16
#define PCNOC_SLV_4 17
#define PCNOC_SLV_8 18
#define PCNOC_SLV_9 19
#define PCNOC_SNOC_MAS 20
#define SLAVE_BIMC_CFG 21
#define SLAVE_BLSP_1 22
#define SLAVE_BOOT_ROM 23
#define SLAVE_CAMERA_CFG 24
#define SLAVE_CLK_CTL 25
#define SLAVE_CRYPTO_0_CFG 26
#define SLAVE_DEHR_CFG 27
#define SLAVE_DISPLAY_CFG 28
#define SLAVE_GRAPHICS_3D_CFG 29
#define SLAVE_IMEM_CFG 30
#define SLAVE_LPASS 31
#define SLAVE_MPM 32
#define SLAVE_MSG_RAM 33
#define SLAVE_MSS 34
#define SLAVE_PDM 35
#define SLAVE_PMIC_ARB 36
#define SLAVE_PCNOC_CFG 37
#define SLAVE_PRNG 38
#define SLAVE_QDSS_CFG 39
#define SLAVE_RBCPR_CFG 40
#define SLAVE_SDCC_1 41
#define SLAVE_SDCC_2 42
#define SLAVE_SECURITY 43
#define SLAVE_SNOC_CFG 44
#define SLAVE_SPDM 45
#define SLAVE_TCSR 46
#define SLAVE_TLMM 47
#define SLAVE_USB_HS1 48
#define SLAVE_USB_HS2 49
#define SLAVE_VENUS_CFG 50
#define SNOC_PCNOC_SLV 51
#endif

View File

@@ -0,0 +1,76 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Qualcomm SDX55 interconnect IDs
*
* Copyright (c) 2021, Linaro Ltd.
* Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
*/
#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SDX55_H
#define __DT_BINDINGS_INTERCONNECT_QCOM_SDX55_H
#define MASTER_LLCC 0
#define SLAVE_EBI_CH0 1
#define MASTER_TCU_0 0
#define MASTER_SNOC_GC_MEM_NOC 1
#define MASTER_AMPSS_M0 2
#define SLAVE_LLCC 3
#define SLAVE_MEM_NOC_SNOC 4
#define SLAVE_MEM_NOC_PCIE_SNOC 5
#define MASTER_AUDIO 0
#define MASTER_BLSP_1 1
#define MASTER_QDSS_BAM 2
#define MASTER_QPIC 3
#define MASTER_SNOC_CFG 4
#define MASTER_SPMI_FETCHER 5
#define MASTER_ANOC_SNOC 6
#define MASTER_IPA 7
#define MASTER_MEM_NOC_SNOC 8
#define MASTER_MEM_NOC_PCIE_SNOC 9
#define MASTER_CRYPTO_CORE_0 10
#define MASTER_EMAC 11
#define MASTER_IPA_PCIE 12
#define MASTER_PCIE 13
#define MASTER_QDSS_ETR 14
#define MASTER_SDCC_1 15
#define MASTER_USB3 16
#define SLAVE_AOP 17
#define SLAVE_AOSS 18
#define SLAVE_APPSS 19
#define SLAVE_AUDIO 20
#define SLAVE_BLSP_1 21
#define SLAVE_CLK_CTL 22
#define SLAVE_CRYPTO_0_CFG 23
#define SLAVE_CNOC_DDRSS 24
#define SLAVE_ECC_CFG 25
#define SLAVE_EMAC_CFG 26
#define SLAVE_IMEM_CFG 27
#define SLAVE_IPA_CFG 28
#define SLAVE_CNOC_MSS 29
#define SLAVE_PCIE_PARF 30
#define SLAVE_PDM 31
#define SLAVE_PRNG 32
#define SLAVE_QDSS_CFG 33
#define SLAVE_QPIC 34
#define SLAVE_SDCC_1 35
#define SLAVE_SNOC_CFG 36
#define SLAVE_SPMI_FETCHER 37
#define SLAVE_SPMI_VGI_COEX 38
#define SLAVE_TCSR 39
#define SLAVE_TLMM 40
#define SLAVE_USB3 41
#define SLAVE_USB3_PHY_CFG 42
#define SLAVE_ANOC_SNOC 43
#define SLAVE_SNOC_MEM_NOC_GC 44
#define SLAVE_OCIMEM 45
#define SLAVE_SERVICE_SNOC 46
#define SLAVE_PCIE_0 47
#define SLAVE_QDSS_STM 48
#define SLAVE_TCU 49
#define MASTER_IPA_CORE 0
#define SLAVE_IPA_CORE 1
#endif

View File

@@ -0,0 +1,276 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2020 Sean Anderson <seanga2@gmail.com>
* Copyright (c) 2020 Western Digital Corporation or its affiliates.
*/
#ifndef PINCTRL_K210_FPIOA_H
#define PINCTRL_K210_FPIOA_H
/*
* Full list of FPIOA functions from
* kendryte-standalone-sdk/lib/drivers/include/fpioa.h
*/
#define K210_PCF_MASK GENMASK(7, 0)
#define K210_PCF_JTAG_TCLK 0 /* JTAG Test Clock */
#define K210_PCF_JTAG_TDI 1 /* JTAG Test Data In */
#define K210_PCF_JTAG_TMS 2 /* JTAG Test Mode Select */
#define K210_PCF_JTAG_TDO 3 /* JTAG Test Data Out */
#define K210_PCF_SPI0_D0 4 /* SPI0 Data 0 */
#define K210_PCF_SPI0_D1 5 /* SPI0 Data 1 */
#define K210_PCF_SPI0_D2 6 /* SPI0 Data 2 */
#define K210_PCF_SPI0_D3 7 /* SPI0 Data 3 */
#define K210_PCF_SPI0_D4 8 /* SPI0 Data 4 */
#define K210_PCF_SPI0_D5 9 /* SPI0 Data 5 */
#define K210_PCF_SPI0_D6 10 /* SPI0 Data 6 */
#define K210_PCF_SPI0_D7 11 /* SPI0 Data 7 */
#define K210_PCF_SPI0_SS0 12 /* SPI0 Chip Select 0 */
#define K210_PCF_SPI0_SS1 13 /* SPI0 Chip Select 1 */
#define K210_PCF_SPI0_SS2 14 /* SPI0 Chip Select 2 */
#define K210_PCF_SPI0_SS3 15 /* SPI0 Chip Select 3 */
#define K210_PCF_SPI0_ARB 16 /* SPI0 Arbitration */
#define K210_PCF_SPI0_SCLK 17 /* SPI0 Serial Clock */
#define K210_PCF_UARTHS_RX 18 /* UART High speed Receiver */
#define K210_PCF_UARTHS_TX 19 /* UART High speed Transmitter */
#define K210_PCF_RESV6 20 /* Reserved function */
#define K210_PCF_RESV7 21 /* Reserved function */
#define K210_PCF_CLK_SPI1 22 /* Clock SPI1 */
#define K210_PCF_CLK_I2C1 23 /* Clock I2C1 */
#define K210_PCF_GPIOHS0 24 /* GPIO High speed 0 */
#define K210_PCF_GPIOHS1 25 /* GPIO High speed 1 */
#define K210_PCF_GPIOHS2 26 /* GPIO High speed 2 */
#define K210_PCF_GPIOHS3 27 /* GPIO High speed 3 */
#define K210_PCF_GPIOHS4 28 /* GPIO High speed 4 */
#define K210_PCF_GPIOHS5 29 /* GPIO High speed 5 */
#define K210_PCF_GPIOHS6 30 /* GPIO High speed 6 */
#define K210_PCF_GPIOHS7 31 /* GPIO High speed 7 */
#define K210_PCF_GPIOHS8 32 /* GPIO High speed 8 */
#define K210_PCF_GPIOHS9 33 /* GPIO High speed 9 */
#define K210_PCF_GPIOHS10 34 /* GPIO High speed 10 */
#define K210_PCF_GPIOHS11 35 /* GPIO High speed 11 */
#define K210_PCF_GPIOHS12 36 /* GPIO High speed 12 */
#define K210_PCF_GPIOHS13 37 /* GPIO High speed 13 */
#define K210_PCF_GPIOHS14 38 /* GPIO High speed 14 */
#define K210_PCF_GPIOHS15 39 /* GPIO High speed 15 */
#define K210_PCF_GPIOHS16 40 /* GPIO High speed 16 */
#define K210_PCF_GPIOHS17 41 /* GPIO High speed 17 */
#define K210_PCF_GPIOHS18 42 /* GPIO High speed 18 */
#define K210_PCF_GPIOHS19 43 /* GPIO High speed 19 */
#define K210_PCF_GPIOHS20 44 /* GPIO High speed 20 */
#define K210_PCF_GPIOHS21 45 /* GPIO High speed 21 */
#define K210_PCF_GPIOHS22 46 /* GPIO High speed 22 */
#define K210_PCF_GPIOHS23 47 /* GPIO High speed 23 */
#define K210_PCF_GPIOHS24 48 /* GPIO High speed 24 */
#define K210_PCF_GPIOHS25 49 /* GPIO High speed 25 */
#define K210_PCF_GPIOHS26 50 /* GPIO High speed 26 */
#define K210_PCF_GPIOHS27 51 /* GPIO High speed 27 */
#define K210_PCF_GPIOHS28 52 /* GPIO High speed 28 */
#define K210_PCF_GPIOHS29 53 /* GPIO High speed 29 */
#define K210_PCF_GPIOHS30 54 /* GPIO High speed 30 */
#define K210_PCF_GPIOHS31 55 /* GPIO High speed 31 */
#define K210_PCF_GPIO0 56 /* GPIO pin 0 */
#define K210_PCF_GPIO1 57 /* GPIO pin 1 */
#define K210_PCF_GPIO2 58 /* GPIO pin 2 */
#define K210_PCF_GPIO3 59 /* GPIO pin 3 */
#define K210_PCF_GPIO4 60 /* GPIO pin 4 */
#define K210_PCF_GPIO5 61 /* GPIO pin 5 */
#define K210_PCF_GPIO6 62 /* GPIO pin 6 */
#define K210_PCF_GPIO7 63 /* GPIO pin 7 */
#define K210_PCF_UART1_RX 64 /* UART1 Receiver */
#define K210_PCF_UART1_TX 65 /* UART1 Transmitter */
#define K210_PCF_UART2_RX 66 /* UART2 Receiver */
#define K210_PCF_UART2_TX 67 /* UART2 Transmitter */
#define K210_PCF_UART3_RX 68 /* UART3 Receiver */
#define K210_PCF_UART3_TX 69 /* UART3 Transmitter */
#define K210_PCF_SPI1_D0 70 /* SPI1 Data 0 */
#define K210_PCF_SPI1_D1 71 /* SPI1 Data 1 */
#define K210_PCF_SPI1_D2 72 /* SPI1 Data 2 */
#define K210_PCF_SPI1_D3 73 /* SPI1 Data 3 */
#define K210_PCF_SPI1_D4 74 /* SPI1 Data 4 */
#define K210_PCF_SPI1_D5 75 /* SPI1 Data 5 */
#define K210_PCF_SPI1_D6 76 /* SPI1 Data 6 */
#define K210_PCF_SPI1_D7 77 /* SPI1 Data 7 */
#define K210_PCF_SPI1_SS0 78 /* SPI1 Chip Select 0 */
#define K210_PCF_SPI1_SS1 79 /* SPI1 Chip Select 1 */
#define K210_PCF_SPI1_SS2 80 /* SPI1 Chip Select 2 */
#define K210_PCF_SPI1_SS3 81 /* SPI1 Chip Select 3 */
#define K210_PCF_SPI1_ARB 82 /* SPI1 Arbitration */
#define K210_PCF_SPI1_SCLK 83 /* SPI1 Serial Clock */
#define K210_PCF_SPI2_D0 84 /* SPI2 Data 0 */
#define K210_PCF_SPI2_SS 85 /* SPI2 Select */
#define K210_PCF_SPI2_SCLK 86 /* SPI2 Serial Clock */
#define K210_PCF_I2S0_MCLK 87 /* I2S0 Master Clock */
#define K210_PCF_I2S0_SCLK 88 /* I2S0 Serial Clock(BCLK) */
#define K210_PCF_I2S0_WS 89 /* I2S0 Word Select(LRCLK) */
#define K210_PCF_I2S0_IN_D0 90 /* I2S0 Serial Data Input 0 */
#define K210_PCF_I2S0_IN_D1 91 /* I2S0 Serial Data Input 1 */
#define K210_PCF_I2S0_IN_D2 92 /* I2S0 Serial Data Input 2 */
#define K210_PCF_I2S0_IN_D3 93 /* I2S0 Serial Data Input 3 */
#define K210_PCF_I2S0_OUT_D0 94 /* I2S0 Serial Data Output 0 */
#define K210_PCF_I2S0_OUT_D1 95 /* I2S0 Serial Data Output 1 */
#define K210_PCF_I2S0_OUT_D2 96 /* I2S0 Serial Data Output 2 */
#define K210_PCF_I2S0_OUT_D3 97 /* I2S0 Serial Data Output 3 */
#define K210_PCF_I2S1_MCLK 98 /* I2S1 Master Clock */
#define K210_PCF_I2S1_SCLK 99 /* I2S1 Serial Clock(BCLK) */
#define K210_PCF_I2S1_WS 100 /* I2S1 Word Select(LRCLK) */
#define K210_PCF_I2S1_IN_D0 101 /* I2S1 Serial Data Input 0 */
#define K210_PCF_I2S1_IN_D1 102 /* I2S1 Serial Data Input 1 */
#define K210_PCF_I2S1_IN_D2 103 /* I2S1 Serial Data Input 2 */
#define K210_PCF_I2S1_IN_D3 104 /* I2S1 Serial Data Input 3 */
#define K210_PCF_I2S1_OUT_D0 105 /* I2S1 Serial Data Output 0 */
#define K210_PCF_I2S1_OUT_D1 106 /* I2S1 Serial Data Output 1 */
#define K210_PCF_I2S1_OUT_D2 107 /* I2S1 Serial Data Output 2 */
#define K210_PCF_I2S1_OUT_D3 108 /* I2S1 Serial Data Output 3 */
#define K210_PCF_I2S2_MCLK 109 /* I2S2 Master Clock */
#define K210_PCF_I2S2_SCLK 110 /* I2S2 Serial Clock(BCLK) */
#define K210_PCF_I2S2_WS 111 /* I2S2 Word Select(LRCLK) */
#define K210_PCF_I2S2_IN_D0 112 /* I2S2 Serial Data Input 0 */
#define K210_PCF_I2S2_IN_D1 113 /* I2S2 Serial Data Input 1 */
#define K210_PCF_I2S2_IN_D2 114 /* I2S2 Serial Data Input 2 */
#define K210_PCF_I2S2_IN_D3 115 /* I2S2 Serial Data Input 3 */
#define K210_PCF_I2S2_OUT_D0 116 /* I2S2 Serial Data Output 0 */
#define K210_PCF_I2S2_OUT_D1 117 /* I2S2 Serial Data Output 1 */
#define K210_PCF_I2S2_OUT_D2 118 /* I2S2 Serial Data Output 2 */
#define K210_PCF_I2S2_OUT_D3 119 /* I2S2 Serial Data Output 3 */
#define K210_PCF_RESV0 120 /* Reserved function */
#define K210_PCF_RESV1 121 /* Reserved function */
#define K210_PCF_RESV2 122 /* Reserved function */
#define K210_PCF_RESV3 123 /* Reserved function */
#define K210_PCF_RESV4 124 /* Reserved function */
#define K210_PCF_RESV5 125 /* Reserved function */
#define K210_PCF_I2C0_SCLK 126 /* I2C0 Serial Clock */
#define K210_PCF_I2C0_SDA 127 /* I2C0 Serial Data */
#define K210_PCF_I2C1_SCLK 128 /* I2C1 Serial Clock */
#define K210_PCF_I2C1_SDA 129 /* I2C1 Serial Data */
#define K210_PCF_I2C2_SCLK 130 /* I2C2 Serial Clock */
#define K210_PCF_I2C2_SDA 131 /* I2C2 Serial Data */
#define K210_PCF_DVP_XCLK 132 /* DVP System Clock */
#define K210_PCF_DVP_RST 133 /* DVP System Reset */
#define K210_PCF_DVP_PWDN 134 /* DVP Power Down Mode */
#define K210_PCF_DVP_VSYNC 135 /* DVP Vertical Sync */
#define K210_PCF_DVP_HSYNC 136 /* DVP Horizontal Sync */
#define K210_PCF_DVP_PCLK 137 /* Pixel Clock */
#define K210_PCF_DVP_D0 138 /* Data Bit 0 */
#define K210_PCF_DVP_D1 139 /* Data Bit 1 */
#define K210_PCF_DVP_D2 140 /* Data Bit 2 */
#define K210_PCF_DVP_D3 141 /* Data Bit 3 */
#define K210_PCF_DVP_D4 142 /* Data Bit 4 */
#define K210_PCF_DVP_D5 143 /* Data Bit 5 */
#define K210_PCF_DVP_D6 144 /* Data Bit 6 */
#define K210_PCF_DVP_D7 145 /* Data Bit 7 */
#define K210_PCF_SCCB_SCLK 146 /* Serial Camera Control Bus Clock */
#define K210_PCF_SCCB_SDA 147 /* Serial Camera Control Bus Data */
#define K210_PCF_UART1_CTS 148 /* UART1 Clear To Send */
#define K210_PCF_UART1_DSR 149 /* UART1 Data Set Ready */
#define K210_PCF_UART1_DCD 150 /* UART1 Data Carrier Detect */
#define K210_PCF_UART1_RI 151 /* UART1 Ring Indicator */
#define K210_PCF_UART1_SIR_IN 152 /* UART1 Serial Infrared Input */
#define K210_PCF_UART1_DTR 153 /* UART1 Data Terminal Ready */
#define K210_PCF_UART1_RTS 154 /* UART1 Request To Send */
#define K210_PCF_UART1_OUT2 155 /* UART1 User-designated Output 2 */
#define K210_PCF_UART1_OUT1 156 /* UART1 User-designated Output 1 */
#define K210_PCF_UART1_SIR_OUT 157 /* UART1 Serial Infrared Output */
#define K210_PCF_UART1_BAUD 158 /* UART1 Transmit Clock Output */
#define K210_PCF_UART1_RE 159 /* UART1 Receiver Output Enable */
#define K210_PCF_UART1_DE 160 /* UART1 Driver Output Enable */
#define K210_PCF_UART1_RS485_EN 161 /* UART1 RS485 Enable */
#define K210_PCF_UART2_CTS 162 /* UART2 Clear To Send */
#define K210_PCF_UART2_DSR 163 /* UART2 Data Set Ready */
#define K210_PCF_UART2_DCD 164 /* UART2 Data Carrier Detect */
#define K210_PCF_UART2_RI 165 /* UART2 Ring Indicator */
#define K210_PCF_UART2_SIR_IN 166 /* UART2 Serial Infrared Input */
#define K210_PCF_UART2_DTR 167 /* UART2 Data Terminal Ready */
#define K210_PCF_UART2_RTS 168 /* UART2 Request To Send */
#define K210_PCF_UART2_OUT2 169 /* UART2 User-designated Output 2 */
#define K210_PCF_UART2_OUT1 170 /* UART2 User-designated Output 1 */
#define K210_PCF_UART2_SIR_OUT 171 /* UART2 Serial Infrared Output */
#define K210_PCF_UART2_BAUD 172 /* UART2 Transmit Clock Output */
#define K210_PCF_UART2_RE 173 /* UART2 Receiver Output Enable */
#define K210_PCF_UART2_DE 174 /* UART2 Driver Output Enable */
#define K210_PCF_UART2_RS485_EN 175 /* UART2 RS485 Enable */
#define K210_PCF_UART3_CTS 176 /* UART3 Clear To Send */
#define K210_PCF_UART3_DSR 177 /* UART3 Data Set Ready */
#define K210_PCF_UART3_DCD 178 /* UART3 Data Carrier Detect */
#define K210_PCF_UART3_RI 179 /* UART3 Ring Indicator */
#define K210_PCF_UART3_SIR_IN 180 /* UART3 Serial Infrared Input */
#define K210_PCF_UART3_DTR 181 /* UART3 Data Terminal Ready */
#define K210_PCF_UART3_RTS 182 /* UART3 Request To Send */
#define K210_PCF_UART3_OUT2 183 /* UART3 User-designated Output 2 */
#define K210_PCF_UART3_OUT1 184 /* UART3 User-designated Output 1 */
#define K210_PCF_UART3_SIR_OUT 185 /* UART3 Serial Infrared Output */
#define K210_PCF_UART3_BAUD 186 /* UART3 Transmit Clock Output */
#define K210_PCF_UART3_RE 187 /* UART3 Receiver Output Enable */
#define K210_PCF_UART3_DE 188 /* UART3 Driver Output Enable */
#define K210_PCF_UART3_RS485_EN 189 /* UART3 RS485 Enable */
#define K210_PCF_TIMER0_TOGGLE1 190 /* TIMER0 Toggle Output 1 */
#define K210_PCF_TIMER0_TOGGLE2 191 /* TIMER0 Toggle Output 2 */
#define K210_PCF_TIMER0_TOGGLE3 192 /* TIMER0 Toggle Output 3 */
#define K210_PCF_TIMER0_TOGGLE4 193 /* TIMER0 Toggle Output 4 */
#define K210_PCF_TIMER1_TOGGLE1 194 /* TIMER1 Toggle Output 1 */
#define K210_PCF_TIMER1_TOGGLE2 195 /* TIMER1 Toggle Output 2 */
#define K210_PCF_TIMER1_TOGGLE3 196 /* TIMER1 Toggle Output 3 */
#define K210_PCF_TIMER1_TOGGLE4 197 /* TIMER1 Toggle Output 4 */
#define K210_PCF_TIMER2_TOGGLE1 198 /* TIMER2 Toggle Output 1 */
#define K210_PCF_TIMER2_TOGGLE2 199 /* TIMER2 Toggle Output 2 */
#define K210_PCF_TIMER2_TOGGLE3 200 /* TIMER2 Toggle Output 3 */
#define K210_PCF_TIMER2_TOGGLE4 201 /* TIMER2 Toggle Output 4 */
#define K210_PCF_CLK_SPI2 202 /* Clock SPI2 */
#define K210_PCF_CLK_I2C2 203 /* Clock I2C2 */
#define K210_PCF_INTERNAL0 204 /* Internal function signal 0 */
#define K210_PCF_INTERNAL1 205 /* Internal function signal 1 */
#define K210_PCF_INTERNAL2 206 /* Internal function signal 2 */
#define K210_PCF_INTERNAL3 207 /* Internal function signal 3 */
#define K210_PCF_INTERNAL4 208 /* Internal function signal 4 */
#define K210_PCF_INTERNAL5 209 /* Internal function signal 5 */
#define K210_PCF_INTERNAL6 210 /* Internal function signal 6 */
#define K210_PCF_INTERNAL7 211 /* Internal function signal 7 */
#define K210_PCF_INTERNAL8 212 /* Internal function signal 8 */
#define K210_PCF_INTERNAL9 213 /* Internal function signal 9 */
#define K210_PCF_INTERNAL10 214 /* Internal function signal 10 */
#define K210_PCF_INTERNAL11 215 /* Internal function signal 11 */
#define K210_PCF_INTERNAL12 216 /* Internal function signal 12 */
#define K210_PCF_INTERNAL13 217 /* Internal function signal 13 */
#define K210_PCF_INTERNAL14 218 /* Internal function signal 14 */
#define K210_PCF_INTERNAL15 219 /* Internal function signal 15 */
#define K210_PCF_INTERNAL16 220 /* Internal function signal 16 */
#define K210_PCF_INTERNAL17 221 /* Internal function signal 17 */
#define K210_PCF_CONSTANT 222 /* Constant function */
#define K210_PCF_INTERNAL18 223 /* Internal function signal 18 */
#define K210_PCF_DEBUG0 224 /* Debug function 0 */
#define K210_PCF_DEBUG1 225 /* Debug function 1 */
#define K210_PCF_DEBUG2 226 /* Debug function 2 */
#define K210_PCF_DEBUG3 227 /* Debug function 3 */
#define K210_PCF_DEBUG4 228 /* Debug function 4 */
#define K210_PCF_DEBUG5 229 /* Debug function 5 */
#define K210_PCF_DEBUG6 230 /* Debug function 6 */
#define K210_PCF_DEBUG7 231 /* Debug function 7 */
#define K210_PCF_DEBUG8 232 /* Debug function 8 */
#define K210_PCF_DEBUG9 233 /* Debug function 9 */
#define K210_PCF_DEBUG10 234 /* Debug function 10 */
#define K210_PCF_DEBUG11 235 /* Debug function 11 */
#define K210_PCF_DEBUG12 236 /* Debug function 12 */
#define K210_PCF_DEBUG13 237 /* Debug function 13 */
#define K210_PCF_DEBUG14 238 /* Debug function 14 */
#define K210_PCF_DEBUG15 239 /* Debug function 15 */
#define K210_PCF_DEBUG16 240 /* Debug function 16 */
#define K210_PCF_DEBUG17 241 /* Debug function 17 */
#define K210_PCF_DEBUG18 242 /* Debug function 18 */
#define K210_PCF_DEBUG19 243 /* Debug function 19 */
#define K210_PCF_DEBUG20 244 /* Debug function 20 */
#define K210_PCF_DEBUG21 245 /* Debug function 21 */
#define K210_PCF_DEBUG22 246 /* Debug function 22 */
#define K210_PCF_DEBUG23 247 /* Debug function 23 */
#define K210_PCF_DEBUG24 248 /* Debug function 24 */
#define K210_PCF_DEBUG25 249 /* Debug function 25 */
#define K210_PCF_DEBUG26 250 /* Debug function 26 */
#define K210_PCF_DEBUG27 251 /* Debug function 27 */
#define K210_PCF_DEBUG28 252 /* Debug function 28 */
#define K210_PCF_DEBUG29 253 /* Debug function 29 */
#define K210_PCF_DEBUG30 254 /* Debug function 30 */
#define K210_PCF_DEBUG31 255 /* Debug function 31 */
#define K210_FPIOA(pin, func) (((pin) << 16) | (func))
#define K210_PC_POWER_3V3 0
#define K210_PC_POWER_1V8 1
#endif /* PINCTRL_K210_FPIOA_H */

View File

@@ -0,0 +1,42 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2019 Sean Anderson <seanga2@gmail.com>
* Copyright (c) 2020 Western Digital Corporation or its affiliates.
*/
#ifndef RESET_K210_SYSCTL_H
#define RESET_K210_SYSCTL_H
/*
* Kendryte K210 SoC system controller K210_SYSCTL_SOFT_RESET register bits.
* Taken from Kendryte SDK (kendryte-standalone-sdk).
*/
#define K210_RST_ROM 0
#define K210_RST_DMA 1
#define K210_RST_AI 2
#define K210_RST_DVP 3
#define K210_RST_FFT 4
#define K210_RST_GPIO 5
#define K210_RST_SPI0 6
#define K210_RST_SPI1 7
#define K210_RST_SPI2 8
#define K210_RST_SPI3 9
#define K210_RST_I2S0 10
#define K210_RST_I2S1 11
#define K210_RST_I2S2 12
#define K210_RST_I2C0 13
#define K210_RST_I2C1 14
#define K210_RST_I2C2 15
#define K210_RST_UART1 16
#define K210_RST_UART2 17
#define K210_RST_UART3 18
#define K210_RST_AES 19
#define K210_RST_FPIOA 20
#define K210_RST_TIMER0 21
#define K210_RST_TIMER1 22
#define K210_RST_TIMER2 23
#define K210_RST_WDT0 24
#define K210_RST_WDT1 25
#define K210_RST_SHA 26
#define K210_RST_RTC 29
#endif /* RESET_K210_SYSCTL_H */

View File

@@ -2,7 +2,7 @@
/*
* Copyright (C) 2010 IBM Corporation
* Copyright (C) 2010 Politecnico di Torino, Italy
* TORSEC group -- http://security.polito.it
* TORSEC group -- https://security.polito.it
*
* Authors:
* Mimi Zohar <zohar@us.ibm.com>

View File

@@ -591,9 +591,6 @@ extern u32 osc_sb_native_usb4_control;
#define ACPI_GSB_ACCESS_ATTRIB_RAW_BYTES 0x0000000E
#define ACPI_GSB_ACCESS_ATTRIB_RAW_PROCESS 0x0000000F
extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
u32 *mask, u32 req);
/* Enable _OST when all relevant hotplug operations are enabled */
#if defined(CONFIG_ACPI_HOTPLUG_CPU) && \
defined(CONFIG_ACPI_HOTPLUG_MEMORY) && \
@@ -749,12 +746,12 @@ acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv)
static inline void acpi_dev_put(struct acpi_device *adev) {}
static inline bool is_acpi_node(struct fwnode_handle *fwnode)
static inline bool is_acpi_node(const struct fwnode_handle *fwnode)
{
return false;
}
static inline bool is_acpi_device_node(struct fwnode_handle *fwnode)
static inline bool is_acpi_device_node(const struct fwnode_handle *fwnode)
{
return false;
}
@@ -764,7 +761,7 @@ static inline struct acpi_device *to_acpi_device_node(struct fwnode_handle *fwno
return NULL;
}
static inline bool is_acpi_data_node(struct fwnode_handle *fwnode)
static inline bool is_acpi_data_node(const struct fwnode_handle *fwnode)
{
return false;
}

View File

@@ -20,7 +20,12 @@
#define BIO_BUG_ON
#endif
#define BIO_MAX_PAGES 256
#define BIO_MAX_PAGES 256U
static inline unsigned int bio_max_segs(unsigned int nr_segs)
{
return min(nr_segs, BIO_MAX_PAGES);
}
#define bio_prio(bio) (bio)->bi_ioprio
#define bio_set_prio(bio, prio) ((bio)->bi_ioprio = prio)

View File

@@ -214,7 +214,7 @@ static inline int get_count_order_long(unsigned long l)
* __ffs64 - find first set bit in a 64 bit word
* @word: The 64 bit word
*
* On 64 bit arches this is a synomyn for __ffs
* On 64 bit arches this is a synonym for __ffs
* The result is not defined if no bits are set, so check that @word
* is non-zero before calling this.
*/

View File

@@ -65,8 +65,6 @@ typedef void (rq_end_io_fn)(struct request *, blk_status_t);
* request flags */
typedef __u32 __bitwise req_flags_t;
/* elevator knows about this request */
#define RQF_SORTED ((__force req_flags_t)(1 << 0))
/* drive already may have started this one */
#define RQF_STARTED ((__force req_flags_t)(1 << 1))
/* may not be passed by ioscheduler */
@@ -462,7 +460,6 @@ struct request_queue {
#ifdef CONFIG_PM
struct device *dev;
enum rpm_status rpm_status;
unsigned int nr_pending;
#endif
/*

View File

@@ -23,8 +23,6 @@ struct blk_trace {
u32 pid;
u32 dev;
struct dentry *dir;
struct dentry *dropped_file;
struct dentry *msg_file;
struct list_head running_list;
atomic_t dropped;
};
@@ -119,7 +117,7 @@ struct compat_blk_user_trace_setup {
#endif
extern void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes);
void blk_fill_rwbs(char *rwbs, unsigned int op);
static inline sector_t blk_rq_trace_sector(struct request *rq)
{

View File

@@ -44,6 +44,7 @@
#include <linux/can.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS)
#define CAN_EFF_RCV_HASH_BITS 10
@@ -65,4 +66,15 @@ struct can_ml_priv {
#endif
};
static inline struct can_ml_priv *can_get_ml_priv(struct net_device *dev)
{
return netdev_get_ml_priv(dev, ML_PRIV_CAN);
}
static inline void can_set_ml_priv(struct net_device *dev,
struct can_ml_priv *ml_priv)
{
netdev_set_ml_priv(dev, ml_priv, ML_PRIV_CAN);
}
#endif /* CAN_ML_H */

View File

@@ -4,7 +4,7 @@
* Version: 0.1.0
* Description: cfag12864b LCD driver header
*
* Author: Copyright (C) Miguel Ojeda Sandonis
* Author: Copyright (C) Miguel Ojeda <ojeda@kernel.org>
* Date: 2006-10-12
*/

View File

@@ -3,16 +3,6 @@
#error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead."
#endif
#define CLANG_VERSION (__clang_major__ * 10000 \
+ __clang_minor__ * 100 \
+ __clang_patchlevel__)
#if CLANG_VERSION < 100001
#ifndef __BPF_TRACING__
# error Sorry, your version of Clang is too old - please use 10.0.1 or newer.
#endif
#endif
/* Compiler specific definitions for Clang compiler */
/* same as gcc, this was present in clang-2.6 so we can assume it works

View File

@@ -10,17 +10,6 @@
+ __GNUC_MINOR__ * 100 \
+ __GNUC_PATCHLEVEL__)
/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 */
#if GCC_VERSION < 40900
# error Sorry, your version of GCC is too old - please use 4.9 or newer.
#elif defined(CONFIG_ARM64) && GCC_VERSION < 50100
/*
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63293
* https://lore.kernel.org/r/20210107111841.GN1551@shell.armlinux.org.uk
*/
# error Sorry, your version of GCC is too old - please use 5.1 or newer.
#endif
/*
* This macro obfuscates arithmetic on a variable address so that gcc
* shouldn't recognize the original var, and make assumptions about it.

View File

@@ -10,17 +10,27 @@
#define CORESIGHT_ETM_PMU_NAME "cs_etm"
#define CORESIGHT_ETM_PMU_SEED 0x10
/* ETMv3.5/PTM's ETMCR config bit */
#define ETM_OPT_CYCACC 12
#define ETM_OPT_CTXTID 14
#define ETM_OPT_TS 28
#define ETM_OPT_RETSTK 29
/*
* Below are the definition of bit offsets for perf option, and works as
* arbitrary values for all ETM versions.
*
* Most of them are orignally from ETMv3.5/PTM's ETMCR config, therefore,
* ETMv3.5/PTM doesn't define ETMCR config bits with prefix "ETM3_" and
* directly use below macros as config bits.
*/
#define ETM_OPT_CYCACC 12
#define ETM_OPT_CTXTID 14
#define ETM_OPT_CTXTID2 15
#define ETM_OPT_TS 28
#define ETM_OPT_RETSTK 29
/* ETMv4 CONFIGR programming bits for the ETM OPTs */
#define ETM4_CFG_BIT_CYCACC 4
#define ETM4_CFG_BIT_CTXTID 6
#define ETM4_CFG_BIT_VMID 7
#define ETM4_CFG_BIT_TS 11
#define ETM4_CFG_BIT_RETSTK 12
#define ETM4_CFG_BIT_VMID_OPT 15
static inline int coresight_get_trace_id(int cpu)
{

View File

@@ -7,6 +7,7 @@
#define _LINUX_CORESIGHT_H
#include <linux/device.h>
#include <linux/io.h>
#include <linux/perf_event.h>
#include <linux/sched.h>
@@ -114,6 +115,32 @@ struct coresight_platform_data {
struct coresight_connection *conns;
};
/**
* struct csdev_access - Abstraction of a CoreSight device access.
*
* @io_mem : True if the device has memory mapped I/O
* @base : When io_mem == true, base address of the component
* @read : Read from the given "offset" of the given instance.
* @write : Write "val" to the given "offset".
*/
struct csdev_access {
bool io_mem;
union {
void __iomem *base;
struct {
u64 (*read)(u32 offset, bool relaxed, bool _64bit);
void (*write)(u64 val, u32 offset, bool relaxed,
bool _64bit);
};
};
};
#define CSDEV_ACCESS_IOMEM(_addr) \
((struct csdev_access) { \
.io_mem = true, \
.base = (_addr), \
})
/**
* struct coresight_desc - description of a component required from drivers
* @type: as defined by @coresight_dev_type.
@@ -125,6 +152,7 @@ struct coresight_platform_data {
* @groups: operations specific to this component. These will end up
* in the component's sysfs sub-directory.
* @name: name for the coresight device, also shown under sysfs.
* @access: Describe access to the device
*/
struct coresight_desc {
enum coresight_dev_type type;
@@ -134,6 +162,7 @@ struct coresight_desc {
struct device *dev;
const struct attribute_group **groups;
const char *name;
struct csdev_access access;
};
/**
@@ -173,7 +202,8 @@ struct coresight_sysfs_link {
* @type: as defined by @coresight_dev_type.
* @subtype: as defined by @coresight_dev_subtype.
* @ops: generic operations for this component, as defined
by @coresight_ops.
* by @coresight_ops.
* @access: Device i/o access abstraction for this device.
* @dev: The device entity associated to this component.
* @refcnt: keep track of what is in use.
* @orphan: true if the component has connections that haven't been linked.
@@ -195,6 +225,7 @@ struct coresight_device {
enum coresight_dev_type type;
union coresight_dev_subtype subtype;
const struct coresight_ops *ops;
struct csdev_access access;
struct device dev;
atomic_t *refcnt;
bool orphan;
@@ -326,23 +357,133 @@ struct coresight_ops {
};
#if IS_ENABLED(CONFIG_CORESIGHT)
static inline u32 csdev_access_relaxed_read32(struct csdev_access *csa,
u32 offset)
{
if (likely(csa->io_mem))
return readl_relaxed(csa->base + offset);
return csa->read(offset, true, false);
}
static inline u32 csdev_access_read32(struct csdev_access *csa, u32 offset)
{
if (likely(csa->io_mem))
return readl(csa->base + offset);
return csa->read(offset, false, false);
}
static inline void csdev_access_relaxed_write32(struct csdev_access *csa,
u32 val, u32 offset)
{
if (likely(csa->io_mem))
writel_relaxed(val, csa->base + offset);
else
csa->write(val, offset, true, false);
}
static inline void csdev_access_write32(struct csdev_access *csa, u32 val, u32 offset)
{
if (likely(csa->io_mem))
writel(val, csa->base + offset);
else
csa->write(val, offset, false, false);
}
#ifdef CONFIG_64BIT
static inline u64 csdev_access_relaxed_read64(struct csdev_access *csa,
u32 offset)
{
if (likely(csa->io_mem))
return readq_relaxed(csa->base + offset);
return csa->read(offset, true, true);
}
static inline u64 csdev_access_read64(struct csdev_access *csa, u32 offset)
{
if (likely(csa->io_mem))
return readq(csa->base + offset);
return csa->read(offset, false, true);
}
static inline void csdev_access_relaxed_write64(struct csdev_access *csa,
u64 val, u32 offset)
{
if (likely(csa->io_mem))
writeq_relaxed(val, csa->base + offset);
else
csa->write(val, offset, true, true);
}
static inline void csdev_access_write64(struct csdev_access *csa, u64 val, u32 offset)
{
if (likely(csa->io_mem))
writeq(val, csa->base + offset);
else
csa->write(val, offset, false, true);
}
#else /* !CONFIG_64BIT */
static inline u64 csdev_access_relaxed_read64(struct csdev_access *csa,
u32 offset)
{
WARN_ON(1);
return 0;
}
static inline u64 csdev_access_read64(struct csdev_access *csa, u32 offset)
{
WARN_ON(1);
return 0;
}
static inline void csdev_access_relaxed_write64(struct csdev_access *csa,
u64 val, u32 offset)
{
WARN_ON(1);
}
static inline void csdev_access_write64(struct csdev_access *csa, u64 val, u32 offset)
{
WARN_ON(1);
}
#endif /* CONFIG_64BIT */
extern struct coresight_device *
coresight_register(struct coresight_desc *desc);
extern void coresight_unregister(struct coresight_device *csdev);
extern int coresight_enable(struct coresight_device *csdev);
extern void coresight_disable(struct coresight_device *csdev);
extern int coresight_timeout(void __iomem *addr, u32 offset,
extern int coresight_timeout(struct csdev_access *csa, u32 offset,
int position, int value);
extern int coresight_claim_device(void __iomem *base);
extern int coresight_claim_device_unlocked(void __iomem *base);
extern int coresight_claim_device(struct coresight_device *csdev);
extern int coresight_claim_device_unlocked(struct coresight_device *csdev);
extern void coresight_disclaim_device(void __iomem *base);
extern void coresight_disclaim_device_unlocked(void __iomem *base);
extern void coresight_disclaim_device(struct coresight_device *csdev);
extern void coresight_disclaim_device_unlocked(struct coresight_device *csdev);
extern char *coresight_alloc_device_name(struct coresight_dev_list *devs,
struct device *dev);
extern bool coresight_loses_context_with_cpu(struct device *dev);
u32 coresight_relaxed_read32(struct coresight_device *csdev, u32 offset);
u32 coresight_read32(struct coresight_device *csdev, u32 offset);
void coresight_write32(struct coresight_device *csdev, u32 val, u32 offset);
void coresight_relaxed_write32(struct coresight_device *csdev,
u32 val, u32 offset);
u64 coresight_relaxed_read64(struct coresight_device *csdev, u32 offset);
u64 coresight_read64(struct coresight_device *csdev, u32 offset);
void coresight_relaxed_write64(struct coresight_device *csdev,
u64 val, u32 offset);
void coresight_write64(struct coresight_device *csdev, u64 val, u32 offset);
#else
static inline struct coresight_device *
coresight_register(struct coresight_desc *desc) { return NULL; }
@@ -350,29 +491,78 @@ static inline void coresight_unregister(struct coresight_device *csdev) {}
static inline int
coresight_enable(struct coresight_device *csdev) { return -ENOSYS; }
static inline void coresight_disable(struct coresight_device *csdev) {}
static inline int coresight_timeout(void __iomem *addr, u32 offset,
int position, int value) { return 1; }
static inline int coresight_claim_device_unlocked(void __iomem *base)
static inline int coresight_timeout(struct csdev_access *csa, u32 offset,
int position, int value)
{
return 1;
}
static inline int coresight_claim_device_unlocked(struct coresight_device *csdev)
{
return -EINVAL;
}
static inline int coresight_claim_device(void __iomem *base)
static inline int coresight_claim_device(struct coresight_device *csdev)
{
return -EINVAL;
}
static inline void coresight_disclaim_device(void __iomem *base) {}
static inline void coresight_disclaim_device_unlocked(void __iomem *base) {}
static inline void coresight_disclaim_device(struct coresight_device *csdev) {}
static inline void coresight_disclaim_device_unlocked(struct coresight_device *csdev) {}
static inline bool coresight_loses_context_with_cpu(struct device *dev)
{
return false;
}
#endif
static inline u32 coresight_relaxed_read32(struct coresight_device *csdev, u32 offset)
{
WARN_ON_ONCE(1);
return 0;
}
static inline u32 coresight_read32(struct coresight_device *csdev, u32 offset)
{
WARN_ON_ONCE(1);
return 0;
}
static inline void coresight_write32(struct coresight_device *csdev, u32 val, u32 offset)
{
}
static inline void coresight_relaxed_write32(struct coresight_device *csdev,
u32 val, u32 offset)
{
}
static inline u64 coresight_relaxed_read64(struct coresight_device *csdev,
u32 offset)
{
WARN_ON_ONCE(1);
return 0;
}
static inline u64 coresight_read64(struct coresight_device *csdev, u32 offset)
{
WARN_ON_ONCE(1);
return 0;
}
static inline void coresight_relaxed_write64(struct coresight_device *csdev,
u64 val, u32 offset)
{
}
static inline void coresight_write64(struct coresight_device *csdev, u64 val, u32 offset)
{
}
#endif /* IS_ENABLED(CONFIG_CORESIGHT) */
extern int coresight_get_cpu(struct device *dev);
struct coresight_platform_data *coresight_get_platform_data(struct device *dev);
#endif
#endif /* _LINUX_COREISGHT_H */

View File

@@ -168,6 +168,7 @@ enum cpuhp_state {
CPUHP_AP_PERF_X86_CQM_ONLINE,
CPUHP_AP_PERF_X86_CSTATE_ONLINE,
CPUHP_AP_PERF_S390_CF_ONLINE,
CPUHP_AP_PERF_S390_CFD_ONLINE,
CPUHP_AP_PERF_S390_SF_ONLINE,
CPUHP_AP_PERF_ARM_CCI_ONLINE,
CPUHP_AP_PERF_ARM_CCN_ONLINE,
@@ -185,6 +186,7 @@ enum cpuhp_state {
CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE,
CPUHP_AP_PERF_POWERPC_HV_24x7_ONLINE,
CPUHP_AP_PERF_POWERPC_HV_GPCI_ONLINE,
CPUHP_AP_PERF_CSKY_ONLINE,
CPUHP_AP_WATCHDOG_ONLINE,
CPUHP_AP_WORKQUEUE_ONLINE,
CPUHP_AP_RCUTREE_ONLINE,

View File

@@ -25,7 +25,7 @@ struct inode;
struct group_info {
atomic_t usage;
int ngroups;
kgid_t gid[0];
kgid_t gid[];
} __randomize_layout;
/**

View File

@@ -291,6 +291,7 @@ struct device_dma_parameters {
* sg limitations.
*/
unsigned int max_segment_size;
unsigned int min_align_mask;
unsigned long segment_boundary_mask;
};
@@ -323,6 +324,7 @@ enum device_link_state {
* AUTOPROBE_CONSUMER: Probe consumer driver automatically after supplier binds.
* MANAGED: The core tracks presence of supplier/consumer drivers (internal).
* SYNC_STATE_ONLY: Link only affects sync_state() behavior.
* INFERRED: Inferred from data (eg: firmware) and not from driver actions.
*/
#define DL_FLAG_STATELESS BIT(0)
#define DL_FLAG_AUTOREMOVE_CONSUMER BIT(1)
@@ -332,6 +334,7 @@ enum device_link_state {
#define DL_FLAG_AUTOPROBE_CONSUMER BIT(5)
#define DL_FLAG_MANAGED BIT(6)
#define DL_FLAG_SYNC_STATE_ONLY BIT(7)
#define DL_FLAG_INFERRED BIT(8)
/**
* enum dl_dev_state - Device driver presence tracking information.

View File

@@ -75,7 +75,7 @@ enum probe_type {
* @resume: Called to bring a device from sleep mode.
* @groups: Default attributes that get created by the driver core
* automatically.
* @dev_groups: Additional attributes attached to device instance once the
* @dev_groups: Additional attributes attached to device instance once
* it is bound to the driver.
* @pm: Power management operations of the device which matched
* this driver.

86
include/linux/dfl.h Normal file
View File

@@ -0,0 +1,86 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Header file for DFL driver and device API
*
* Copyright (C) 2020 Intel Corporation, Inc.
*/
#ifndef __LINUX_DFL_H
#define __LINUX_DFL_H
#include <linux/device.h>
#include <linux/mod_devicetable.h>
/**
* enum dfl_id_type - define the DFL FIU types
*/
enum dfl_id_type {
FME_ID = 0,
PORT_ID = 1,
DFL_ID_MAX,
};
/**
* struct dfl_device - represent an dfl device on dfl bus
*
* @dev: generic device interface.
* @id: id of the dfl device.
* @type: type of DFL FIU of the device. See enum dfl_id_type.
* @feature_id: feature identifier local to its DFL FIU type.
* @mmio_res: mmio resource of this dfl device.
* @irqs: list of Linux IRQ numbers of this dfl device.
* @num_irqs: number of IRQs supported by this dfl device.
* @cdev: pointer to DFL FPGA container device this dfl device belongs to.
* @id_entry: matched id entry in dfl driver's id table.
*/
struct dfl_device {
struct device dev;
int id;
u16 type;
u16 feature_id;
struct resource mmio_res;
int *irqs;
unsigned int num_irqs;
struct dfl_fpga_cdev *cdev;
const struct dfl_device_id *id_entry;
};
/**
* struct dfl_driver - represent an dfl device driver
*
* @drv: driver model structure.
* @id_table: pointer to table of device IDs the driver is interested in.
* { } member terminated.
* @probe: mandatory callback for device binding.
* @remove: callback for device unbinding.
*/
struct dfl_driver {
struct device_driver drv;
const struct dfl_device_id *id_table;
int (*probe)(struct dfl_device *dfl_dev);
void (*remove)(struct dfl_device *dfl_dev);
};
#define to_dfl_dev(d) container_of(d, struct dfl_device, dev)
#define to_dfl_drv(d) container_of(d, struct dfl_driver, drv)
/*
* use a macro to avoid include chaining to get THIS_MODULE.
*/
#define dfl_driver_register(drv) \
__dfl_driver_register(drv, THIS_MODULE)
int __dfl_driver_register(struct dfl_driver *dfl_drv, struct module *owner);
void dfl_driver_unregister(struct dfl_driver *dfl_drv);
/*
* module_dfl_driver() - Helper macro for drivers that don't do
* anything special in module init/exit. This eliminates a lot of
* boilerplate. Each module may only use this macro once, and
* calling it replaces module_init() and module_exit().
*/
#define module_dfl_driver(__dfl_driver) \
module_driver(__dfl_driver, dfl_driver_register, \
dfl_driver_unregister)
#endif /* __LINUX_DFL_H */

View File

@@ -372,6 +372,9 @@ static inline void __dma_fence_might_wait(void) {}
int dma_fence_signal(struct dma_fence *fence);
int dma_fence_signal_locked(struct dma_fence *fence);
int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp);
int dma_fence_signal_timestamp_locked(struct dma_fence *fence,
ktime_t timestamp);
signed long dma_fence_default_wait(struct dma_fence *fence,
bool intr, signed long timeout);
int dma_fence_add_callback(struct dma_fence *fence,

View File

@@ -16,15 +16,15 @@ struct dma_heap;
/**
* struct dma_heap_ops - ops to operate on a given heap
* @allocate: allocate dmabuf and return fd
* @allocate: allocate dmabuf and return struct dma_buf ptr
*
* allocate returns dmabuf fd on success, -errno on error.
* allocate returns dmabuf on success, ERR_PTR(-errno) on error.
*/
struct dma_heap_ops {
int (*allocate)(struct dma_heap *heap,
unsigned long len,
unsigned long fd_flags,
unsigned long heap_flags);
struct dma_buf *(*allocate)(struct dma_heap *heap,
unsigned long len,
unsigned long fd_flags,
unsigned long heap_flags);
};
/**

View File

@@ -22,11 +22,6 @@ struct dma_map_ops {
gfp_t gfp);
void (*free_pages)(struct device *dev, size_t size, struct page *vaddr,
dma_addr_t dma_handle, enum dma_data_direction dir);
void *(*alloc_noncoherent)(struct device *dev, size_t size,
dma_addr_t *dma_handle, enum dma_data_direction dir,
gfp_t gfp);
void (*free_noncoherent)(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, enum dma_data_direction dir);
int (*mmap)(struct device *, struct vm_area_struct *,
void *, dma_addr_t, size_t, unsigned long attrs);

View File

@@ -263,10 +263,19 @@ struct page *dma_alloc_pages(struct device *dev, size_t size,
dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_free_pages(struct device *dev, size_t size, struct page *page,
dma_addr_t dma_handle, enum dma_data_direction dir);
void *dma_alloc_noncoherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, enum dma_data_direction dir);
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
struct page *page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);
return page ? page_address(page) : NULL;
}
static inline void dma_free_noncoherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle, enum dma_data_direction dir)
{
dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir);
}
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
size_t size, enum dma_data_direction dir, unsigned long attrs)
@@ -500,6 +509,22 @@ static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
return -EIO;
}
static inline unsigned int dma_get_min_align_mask(struct device *dev)
{
if (dev->dma_parms)
return dev->dma_parms->min_align_mask;
return 0;
}
static inline int dma_set_min_align_mask(struct device *dev,
unsigned int min_align_mask)
{
if (WARN_ON_ONCE(!dev->dma_parms))
return -EIO;
dev->dma_parms->min_align_mask = min_align_mask;
return 0;
}
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN

View File

@@ -42,14 +42,14 @@ enum psil_endpoint_type {
/**
* struct psil_endpoint_config - PSI-L Endpoint configuration
* @ep_type: PSI-L endpoint type
* @channel_tpl: Desired throughput level for the channel
* @pkt_mode: If set, the channel must be in Packet mode, otherwise in
* TR mode
* @notdpkt: TDCM must be suppressed on the TX channel
* @needs_epib: Endpoint needs EPIB
* @psd_size: If set, PSdata is used by the endpoint
* @channel_tpl: Desired throughput level for the channel
* @pdma_acc32: ACC32 must be enabled on the PDMA side
* @pdma_burst: BURST must be enabled on the PDMA side
* @psd_size: If set, PSdata is used by the endpoint
* @mapped_channel_id: PKTDMA thread to channel mapping for mapped channels.
* The thread must be serviced by the specified channel if
* mapped_channel_id is >= 0 in case of PKTDMA
@@ -62,23 +62,22 @@ enum psil_endpoint_type {
*/
struct psil_endpoint_config {
enum psil_endpoint_type ep_type;
enum udma_tp_level channel_tpl;
unsigned pkt_mode:1;
unsigned notdpkt:1;
unsigned needs_epib:1;
u32 psd_size;
enum udma_tp_level channel_tpl;
/* PDMA properties, valid for PSIL_EP_PDMA_* */
unsigned pdma_acc32:1;
unsigned pdma_burst:1;
u32 psd_size;
/* PKDMA mapped channel */
int mapped_channel_id;
s16 mapped_channel_id;
/* PKTDMA tflow and rflow ranges for mapped channel */
u16 flow_start;
u16 flow_num;
u16 default_flow_id;
s16 default_flow_id;
};
int psil_set_new_ep_config(struct device *dev, const char *name,

View File

@@ -1,16 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _MMP_PDMA_H_
#define _MMP_PDMA_H_
struct dma_chan;
#ifdef CONFIG_MMP_PDMA
bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param);
#else
static inline bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
{
return false;
}
#endif
#endif /* _MMP_PDMA_H_ */

View File

@@ -745,6 +745,8 @@ enum dmaengine_alignment {
DMAENGINE_ALIGN_16_BYTES = 4,
DMAENGINE_ALIGN_32_BYTES = 5,
DMAENGINE_ALIGN_64_BYTES = 6,
DMAENGINE_ALIGN_128_BYTES = 7,
DMAENGINE_ALIGN_256_BYTES = 8,
};
/**

View File

@@ -16,6 +16,8 @@ struct eeprom_93xx46_platform_data {
#define EEPROM_93XX46_QUIRK_SINGLE_WORD_READ (1 << 0)
/* Instructions such as EWEN are (addrlen + 2) in length. */
#define EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH (1 << 1)
/* Add extra cycle after address during a read */
#define EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE BIT(2)
/*
* optional hooks to control additional logic

View File

@@ -64,28 +64,27 @@ enum pm_api_id {
PM_GET_API_VERSION = 1,
PM_SYSTEM_SHUTDOWN = 12,
PM_REQUEST_NODE = 13,
PM_RELEASE_NODE,
PM_SET_REQUIREMENT,
PM_RELEASE_NODE = 14,
PM_SET_REQUIREMENT = 15,
PM_RESET_ASSERT = 17,
PM_RESET_GET_STATUS,
PM_RESET_GET_STATUS = 18,
PM_PM_INIT_FINALIZE = 21,
PM_FPGA_LOAD,
PM_FPGA_GET_STATUS,
PM_FPGA_LOAD = 22,
PM_FPGA_GET_STATUS = 23,
PM_GET_CHIPID = 24,
PM_IOCTL = 34,
PM_QUERY_DATA,
PM_CLOCK_ENABLE,
PM_CLOCK_DISABLE,
PM_CLOCK_GETSTATE,
PM_CLOCK_SETDIVIDER,
PM_CLOCK_GETDIVIDER,
PM_CLOCK_SETRATE,
PM_CLOCK_GETRATE,
PM_CLOCK_SETPARENT,
PM_CLOCK_GETPARENT,
PM_QUERY_DATA = 35,
PM_CLOCK_ENABLE = 36,
PM_CLOCK_DISABLE = 37,
PM_CLOCK_GETSTATE = 38,
PM_CLOCK_SETDIVIDER = 39,
PM_CLOCK_GETDIVIDER = 40,
PM_CLOCK_SETRATE = 41,
PM_CLOCK_GETRATE = 42,
PM_CLOCK_SETPARENT = 43,
PM_CLOCK_GETPARENT = 44,
PM_SECURE_AES = 47,
PM_FEATURE_CHECK = 63,
PM_API_MAX,
};
/* PMU-FW return status codes */
@@ -93,21 +92,21 @@ enum pm_ret_status {
XST_PM_SUCCESS = 0,
XST_PM_NO_FEATURE = 19,
XST_PM_INTERNAL = 2000,
XST_PM_CONFLICT,
XST_PM_NO_ACCESS,
XST_PM_INVALID_NODE,
XST_PM_DOUBLE_REQ,
XST_PM_ABORT_SUSPEND,
XST_PM_CONFLICT = 2001,
XST_PM_NO_ACCESS = 2002,
XST_PM_INVALID_NODE = 2003,
XST_PM_DOUBLE_REQ = 2004,
XST_PM_ABORT_SUSPEND = 2005,
XST_PM_MULT_USER = 2008,
};
enum pm_ioctl_id {
IOCTL_SD_DLL_RESET = 6,
IOCTL_SET_SD_TAPDELAY,
IOCTL_SET_PLL_FRAC_MODE,
IOCTL_GET_PLL_FRAC_MODE,
IOCTL_SET_PLL_FRAC_DATA,
IOCTL_GET_PLL_FRAC_DATA,
IOCTL_SET_SD_TAPDELAY = 7,
IOCTL_SET_PLL_FRAC_MODE = 8,
IOCTL_GET_PLL_FRAC_MODE = 9,
IOCTL_SET_PLL_FRAC_DATA = 10,
IOCTL_GET_PLL_FRAC_DATA = 11,
IOCTL_WRITE_GGS = 12,
IOCTL_READ_GGS = 13,
IOCTL_WRITE_PGGS = 14,
@@ -117,185 +116,185 @@ enum pm_ioctl_id {
};
enum pm_query_id {
PM_QID_INVALID,
PM_QID_CLOCK_GET_NAME,
PM_QID_CLOCK_GET_TOPOLOGY,
PM_QID_CLOCK_GET_FIXEDFACTOR_PARAMS,
PM_QID_CLOCK_GET_PARENTS,
PM_QID_CLOCK_GET_ATTRIBUTES,
PM_QID_INVALID = 0,
PM_QID_CLOCK_GET_NAME = 1,
PM_QID_CLOCK_GET_TOPOLOGY = 2,
PM_QID_CLOCK_GET_FIXEDFACTOR_PARAMS = 3,
PM_QID_CLOCK_GET_PARENTS = 4,
PM_QID_CLOCK_GET_ATTRIBUTES = 5,
PM_QID_CLOCK_GET_NUM_CLOCKS = 12,
PM_QID_CLOCK_GET_MAX_DIVISOR,
PM_QID_CLOCK_GET_MAX_DIVISOR = 13,
};
enum zynqmp_pm_reset_action {
PM_RESET_ACTION_RELEASE,
PM_RESET_ACTION_ASSERT,
PM_RESET_ACTION_PULSE,
PM_RESET_ACTION_RELEASE = 0,
PM_RESET_ACTION_ASSERT = 1,
PM_RESET_ACTION_PULSE = 2,
};
enum zynqmp_pm_reset {
ZYNQMP_PM_RESET_START = 1000,
ZYNQMP_PM_RESET_PCIE_CFG = ZYNQMP_PM_RESET_START,
ZYNQMP_PM_RESET_PCIE_BRIDGE,
ZYNQMP_PM_RESET_PCIE_CTRL,
ZYNQMP_PM_RESET_DP,
ZYNQMP_PM_RESET_SWDT_CRF,
ZYNQMP_PM_RESET_AFI_FM5,
ZYNQMP_PM_RESET_AFI_FM4,
ZYNQMP_PM_RESET_AFI_FM3,
ZYNQMP_PM_RESET_AFI_FM2,
ZYNQMP_PM_RESET_AFI_FM1,
ZYNQMP_PM_RESET_AFI_FM0,
ZYNQMP_PM_RESET_GDMA,
ZYNQMP_PM_RESET_GPU_PP1,
ZYNQMP_PM_RESET_GPU_PP0,
ZYNQMP_PM_RESET_GPU,
ZYNQMP_PM_RESET_GT,
ZYNQMP_PM_RESET_SATA,
ZYNQMP_PM_RESET_ACPU3_PWRON,
ZYNQMP_PM_RESET_ACPU2_PWRON,
ZYNQMP_PM_RESET_ACPU1_PWRON,
ZYNQMP_PM_RESET_ACPU0_PWRON,
ZYNQMP_PM_RESET_APU_L2,
ZYNQMP_PM_RESET_ACPU3,
ZYNQMP_PM_RESET_ACPU2,
ZYNQMP_PM_RESET_ACPU1,
ZYNQMP_PM_RESET_ACPU0,
ZYNQMP_PM_RESET_DDR,
ZYNQMP_PM_RESET_APM_FPD,
ZYNQMP_PM_RESET_SOFT,
ZYNQMP_PM_RESET_GEM0,
ZYNQMP_PM_RESET_GEM1,
ZYNQMP_PM_RESET_GEM2,
ZYNQMP_PM_RESET_GEM3,
ZYNQMP_PM_RESET_QSPI,
ZYNQMP_PM_RESET_UART0,
ZYNQMP_PM_RESET_UART1,
ZYNQMP_PM_RESET_SPI0,
ZYNQMP_PM_RESET_SPI1,
ZYNQMP_PM_RESET_SDIO0,
ZYNQMP_PM_RESET_SDIO1,
ZYNQMP_PM_RESET_CAN0,
ZYNQMP_PM_RESET_CAN1,
ZYNQMP_PM_RESET_I2C0,
ZYNQMP_PM_RESET_I2C1,
ZYNQMP_PM_RESET_TTC0,
ZYNQMP_PM_RESET_TTC1,
ZYNQMP_PM_RESET_TTC2,
ZYNQMP_PM_RESET_TTC3,
ZYNQMP_PM_RESET_SWDT_CRL,
ZYNQMP_PM_RESET_NAND,
ZYNQMP_PM_RESET_ADMA,
ZYNQMP_PM_RESET_GPIO,
ZYNQMP_PM_RESET_IOU_CC,
ZYNQMP_PM_RESET_TIMESTAMP,
ZYNQMP_PM_RESET_RPU_R50,
ZYNQMP_PM_RESET_RPU_R51,
ZYNQMP_PM_RESET_RPU_AMBA,
ZYNQMP_PM_RESET_OCM,
ZYNQMP_PM_RESET_RPU_PGE,
ZYNQMP_PM_RESET_USB0_CORERESET,
ZYNQMP_PM_RESET_USB1_CORERESET,
ZYNQMP_PM_RESET_USB0_HIBERRESET,
ZYNQMP_PM_RESET_USB1_HIBERRESET,
ZYNQMP_PM_RESET_USB0_APB,
ZYNQMP_PM_RESET_USB1_APB,
ZYNQMP_PM_RESET_IPI,
ZYNQMP_PM_RESET_APM_LPD,
ZYNQMP_PM_RESET_RTC,
ZYNQMP_PM_RESET_SYSMON,
ZYNQMP_PM_RESET_AFI_FM6,
ZYNQMP_PM_RESET_LPD_SWDT,
ZYNQMP_PM_RESET_FPD,
ZYNQMP_PM_RESET_RPU_DBG1,
ZYNQMP_PM_RESET_RPU_DBG0,
ZYNQMP_PM_RESET_DBG_LPD,
ZYNQMP_PM_RESET_DBG_FPD,
ZYNQMP_PM_RESET_APLL,
ZYNQMP_PM_RESET_DPLL,
ZYNQMP_PM_RESET_VPLL,
ZYNQMP_PM_RESET_IOPLL,
ZYNQMP_PM_RESET_RPLL,
ZYNQMP_PM_RESET_GPO3_PL_0,
ZYNQMP_PM_RESET_GPO3_PL_1,
ZYNQMP_PM_RESET_GPO3_PL_2,
ZYNQMP_PM_RESET_GPO3_PL_3,
ZYNQMP_PM_RESET_GPO3_PL_4,
ZYNQMP_PM_RESET_GPO3_PL_5,
ZYNQMP_PM_RESET_GPO3_PL_6,
ZYNQMP_PM_RESET_GPO3_PL_7,
ZYNQMP_PM_RESET_GPO3_PL_8,
ZYNQMP_PM_RESET_GPO3_PL_9,
ZYNQMP_PM_RESET_GPO3_PL_10,
ZYNQMP_PM_RESET_GPO3_PL_11,
ZYNQMP_PM_RESET_GPO3_PL_12,
ZYNQMP_PM_RESET_GPO3_PL_13,
ZYNQMP_PM_RESET_GPO3_PL_14,
ZYNQMP_PM_RESET_GPO3_PL_15,
ZYNQMP_PM_RESET_GPO3_PL_16,
ZYNQMP_PM_RESET_GPO3_PL_17,
ZYNQMP_PM_RESET_GPO3_PL_18,
ZYNQMP_PM_RESET_GPO3_PL_19,
ZYNQMP_PM_RESET_GPO3_PL_20,
ZYNQMP_PM_RESET_GPO3_PL_21,
ZYNQMP_PM_RESET_GPO3_PL_22,
ZYNQMP_PM_RESET_GPO3_PL_23,
ZYNQMP_PM_RESET_GPO3_PL_24,
ZYNQMP_PM_RESET_GPO3_PL_25,
ZYNQMP_PM_RESET_GPO3_PL_26,
ZYNQMP_PM_RESET_GPO3_PL_27,
ZYNQMP_PM_RESET_GPO3_PL_28,
ZYNQMP_PM_RESET_GPO3_PL_29,
ZYNQMP_PM_RESET_GPO3_PL_30,
ZYNQMP_PM_RESET_GPO3_PL_31,
ZYNQMP_PM_RESET_RPU_LS,
ZYNQMP_PM_RESET_PS_ONLY,
ZYNQMP_PM_RESET_PL,
ZYNQMP_PM_RESET_PS_PL0,
ZYNQMP_PM_RESET_PS_PL1,
ZYNQMP_PM_RESET_PS_PL2,
ZYNQMP_PM_RESET_PS_PL3,
ZYNQMP_PM_RESET_PCIE_BRIDGE = 1001,
ZYNQMP_PM_RESET_PCIE_CTRL = 1002,
ZYNQMP_PM_RESET_DP = 1003,
ZYNQMP_PM_RESET_SWDT_CRF = 1004,
ZYNQMP_PM_RESET_AFI_FM5 = 1005,
ZYNQMP_PM_RESET_AFI_FM4 = 1006,
ZYNQMP_PM_RESET_AFI_FM3 = 1007,
ZYNQMP_PM_RESET_AFI_FM2 = 1008,
ZYNQMP_PM_RESET_AFI_FM1 = 1009,
ZYNQMP_PM_RESET_AFI_FM0 = 1010,
ZYNQMP_PM_RESET_GDMA = 1011,
ZYNQMP_PM_RESET_GPU_PP1 = 1012,
ZYNQMP_PM_RESET_GPU_PP0 = 1013,
ZYNQMP_PM_RESET_GPU = 1014,
ZYNQMP_PM_RESET_GT = 1015,
ZYNQMP_PM_RESET_SATA = 1016,
ZYNQMP_PM_RESET_ACPU3_PWRON = 1017,
ZYNQMP_PM_RESET_ACPU2_PWRON = 1018,
ZYNQMP_PM_RESET_ACPU1_PWRON = 1019,
ZYNQMP_PM_RESET_ACPU0_PWRON = 1020,
ZYNQMP_PM_RESET_APU_L2 = 1021,
ZYNQMP_PM_RESET_ACPU3 = 1022,
ZYNQMP_PM_RESET_ACPU2 = 1023,
ZYNQMP_PM_RESET_ACPU1 = 1024,
ZYNQMP_PM_RESET_ACPU0 = 1025,
ZYNQMP_PM_RESET_DDR = 1026,
ZYNQMP_PM_RESET_APM_FPD = 1027,
ZYNQMP_PM_RESET_SOFT = 1028,
ZYNQMP_PM_RESET_GEM0 = 1029,
ZYNQMP_PM_RESET_GEM1 = 1030,
ZYNQMP_PM_RESET_GEM2 = 1031,
ZYNQMP_PM_RESET_GEM3 = 1032,
ZYNQMP_PM_RESET_QSPI = 1033,
ZYNQMP_PM_RESET_UART0 = 1034,
ZYNQMP_PM_RESET_UART1 = 1035,
ZYNQMP_PM_RESET_SPI0 = 1036,
ZYNQMP_PM_RESET_SPI1 = 1037,
ZYNQMP_PM_RESET_SDIO0 = 1038,
ZYNQMP_PM_RESET_SDIO1 = 1039,
ZYNQMP_PM_RESET_CAN0 = 1040,
ZYNQMP_PM_RESET_CAN1 = 1041,
ZYNQMP_PM_RESET_I2C0 = 1042,
ZYNQMP_PM_RESET_I2C1 = 1043,
ZYNQMP_PM_RESET_TTC0 = 1044,
ZYNQMP_PM_RESET_TTC1 = 1045,
ZYNQMP_PM_RESET_TTC2 = 1046,
ZYNQMP_PM_RESET_TTC3 = 1047,
ZYNQMP_PM_RESET_SWDT_CRL = 1048,
ZYNQMP_PM_RESET_NAND = 1049,
ZYNQMP_PM_RESET_ADMA = 1050,
ZYNQMP_PM_RESET_GPIO = 1051,
ZYNQMP_PM_RESET_IOU_CC = 1052,
ZYNQMP_PM_RESET_TIMESTAMP = 1053,
ZYNQMP_PM_RESET_RPU_R50 = 1054,
ZYNQMP_PM_RESET_RPU_R51 = 1055,
ZYNQMP_PM_RESET_RPU_AMBA = 1056,
ZYNQMP_PM_RESET_OCM = 1057,
ZYNQMP_PM_RESET_RPU_PGE = 1058,
ZYNQMP_PM_RESET_USB0_CORERESET = 1059,
ZYNQMP_PM_RESET_USB1_CORERESET = 1060,
ZYNQMP_PM_RESET_USB0_HIBERRESET = 1061,
ZYNQMP_PM_RESET_USB1_HIBERRESET = 1062,
ZYNQMP_PM_RESET_USB0_APB = 1063,
ZYNQMP_PM_RESET_USB1_APB = 1064,
ZYNQMP_PM_RESET_IPI = 1065,
ZYNQMP_PM_RESET_APM_LPD = 1066,
ZYNQMP_PM_RESET_RTC = 1067,
ZYNQMP_PM_RESET_SYSMON = 1068,
ZYNQMP_PM_RESET_AFI_FM6 = 1069,
ZYNQMP_PM_RESET_LPD_SWDT = 1070,
ZYNQMP_PM_RESET_FPD = 1071,
ZYNQMP_PM_RESET_RPU_DBG1 = 1072,
ZYNQMP_PM_RESET_RPU_DBG0 = 1073,
ZYNQMP_PM_RESET_DBG_LPD = 1074,
ZYNQMP_PM_RESET_DBG_FPD = 1075,
ZYNQMP_PM_RESET_APLL = 1076,
ZYNQMP_PM_RESET_DPLL = 1077,
ZYNQMP_PM_RESET_VPLL = 1078,
ZYNQMP_PM_RESET_IOPLL = 1079,
ZYNQMP_PM_RESET_RPLL = 1080,
ZYNQMP_PM_RESET_GPO3_PL_0 = 1081,
ZYNQMP_PM_RESET_GPO3_PL_1 = 1082,
ZYNQMP_PM_RESET_GPO3_PL_2 = 1083,
ZYNQMP_PM_RESET_GPO3_PL_3 = 1084,
ZYNQMP_PM_RESET_GPO3_PL_4 = 1085,
ZYNQMP_PM_RESET_GPO3_PL_5 = 1086,
ZYNQMP_PM_RESET_GPO3_PL_6 = 1087,
ZYNQMP_PM_RESET_GPO3_PL_7 = 1088,
ZYNQMP_PM_RESET_GPO3_PL_8 = 1089,
ZYNQMP_PM_RESET_GPO3_PL_9 = 1090,
ZYNQMP_PM_RESET_GPO3_PL_10 = 1091,
ZYNQMP_PM_RESET_GPO3_PL_11 = 1092,
ZYNQMP_PM_RESET_GPO3_PL_12 = 1093,
ZYNQMP_PM_RESET_GPO3_PL_13 = 1094,
ZYNQMP_PM_RESET_GPO3_PL_14 = 1095,
ZYNQMP_PM_RESET_GPO3_PL_15 = 1096,
ZYNQMP_PM_RESET_GPO3_PL_16 = 1097,
ZYNQMP_PM_RESET_GPO3_PL_17 = 1098,
ZYNQMP_PM_RESET_GPO3_PL_18 = 1099,
ZYNQMP_PM_RESET_GPO3_PL_19 = 1100,
ZYNQMP_PM_RESET_GPO3_PL_20 = 1101,
ZYNQMP_PM_RESET_GPO3_PL_21 = 1102,
ZYNQMP_PM_RESET_GPO3_PL_22 = 1103,
ZYNQMP_PM_RESET_GPO3_PL_23 = 1104,
ZYNQMP_PM_RESET_GPO3_PL_24 = 1105,
ZYNQMP_PM_RESET_GPO3_PL_25 = 1106,
ZYNQMP_PM_RESET_GPO3_PL_26 = 1107,
ZYNQMP_PM_RESET_GPO3_PL_27 = 1108,
ZYNQMP_PM_RESET_GPO3_PL_28 = 1109,
ZYNQMP_PM_RESET_GPO3_PL_29 = 1110,
ZYNQMP_PM_RESET_GPO3_PL_30 = 1111,
ZYNQMP_PM_RESET_GPO3_PL_31 = 1112,
ZYNQMP_PM_RESET_RPU_LS = 1113,
ZYNQMP_PM_RESET_PS_ONLY = 1114,
ZYNQMP_PM_RESET_PL = 1115,
ZYNQMP_PM_RESET_PS_PL0 = 1116,
ZYNQMP_PM_RESET_PS_PL1 = 1117,
ZYNQMP_PM_RESET_PS_PL2 = 1118,
ZYNQMP_PM_RESET_PS_PL3 = 1119,
ZYNQMP_PM_RESET_END = ZYNQMP_PM_RESET_PS_PL3
};
enum zynqmp_pm_suspend_reason {
SUSPEND_POWER_REQUEST = 201,
SUSPEND_ALERT,
SUSPEND_SYSTEM_SHUTDOWN,
SUSPEND_ALERT = 202,
SUSPEND_SYSTEM_SHUTDOWN = 203,
};
enum zynqmp_pm_request_ack {
ZYNQMP_PM_REQUEST_ACK_NO = 1,
ZYNQMP_PM_REQUEST_ACK_BLOCKING,
ZYNQMP_PM_REQUEST_ACK_NON_BLOCKING,
ZYNQMP_PM_REQUEST_ACK_BLOCKING = 2,
ZYNQMP_PM_REQUEST_ACK_NON_BLOCKING = 3,
};
enum pm_node_id {
NODE_SD_0 = 39,
NODE_SD_1,
NODE_SD_1 = 40,
};
enum tap_delay_type {
PM_TAPDELAY_INPUT = 0,
PM_TAPDELAY_OUTPUT,
PM_TAPDELAY_OUTPUT = 1,
};
enum dll_reset_type {
PM_DLL_RESET_ASSERT,
PM_DLL_RESET_RELEASE,
PM_DLL_RESET_PULSE,
PM_DLL_RESET_ASSERT = 0,
PM_DLL_RESET_RELEASE = 1,
PM_DLL_RESET_PULSE = 2,
};
enum zynqmp_pm_shutdown_type {
ZYNQMP_PM_SHUTDOWN_TYPE_SHUTDOWN,
ZYNQMP_PM_SHUTDOWN_TYPE_RESET,
ZYNQMP_PM_SHUTDOWN_TYPE_SETSCOPE_ONLY,
ZYNQMP_PM_SHUTDOWN_TYPE_SHUTDOWN = 0,
ZYNQMP_PM_SHUTDOWN_TYPE_RESET = 1,
ZYNQMP_PM_SHUTDOWN_TYPE_SETSCOPE_ONLY = 2,
};
enum zynqmp_pm_shutdown_subtype {
ZYNQMP_PM_SHUTDOWN_SUBTYPE_SUBSYSTEM,
ZYNQMP_PM_SHUTDOWN_SUBTYPE_PS_ONLY,
ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM,
ZYNQMP_PM_SHUTDOWN_SUBTYPE_SUBSYSTEM = 0,
ZYNQMP_PM_SHUTDOWN_SUBTYPE_PS_ONLY = 1,
ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM = 2,
};
/**

View File

@@ -0,0 +1,302 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_FORTIFY_STRING_H_
#define _LINUX_FORTIFY_STRING_H_
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
extern void *__underlying_memchr(const void *p, int c, __kernel_size_t size) __RENAME(memchr);
extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp);
extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy);
extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(memmove);
extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(memset);
extern char *__underlying_strcat(char *p, const char *q) __RENAME(strcat);
extern char *__underlying_strcpy(char *p, const char *q) __RENAME(strcpy);
extern __kernel_size_t __underlying_strlen(const char *p) __RENAME(strlen);
extern char *__underlying_strncat(char *p, const char *q, __kernel_size_t count) __RENAME(strncat);
extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) __RENAME(strncpy);
#else
#define __underlying_memchr __builtin_memchr
#define __underlying_memcmp __builtin_memcmp
#define __underlying_memcpy __builtin_memcpy
#define __underlying_memmove __builtin_memmove
#define __underlying_memset __builtin_memset
#define __underlying_strcat __builtin_strcat
#define __underlying_strcpy __builtin_strcpy
#define __underlying_strlen __builtin_strlen
#define __underlying_strncat __builtin_strncat
#define __underlying_strncpy __builtin_strncpy
#endif
/*
 * Fortified strncpy(): rejects copies larger than the destination object
 * size reported by __builtin_object_size(p, 1). Compile-time-constant
 * oversizes fail the build via __write_overflow(); otherwise an oversize
 * triggers fortify_panic() at runtime.
 */
__FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size)
{
	size_t p_size = __builtin_object_size(p, 1);
	/* Known-constant oversize: turn it into a build error. */
	if (__builtin_constant_p(size) && p_size < size)
		__write_overflow();
	/* Runtime check; p_size is (size_t)-1 when the size is unknown. */
	if (p_size < size)
		fortify_panic(__func__);
	return __underlying_strncpy(p, q, size);
}
/*
 * Fortified strcat(): implemented via strlcat() so the append is bounded by
 * the destination object size; panics if the result would not fit.
 */
__FORTIFY_INLINE char *strcat(char *p, const char *q)
{
	size_t p_size = __builtin_object_size(p, 1);
	/* Unknown destination size: fall through to the raw strcat(). */
	if (p_size == (size_t)-1)
		return __underlying_strcat(p, q);
	/* strlcat() returns the length it tried to create; >= p_size means truncation. */
	if (strlcat(p, q, p_size) >= p_size)
		fortify_panic(__func__);
	return p;
}
/*
 * Fortified strlen(): bounds the scan by the object size of @p and panics
 * if no NUL terminator is found within it (i.e. the string is unterminated
 * or @p points past its object).
 */
__FORTIFY_INLINE __kernel_size_t strlen(const char *p)
{
	__kernel_size_t ret;
	size_t p_size = __builtin_object_size(p, 1);
	/* Work around gcc excess stack consumption issue */
	if (p_size == (size_t)-1 ||
	    (__builtin_constant_p(p[p_size - 1]) && p[p_size - 1] == '\0'))
		return __underlying_strlen(p);
	ret = strnlen(p, p_size);
	/* strnlen() hitting the object-size bound means no NUL inside the object. */
	if (p_size <= ret)
		fortify_panic(__func__);
	return ret;
}
/*
 * Fortified strnlen(): clamps the caller's bound to the destination object
 * size, then panics if the scan ran to the object-size limit without the
 * caller's own @maxlen being the stopping reason.
 */
extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen);
__FORTIFY_INLINE __kernel_size_t strnlen(const char *p, __kernel_size_t maxlen)
{
	size_t p_size = __builtin_object_size(p, 1);
	/* Scan no further than the smaller of maxlen and the object size. */
	__kernel_size_t ret = __real_strnlen(p, maxlen < p_size ? maxlen : p_size);
	/* Hit the object bound while the caller asked for more: overread. */
	if (p_size <= ret && maxlen != ret)
		fortify_panic(__func__);
	return ret;
}
/* defined after fortified strlen to reuse it */
extern size_t __real_strlcpy(char *, const char *, size_t) __RENAME(strlcpy);
/*
 * Fortified strlcpy(): open-coded so that both the length computation
 * (fortified strlen) and the copy itself are bounds-checked against the
 * destination object size. Returns strlen(q) like the real strlcpy().
 */
__FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size)
{
	size_t ret;
	size_t p_size = __builtin_object_size(p, 1);
	size_t q_size = __builtin_object_size(q, 1);
	/* Neither object size known: nothing to enforce, call the real one. */
	if (p_size == (size_t)-1 && q_size == (size_t)-1)
		return __real_strlcpy(p, q, size);
	ret = strlen(q);
	if (size) {
		/* Number of bytes to copy, leaving room for the NUL. */
		size_t len = (ret >= size) ? size - 1 : ret;
		if (__builtin_constant_p(len) && len >= p_size)
			__write_overflow();
		if (len >= p_size)
			fortify_panic(__func__);
		__underlying_memcpy(p, q, len);
		p[len] = '\0';
	}
	return ret;
}
/* defined after fortified strnlen to reuse it */
extern ssize_t __real_strscpy(char *, const char *, size_t) __RENAME(strscpy);
/*
 * Fortified strscpy(): validates the copy length against both source and
 * destination object sizes before delegating to the real strscpy(), which
 * preserves its return-value contract (copied length, or -E2BIG).
 */
__FORTIFY_INLINE ssize_t strscpy(char *p, const char *q, size_t size)
{
	size_t len;
	/* Use string size rather than possible enclosing struct size. */
	size_t p_size = __builtin_object_size(p, 1);
	size_t q_size = __builtin_object_size(q, 1);
	/* If we cannot get size of p and q default to call strscpy. */
	if (p_size == (size_t) -1 && q_size == (size_t) -1)
		return __real_strscpy(p, q, size);
	/*
	 * If size can be known at compile time and is greater than
	 * p_size, generate a compile time write overflow error.
	 */
	if (__builtin_constant_p(size) && size > p_size)
		__write_overflow();
	/*
	 * This call protects from read overflow, because len will default to q
	 * length if it is smaller than size.
	 */
	len = strnlen(q, size);
	/*
	 * If len equals size, we will copy only size bytes which leads to
	 * -E2BIG being returned.
	 * Otherwise we will copy len + 1 because of the final '\0'.
	 */
	len = len == size ? size : len + 1;
	/*
	 * Generate a runtime write overflow error if len is greater than
	 * p_size.
	 */
	if (len > p_size)
		fortify_panic(__func__);
	/*
	 * We can now safely call vanilla strscpy because we are protected from:
	 * 1. Read overflow thanks to call to strnlen().
	 * 2. Write overflow thanks to above ifs.
	 */
	return __real_strscpy(p, q, len);
}
/* defined after fortified strlen and strnlen to reuse them */
/*
 * Fortified strncat(): computes the exact resulting length (current
 * destination length + bounded source length + NUL) and panics if it would
 * exceed the destination object size; the append itself is a memcpy.
 */
__FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count)
{
	size_t p_len, copy_len;
	size_t p_size = __builtin_object_size(p, 1);
	size_t q_size = __builtin_object_size(q, 1);
	/* No size information for either side: use the raw strncat(). */
	if (p_size == (size_t)-1 && q_size == (size_t)-1)
		return __underlying_strncat(p, q, count);
	p_len = strlen(p);
	copy_len = strnlen(q, count);
	/* +1 accounts for the trailing NUL written below. */
	if (p_size < p_len + copy_len + 1)
		fortify_panic(__func__);
	__underlying_memcpy(p + p_len, q, copy_len);
	p[p_len + copy_len] = '\0';
	return p;
}
/*
 * Fortified memset(): checks the write size against the full object size
 * (__builtin_object_size mode 0); build error for constant oversizes,
 * runtime panic otherwise.
 */
__FORTIFY_INLINE void *memset(void *p, int c, __kernel_size_t size)
{
	size_t p_size = __builtin_object_size(p, 0);
	if (__builtin_constant_p(size) && p_size < size)
		__write_overflow();
	if (p_size < size)
		fortify_panic(__func__);
	return __underlying_memset(p, c, size);
}
/*
 * Fortified memcpy(): validates @size against both the destination (write)
 * and source (read) object sizes. Constant oversizes become build errors
 * (__write_overflow / __read_overflow2); others panic at runtime.
 */
__FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size)
{
	size_t p_size = __builtin_object_size(p, 0);
	size_t q_size = __builtin_object_size(q, 0);
	if (__builtin_constant_p(size)) {
		if (p_size < size)
			__write_overflow();
		if (q_size < size)
			__read_overflow2();
	}
	if (p_size < size || q_size < size)
		fortify_panic(__func__);
	return __underlying_memcpy(p, q, size);
}
/*
 * Fortified memmove(): identical checking scheme to the fortified memcpy()
 * above — both destination and source object sizes are enforced.
 */
__FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size)
{
	size_t p_size = __builtin_object_size(p, 0);
	size_t q_size = __builtin_object_size(q, 0);
	if (__builtin_constant_p(size)) {
		if (p_size < size)
			__write_overflow();
		if (q_size < size)
			__read_overflow2();
	}
	if (p_size < size || q_size < size)
		fortify_panic(__func__);
	return __underlying_memmove(p, q, size);
}
/*
 * Fortified memscan(): read-only scan, so only a read-overflow check of
 * @size against the object size of @p is needed.
 */
extern void *__real_memscan(void *, int, __kernel_size_t) __RENAME(memscan);
__FORTIFY_INLINE void *memscan(void *p, int c, __kernel_size_t size)
{
	size_t p_size = __builtin_object_size(p, 0);
	if (__builtin_constant_p(size) && p_size < size)
		__read_overflow();
	if (p_size < size)
		fortify_panic(__func__);
	return __real_memscan(p, c, size);
}
/*
 * Fortified memcmp(): both operands are read, so @size is checked against
 * each object's size (__read_overflow for the first, __read_overflow2 for
 * the second when @size is a compile-time constant).
 */
__FORTIFY_INLINE int memcmp(const void *p, const void *q, __kernel_size_t size)
{
	size_t p_size = __builtin_object_size(p, 0);
	size_t q_size = __builtin_object_size(q, 0);
	if (__builtin_constant_p(size)) {
		if (p_size < size)
			__read_overflow();
		if (q_size < size)
			__read_overflow2();
	}
	if (p_size < size || q_size < size)
		fortify_panic(__func__);
	return __underlying_memcmp(p, q, size);
}
/*
 * Fortified memchr(): read-only, single-operand bounds check of @size
 * against the object size of @p.
 */
__FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size)
{
	size_t p_size = __builtin_object_size(p, 0);
	if (__builtin_constant_p(size) && p_size < size)
		__read_overflow();
	if (p_size < size)
		fortify_panic(__func__);
	return __underlying_memchr(p, c, size);
}
/*
 * Fortified memchr_inv(): same read-only bounds check as memchr(), then
 * delegates to the real implementation via __RENAME.
 */
void *__real_memchr_inv(const void *s, int c, size_t n) __RENAME(memchr_inv);
__FORTIFY_INLINE void *memchr_inv(const void *p, int c, size_t size)
{
	size_t p_size = __builtin_object_size(p, 0);
	if (__builtin_constant_p(size) && p_size < size)
		__read_overflow();
	if (p_size < size)
		fortify_panic(__func__);
	return __real_memchr_inv(p, c, size);
}
/*
 * Fortified kmemdup(): the source is read @size bytes, so the check is a
 * read-overflow check on @p before delegating to the real kmemdup().
 */
extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kmemdup);
__FORTIFY_INLINE void *kmemdup(const void *p, size_t size, gfp_t gfp)
{
	size_t p_size = __builtin_object_size(p, 0);
	if (__builtin_constant_p(size) && p_size < size)
		__read_overflow();
	if (p_size < size)
		fortify_panic(__func__);
	return __real_kmemdup(p, size, gfp);
}
/* defined after fortified strlen and memcpy to reuse them */
/*
 * Fortified strcpy(): computes the copy length with the fortified strlen()
 * (bounding the read) and performs the copy with the fortified memcpy()
 * (bounding the write).
 */
__FORTIFY_INLINE char *strcpy(char *p, const char *q)
{
	size_t p_size = __builtin_object_size(p, 1);
	size_t q_size = __builtin_object_size(q, 1);
	size_t size;
	/* No size information for either side: use the raw strcpy(). */
	if (p_size == (size_t)-1 && q_size == (size_t)-1)
		return __underlying_strcpy(p, q);
	size = strlen(q) + 1;
	/* test here to use the more stringent object size */
	if (p_size < size)
		fortify_panic(__func__);
	memcpy(p, q, size);
	return p;
}
/* Don't use these outside the FORITFY_SOURCE implementation */
#undef __underlying_memchr
#undef __underlying_memcmp
#undef __underlying_memcpy
#undef __underlying_memmove
#undef __underlying_memset
#undef __underlying_strcat
#undef __underlying_strcpy
#undef __underlying_strlen
#undef __underlying_strncat
#undef __underlying_strncpy
#endif /* _LINUX_FORTIFY_STRING_H_ */

View File

@@ -3080,8 +3080,8 @@ extern ssize_t generic_write_checks(struct kiocb *, struct iov_iter *);
extern int generic_write_check_limits(struct file *file, loff_t pos,
loff_t *count);
extern int generic_file_rw_checks(struct file *file_in, struct file *file_out);
extern ssize_t generic_file_buffered_read(struct kiocb *iocb,
struct iov_iter *to, ssize_t already_read);
ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *to,
ssize_t already_read);
extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *);
extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *);
extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *);

View File

@@ -13,6 +13,7 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
#include <uapi/linux/fsl_mc.h>
#define FSL_MC_VENDOR_FREESCALE 0x1957
@@ -209,8 +210,6 @@ struct fsl_mc_device {
#define to_fsl_mc_device(_dev) \
container_of(_dev, struct fsl_mc_device, dev)
#define MC_CMD_NUM_OF_PARAMS 7
struct mc_cmd_header {
u8 src_id;
u8 flags_hw;
@@ -220,11 +219,6 @@ struct mc_cmd_header {
__le16 cmd_id;
};
struct fsl_mc_command {
__le64 header;
__le64 params[MC_CMD_NUM_OF_PARAMS];
};
enum mc_cmd_status {
MC_CMD_STATUS_OK = 0x0, /* Completed successfully */
MC_CMD_STATUS_READY = 0x1, /* Ready to be processed */

View File

@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <linux/list.h>
#include <linux/err.h>
struct fwnode_operations;
struct device;
@@ -18,9 +19,13 @@ struct device;
/*
* fwnode link flags
*
* LINKS_ADDED: The fwnode has already be parsed to add fwnode links.
* LINKS_ADDED: The fwnode has already be parsed to add fwnode links.
* NOT_DEVICE: The fwnode will never be populated as a struct device.
* INITIALIZED: The hardware corresponding to fwnode has been initialized.
*/
#define FWNODE_FLAG_LINKS_ADDED BIT(0)
#define FWNODE_FLAG_NOT_DEVICE BIT(1)
#define FWNODE_FLAG_INITIALIZED BIT(2)
struct fwnode_handle {
struct fwnode_handle *secondary;
@@ -166,7 +171,20 @@ static inline void fwnode_init(struct fwnode_handle *fwnode,
INIT_LIST_HEAD(&fwnode->suppliers);
}
/*
 * Set or clear FWNODE_FLAG_INITIALIZED on @fwnode according to
 * @initialized; error/NULL handles are ignored.
 */
static inline void fwnode_dev_initialized(struct fwnode_handle *fwnode,
					  bool initialized)
{
	if (IS_ERR_OR_NULL(fwnode))
		return;
	fwnode->flags = initialized ?
			(fwnode->flags | FWNODE_FLAG_INITIALIZED) :
			(fwnode->flags & ~FWNODE_FLAG_INITIALIZED);
}
extern u32 fw_devlink_get_flags(void);
extern bool fw_devlink_is_strict(void);
int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup);
void fwnode_links_purge(struct fwnode_handle *fwnode);

View File

@@ -8,6 +8,20 @@
#include <linux/linkage.h>
#include <linux/topology.h>
/* The typedef is in types.h but we want the documentation here */
#if 0
/**
* typedef gfp_t - Memory allocation flags.
*
* GFP flags are commonly used throughout Linux to indicate how memory
* should be allocated. The GFP acronym stands for get_free_pages(),
* the underlying memory allocation function. Not every GFP flag is
* supported by every function which may allocate memory. Most users
* will want to use a plain ``GFP_KERNEL``.
*/
typedef unsigned int __bitwise gfp_t;
#endif
struct vm_area_struct;
/*
@@ -620,6 +634,8 @@ bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);
extern void pm_restrict_gfp_mask(void);
extern void pm_restore_gfp_mask(void);
extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma);
#ifdef CONFIG_PM_SLEEP
extern bool pm_suspended_storage(void);
#else

View File

@@ -127,11 +127,6 @@ static inline unsigned long totalhigh_pages(void)
return (unsigned long)atomic_long_read(&_totalhigh_pages);
}
static inline void totalhigh_pages_inc(void)
{
atomic_long_inc(&_totalhigh_pages);
}
static inline void totalhigh_pages_add(long count)
{
atomic_long_add(count, &_totalhigh_pages);

View File

@@ -276,4 +276,60 @@ static inline void copy_highpage(struct page *to, struct page *from)
#endif
/*
 * Copy @len bytes from @src_page+@src_off to @dst_page+@dst_off through
 * short-lived kmap_local_page() mappings. Both ranges must lie within a
 * single page (enforced by the VM_BUG_ON).
 */
static inline void memcpy_page(struct page *dst_page, size_t dst_off,
			       struct page *src_page, size_t src_off,
			       size_t len)
{
	char *dst = kmap_local_page(dst_page);
	char *src = kmap_local_page(src_page);
	VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
	memcpy(dst + dst_off, src + src_off, len);
	/* Unmap in reverse (LIFO) order of the kmap_local_page() calls. */
	kunmap_local(src);
	kunmap_local(dst);
}
/*
 * Like memcpy_page() but uses memmove(), so the source and destination
 * ranges may overlap. Ranges must stay within a single page each.
 */
static inline void memmove_page(struct page *dst_page, size_t dst_off,
				struct page *src_page, size_t src_off,
				size_t len)
{
	char *dst = kmap_local_page(dst_page);
	char *src = kmap_local_page(src_page);
	VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
	memmove(dst + dst_off, src + src_off, len);
	/* Unmap in reverse (LIFO) order of the kmap_local_page() calls. */
	kunmap_local(src);
	kunmap_local(dst);
}
/*
 * Fill @len bytes at @page+@offset with @val via a temporary
 * kmap_local_page() mapping; the range must fit inside the page.
 */
static inline void memset_page(struct page *page, size_t offset, int val,
			       size_t len)
{
	char *addr = kmap_local_page(page);
	VM_BUG_ON(offset + len > PAGE_SIZE);
	memset(addr + offset, val, len);
	kunmap_local(addr);
}
/*
 * Copy @len bytes out of @page (starting at @offset) into the kernel
 * buffer @to; the source range must fit inside the page.
 */
static inline void memcpy_from_page(char *to, struct page *page,
				    size_t offset, size_t len)
{
	char *from = kmap_local_page(page);
	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to, from + offset, len);
	kunmap_local(from);
}
/*
 * Copy @len bytes from the kernel buffer @from into @page at @offset; the
 * destination range must fit inside the page.
 */
static inline void memcpy_to_page(struct page *page, size_t offset,
				  const char *from, size_t len)
{
	char *to = kmap_local_page(page);
	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to + offset, from, len);
	kunmap_local(to);
}
#endif /* _LINUX_HIGHMEM_H */

View File

@@ -78,6 +78,7 @@ static inline vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn,
}
enum transparent_hugepage_flag {
TRANSPARENT_HUGEPAGE_NEVER_DAX,
TRANSPARENT_HUGEPAGE_FLAG,
TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
@@ -123,6 +124,13 @@ extern unsigned long transparent_hugepage_flags;
*/
static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
/*
* If the hardware/firmware marked hugepage support disabled.
*/
if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_NEVER_DAX))
return false;
if (vma->vm_flags & VM_NOHUGEPAGE)
return false;
@@ -134,12 +142,7 @@ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
return true;
/*
* For dax vmas, try to always use hugepage mappings. If the kernel does
* not support hugepages, fsdax mappings will fallback to PAGE_SIZE
* mappings, and device-dax namespaces, that try to guarantee a given
* mapping size, will fail to enable
*/
if (vma_is_dax(vma))
return true;

View File

@@ -37,7 +37,7 @@ struct hugepage_subpool {
struct hstate *hstate;
long min_hpages; /* Minimum huge pages or -1 if no minimum. */
long rsv_hpages; /* Pages reserved against global pool to */
/* sasitfy minimum size. */
/* satisfy minimum size. */
};
struct resv_map {
@@ -139,7 +139,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
unsigned long dst_addr,
unsigned long src_addr,
struct page **pagep);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
struct vm_area_struct *vma,
vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
@@ -472,6 +472,84 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
/*
 * hugetlb page specific state flags.  These flags are located in page.private
* of the hugetlb head page. Functions created via the below macros should be
* used to manipulate these flags.
*
* HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
* allocation time. Cleared when page is fully instantiated. Free
* routine checks flag to restore a reservation on error paths.
* Synchronization: Examined or modified by code that knows it has
* the only reference to page. i.e. After allocation but before use
* or when the page is being freed.
* HPG_migratable - Set after a newly allocated page is added to the page
* cache and/or page tables. Indicates the page is a candidate for
* migration.
* Synchronization: Initially set after new page allocation with no
* locking. When examined and modified during migration processing
* (isolate, migrate, putback) the hugetlb_lock is held.
 * HPG_temporary - Set on a page that is temporarily allocated from the buddy
* allocator. Typically used for migration target pages when no pages
* are available in the pool. The hugetlb free page path will
* immediately free pages with this flag set to the buddy allocator.
* Synchronization: Can be set after huge page allocation from buddy when
* code knows it has only reference. All other examinations and
* modifications require hugetlb_lock.
* HPG_freed - Set when page is on the free lists.
* Synchronization: hugetlb_lock held for examination and modification.
*/
enum hugetlb_page_flags {
HPG_restore_reserve = 0,
HPG_migratable,
HPG_temporary,
HPG_freed,
__NR_HPAGEFLAGS,
};
/*
* Macros to create test, set and clear function definitions for
* hugetlb specific page flags.
*/
#ifdef CONFIG_HUGETLB_PAGE
#define TESTHPAGEFLAG(uname, flname) \
static inline int HPage##uname(struct page *page) \
{ return test_bit(HPG_##flname, &(page->private)); }
#define SETHPAGEFLAG(uname, flname) \
static inline void SetHPage##uname(struct page *page) \
{ set_bit(HPG_##flname, &(page->private)); }
#define CLEARHPAGEFLAG(uname, flname) \
static inline void ClearHPage##uname(struct page *page) \
{ clear_bit(HPG_##flname, &(page->private)); }
#else
#define TESTHPAGEFLAG(uname, flname) \
static inline int HPage##uname(struct page *page) \
{ return 0; }
#define SETHPAGEFLAG(uname, flname) \
static inline void SetHPage##uname(struct page *page) \
{ }
#define CLEARHPAGEFLAG(uname, flname) \
static inline void ClearHPage##uname(struct page *page) \
{ }
#endif
#define HPAGEFLAG(uname, flname) \
TESTHPAGEFLAG(uname, flname) \
SETHPAGEFLAG(uname, flname) \
CLEARHPAGEFLAG(uname, flname) \
/*
* Create functions associated with hugetlb page flags
*/
HPAGEFLAG(RestoreReserve, restore_reserve)
HPAGEFLAG(Migratable, migratable)
HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)
#ifdef CONFIG_HUGETLB_PAGE
#define HSTATE_NAME_LEN 32
@@ -531,6 +609,20 @@ extern unsigned int default_hstate_idx;
#define default_hstate (hstates[default_hstate_idx])
/*
 * hugetlb page subpool pointer located in hpage[1].private
 */
/* Read the subpool pointer stashed in the first tail page's ->private. */
static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
	return (struct hugepage_subpool *)(hpage+1)->private;
}
/* Store @subpool in the first tail page's ->private (see hugetlb_page_subpool()). */
static inline void hugetlb_set_page_subpool(struct page *hpage,
					struct hugepage_subpool *subpool)
{
	set_page_private(hpage+1, (unsigned long)subpool);
}
static inline struct hstate *hstate_file(struct file *f)
{
return hstate_inode(file_inode(f));
@@ -770,8 +862,6 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
}
#endif
void set_page_huge_active(struct page *page);
#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};

View File

@@ -3,6 +3,7 @@
#define _LINUX_ICMPV6_H
#include <linux/skbuff.h>
#include <linux/ipv6.h>
#include <uapi/linux/icmpv6.h>
static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
@@ -15,13 +16,16 @@ static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
#if IS_ENABLED(CONFIG_IPV6)
typedef void ip6_icmp_send_t(struct sk_buff *skb, u8 type, u8 code, __u32 info,
const struct in6_addr *force_saddr);
const struct in6_addr *force_saddr,
const struct inet6_skb_parm *parm);
void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
const struct in6_addr *force_saddr);
const struct in6_addr *force_saddr,
const struct inet6_skb_parm *parm);
#if IS_BUILTIN(CONFIG_IPV6)
static inline void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
static inline void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
const struct inet6_skb_parm *parm)
{
icmp6_send(skb, type, code, info, NULL);
icmp6_send(skb, type, code, info, NULL, parm);
}
static inline int inet6_register_icmp_sender(ip6_icmp_send_t *fn)
{
@@ -34,18 +38,28 @@ static inline int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn)
return 0;
}
#else
extern void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info);
extern void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
const struct inet6_skb_parm *parm);
extern int inet6_register_icmp_sender(ip6_icmp_send_t *fn);
extern int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn);
#endif
/*
 * Send an ICMPv6 error for @skb, supplying the skb's own IPv6 control
 * buffer (IP6CB) as the inet6_skb_parm argument.
 */
static inline void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
{
	__icmpv6_send(skb, type, code, info, IP6CB(skb));
}
int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type,
unsigned int data_len);
#if IS_ENABLED(CONFIG_NF_NAT)
void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info);
#else
#define icmpv6_ndo_send icmpv6_send
/*
 * Variant of icmpv6_send() that passes a zeroed inet6_skb_parm instead of
 * the skb's IP6CB state (used when CONFIG_NF_NAT is disabled).
 */
static inline void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info)
{
	struct inet6_skb_parm parm = { 0 };
	__icmpv6_send(skb_in, type, code, info, &parm);
}
#endif
#else

View File

@@ -338,14 +338,14 @@ struct obs_kernel_param {
var = 1; \
return 0; \
} \
__setup_param(str_on, parse_##var##_on, parse_##var##_on, 1); \
early_param(str_on, parse_##var##_on); \
\
static int __init parse_##var##_off(char *arg) \
{ \
var = 0; \
return 0; \
} \
__setup_param(str_off, parse_##var##_off, parse_##var##_off, 1)
early_param(str_off, parse_##var##_off)
/* Relies on boot_command_line being set */
void __init parse_early_param(void);

View File

@@ -1,5 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_INITRD_H
#define __LINUX_INITRD_H
#define INITRD_MINOR 250 /* shouldn't collide with /dev/ram* too soon ... */
/* starting block # of image */
@@ -15,6 +18,12 @@ extern int initrd_below_start_ok;
extern unsigned long initrd_start, initrd_end;
extern void free_initrd_mem(unsigned long, unsigned long);
#ifdef CONFIG_BLK_DEV_INITRD
extern void __init reserve_initrd_mem(void);
#else
static inline void __init reserve_initrd_mem(void) {}
#endif
extern phys_addr_t phys_initrd_start;
extern unsigned long phys_initrd_size;
@@ -24,3 +33,5 @@ extern char __initramfs_start[];
extern unsigned long __initramfs_size;
void console_on_rootfs(void);
#endif /* __LINUX_INITRD_H */

View File

@@ -1,35 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) Intel 2011
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* The PTI (Parallel Trace Interface) driver directs trace data routed from
* various parts in the system out through the Intel Penwell PTI port and
* out of the mobile device for analysis with a debugging tool
* (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7,
* compact JTAG, standard.
*
* This header file will allow other parts of the OS to use the
* interface to write out it's contents for debugging a mobile system.
*/
#ifndef LINUX_INTEL_PTI_H_
#define LINUX_INTEL_PTI_H_
/* offset for last dword of any PTI message. Part of MIPI P1149.7 */
#define PTI_LASTDWORD_DTS 0x30
/* basic structure used as a write address to the PTI HW */
struct pti_masterchannel {
u8 master;
u8 channel;
};
/* the following functions are defined in misc/pti.c */
void pti_writedata(struct pti_masterchannel *mc, u8 *buf, int count);
struct pti_masterchannel *pti_request_masterchannel(u8 type,
const char *thread_name);
void pti_release_masterchannel(struct pti_masterchannel *mc);
#endif /* LINUX_INTEL_PTI_H_ */

View File

@@ -569,15 +569,6 @@ struct softirq_action
asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
#ifdef __ARCH_HAS_DO_SOFTIRQ
void do_softirq_own_stack(void);
#else
static inline void do_softirq_own_stack(void)
{
__do_softirq();
}
#endif
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);

View File

@@ -5,23 +5,6 @@
#include <linux/sched.h>
#include <linux/xarray.h>
struct io_identity {
struct files_struct *files;
struct mm_struct *mm;
#ifdef CONFIG_BLK_CGROUP
struct cgroup_subsys_state *blkcg_css;
#endif
const struct cred *creds;
struct nsproxy *nsproxy;
struct fs_struct *fs;
unsigned long fsize;
#ifdef CONFIG_AUDIT
kuid_t loginuid;
unsigned int sessionid;
#endif
refcount_t count;
};
struct io_wq_work_node {
struct io_wq_work_node *next;
};
@@ -36,9 +19,8 @@ struct io_uring_task {
struct xarray xa;
struct wait_queue_head wait;
struct file *last;
void *io_wq;
struct percpu_counter inflight;
struct io_identity __identity;
struct io_identity *identity;
atomic_t in_idle;
bool sqpoll;
@@ -56,12 +38,12 @@ void __io_uring_free(struct task_struct *tsk);
static inline void io_uring_task_cancel(void)
{
if (current->io_uring && !xa_empty(&current->io_uring->xa))
if (current->io_uring)
__io_uring_task_cancel();
}
static inline void io_uring_files_cancel(struct files_struct *files)
{
if (current->io_uring && !xa_empty(&current->io_uring->xa))
if (current->io_uring)
__io_uring_files_cancel(files);
}
static inline void io_uring_free(struct task_struct *tsk)

View File

@@ -85,7 +85,6 @@ struct ipv6_params {
__s32 autoconf;
};
extern struct ipv6_params ipv6_defaults;
#include <linux/icmpv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>

View File

@@ -4,6 +4,12 @@
#include <linux/types.h>
/*
* The annotations present in this file are only relevant for the software
* KASAN modes that rely on compiler instrumentation, and will be optimized
* away for the hardware tag-based KASAN mode. Use kasan_check_byte() instead.
*/
/*
* __kasan_check_*: Always available when KASAN is enabled. This may be used
* even in compilation units that selectively disable KASAN, but must use KASAN

View File

@@ -83,6 +83,7 @@ static inline void kasan_disable_current(void) {}
struct kasan_cache {
int alloc_meta_offset;
int free_meta_offset;
bool is_kmalloc;
};
#ifdef CONFIG_KASAN_HW_TAGS
@@ -143,6 +144,13 @@ static __always_inline void kasan_cache_create(struct kmem_cache *cache,
__kasan_cache_create(cache, size, flags);
}
void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
{
if (kasan_enabled())
__kasan_cache_create_kmalloc(cache);
}
size_t __kasan_metadata_size(struct kmem_cache *cache);
static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
{
@@ -185,19 +193,25 @@ static __always_inline void * __must_check kasan_init_slab_obj(
}
bool __kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip);
static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object,
unsigned long ip)
static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object)
{
if (kasan_enabled())
return __kasan_slab_free(s, object, ip);
return __kasan_slab_free(s, object, _RET_IP_);
return false;
}
void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
static __always_inline void kasan_slab_free_mempool(void *ptr, unsigned long ip)
void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
if (kasan_enabled())
__kasan_slab_free_mempool(ptr, ip);
__kasan_kfree_large(ptr, _RET_IP_);
}
void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
static __always_inline void kasan_slab_free_mempool(void *ptr)
{
if (kasan_enabled())
__kasan_slab_free_mempool(ptr, _RET_IP_);
}
void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
@@ -240,13 +254,19 @@ static __always_inline void * __must_check kasan_krealloc(const void *object,
return (void *)object;
}
void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr, unsigned long ip)
/*
* Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
* the hardware tag-based mode that doesn't rely on compiler instrumentation.
*/
bool __kasan_check_byte(const void *addr, unsigned long ip);
static __always_inline bool kasan_check_byte(const void *addr)
{
if (kasan_enabled())
__kasan_kfree_large(ptr, ip);
return __kasan_check_byte(addr, _RET_IP_);
return true;
}
bool kasan_save_enable_multi_shot(void);
void kasan_restore_multi_shot(bool enabled);
@@ -266,6 +286,7 @@ static inline void kasan_free_pages(struct page *page, unsigned int order) {}
static inline void kasan_cache_create(struct kmem_cache *cache,
unsigned int *size,
slab_flags_t *flags) {}
static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
static inline void kasan_poison_slab(struct page *page) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
@@ -277,12 +298,12 @@ static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
{
return (void *)object;
}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
unsigned long ip)
static inline bool kasan_slab_free(struct kmem_cache *s, void *object)
{
return false;
}
static inline void kasan_slab_free_mempool(void *ptr, unsigned long ip) {}
static inline void kasan_kfree_large(void *ptr) {}
static inline void kasan_slab_free_mempool(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
gfp_t flags)
{
@@ -302,7 +323,10 @@ static inline void *kasan_krealloc(const void *object, size_t new_size,
{
return (void *)object;
}
static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}
static inline bool kasan_check_byte(const void *address)
{
return true;
}
#endif /* CONFIG_KASAN */

View File

@@ -314,6 +314,8 @@ extern void machine_kexec_cleanup(struct kimage *image);
extern int kernel_kexec(void);
extern struct page *kimage_alloc_control_pages(struct kimage *image,
unsigned int order);
int machine_kexec_post_load(struct kimage *image);
extern void __crash_kexec(struct pt_regs *);
extern void crash_kexec(struct pt_regs *);
int kexec_should_crash(struct task_struct *);

View File

@@ -289,6 +289,7 @@ extern struct key *key_alloc(struct key_type *type,
#define KEY_ALLOC_BUILT_IN 0x0004 /* Key is built into kernel */
#define KEY_ALLOC_BYPASS_RESTRICTION 0x0008 /* Override the check on restricted keyrings */
#define KEY_ALLOC_UID_KEYRING 0x0010 /* allocating a user or user session keyring */
#define KEY_ALLOC_SET_KEEP 0x0020 /* Set the KEEP flag on the key/keyring */
extern void key_revoke(struct key *key);
extern void key_invalidate(struct key *key);
@@ -360,7 +361,7 @@ static inline struct key *request_key(struct key_type *type,
* completion of keys undergoing construction with a non-interruptible wait.
*/
#define request_key_net(type, description, net, callout_info) \
request_key_tag(type, description, net->key_domain, callout_info);
request_key_tag(type, description, net->key_domain, callout_info)
/**
* request_key_net_rcu - Request a key for a net namespace under RCU conditions
@@ -372,7 +373,7 @@ static inline struct key *request_key(struct key_type *type,
* network namespace are used.
*/
#define request_key_net_rcu(type, description, net) \
request_key_rcu(type, description, net->key_domain);
request_key_rcu(type, description, net->key_domain)
#endif /* CONFIG_NET */
extern int wait_for_key_construction(struct key *key, bool intr);

222
include/linux/kfence.h Normal file
View File

@@ -0,0 +1,222 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Kernel Electric-Fence (KFENCE). Public interface for allocator and fault
* handler integration. For more info see Documentation/dev-tools/kfence.rst.
*
* Copyright (C) 2020, Google LLC.
*/
#ifndef _LINUX_KFENCE_H
#define _LINUX_KFENCE_H
#include <linux/mm.h>
#include <linux/types.h>
#ifdef CONFIG_KFENCE
/*
* We allocate an even number of pages, as it simplifies calculations to map
* address to metadata indices; effectively, the very first page serves as an
* extended guard page, but otherwise has no special purpose.
*/
#define KFENCE_POOL_SIZE ((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE)
extern char *__kfence_pool;
#ifdef CONFIG_KFENCE_STATIC_KEYS
#include <linux/static_key.h>
DECLARE_STATIC_KEY_FALSE(kfence_allocation_key);
#else
#include <linux/atomic.h>
extern atomic_t kfence_allocation_gate;
#endif
/**
 * is_kfence_address() - check if an address belongs to KFENCE pool
 * @addr: address to check
 *
 * Return: true or false depending on whether the address is within the KFENCE
 * object range.
 *
 * KFENCE objects live in a separate page range and are not to be intermixed
 * with regular heap objects (e.g. KFENCE objects must never be added to the
 * allocator freelists). Failing to do so may and will result in heap
 * corruptions, therefore is_kfence_address() must be used to check whether
 * an object requires specific handling.
 *
 * Note: This function may be used in fast-paths, and is performance critical.
 * Future changes should take this into account; for instance, we want to avoid
 * introducing another load and therefore need to keep KFENCE_POOL_SIZE a
 * constant (until immediate patching support is added to the kernel).
 */
static __always_inline bool is_kfence_address(const void *addr)
{
	/*
	 * A single unsigned comparison covers both out-of-range directions:
	 * if @addr lies below __kfence_pool, the subtraction wraps around to
	 * a value >= KFENCE_POOL_SIZE and the test fails.
	 *
	 * The non-NULL check is required in case the __kfence_pool pointer was
	 * never initialized; keep it in the slow-path after the range-check.
	 */
	return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && addr);
}
/**
* kfence_alloc_pool() - allocate the KFENCE pool via memblock
*/
void __init kfence_alloc_pool(void);
/**
* kfence_init() - perform KFENCE initialization at boot time
*
* Requires that kfence_alloc_pool() was called before. This sets up the
* allocation gate timer, and requires that workqueues are available.
*/
void __init kfence_init(void);
/**
* kfence_shutdown_cache() - handle shutdown_cache() for KFENCE objects
* @s: cache being shut down
*
* Before shutting down a cache, one must ensure there are no remaining objects
* allocated from it. Because KFENCE objects are not referenced from the cache
* directly, we need to check them here.
*
* Note that shutdown_cache() is internal to SL*B, and kmem_cache_destroy() does
* not return if allocated objects still exist: it prints an error message and
* simply aborts destruction of a cache, leaking memory.
*
* If the only such objects are KFENCE objects, we will not leak the entire
* cache, but instead try to provide more useful debug info by making allocated
* objects "zombie allocations". Objects may then still be used or freed (which
* is handled gracefully), but usage will result in showing KFENCE error reports
* which include stack traces to the user of the object, the original allocation
* site, and caller to shutdown_cache().
*/
void kfence_shutdown_cache(struct kmem_cache *s);
/*
* Allocate a KFENCE object. Allocators must not call this function directly,
* use kfence_alloc() instead.
*/
void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags);
/**
 * kfence_alloc() - allocate a KFENCE object with a low probability
 * @s:     struct kmem_cache with object requirements
 * @size:  exact size of the object to allocate (can be less than @s->size
 *         e.g. for kmalloc caches)
 * @flags: GFP flags
 *
 * Return:
 * * NULL     - must proceed with allocating as usual,
 * * non-NULL - pointer to a KFENCE object.
 *
 * kfence_alloc() should be inserted into the heap allocation fast path,
 * allowing it to transparently return KFENCE-allocated objects with a low
 * probability using a static branch (the probability is controlled by the
 * kfence.sample_interval boot parameter).
 */
static __always_inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
{
	/* The allocation gate decides whether this request is sampled. */
#ifdef CONFIG_KFENCE_STATIC_KEYS
	bool gate_open = static_branch_unlikely(&kfence_allocation_key);
#else
	bool gate_open = unlikely(!atomic_read(&kfence_allocation_gate));
#endif

	return gate_open ? __kfence_alloc(s, size, flags) : NULL;
}
/**
* kfence_ksize() - get actual amount of memory allocated for a KFENCE object
* @addr: pointer to a heap object
*
* Return:
* * 0 - not a KFENCE object, must call __ksize() instead,
* * non-0 - this many bytes can be accessed without causing a memory error.
*
* kfence_ksize() returns the number of bytes requested for a KFENCE object at
* allocation time. This number may be less than the object size of the
* corresponding struct kmem_cache.
*/
size_t kfence_ksize(const void *addr);
/**
* kfence_object_start() - find the beginning of a KFENCE object
* @addr: address within a KFENCE-allocated object
*
* Return: address of the beginning of the object.
*
* SL[AU]B-allocated objects are laid out within a page one by one, so it is
* easy to calculate the beginning of an object given a pointer inside it and
* the object size. The same is not true for KFENCE, which places a single
* object at either end of the page. This helper function is used to find the
* beginning of a KFENCE-allocated object.
*/
void *kfence_object_start(const void *addr);
/**
* __kfence_free() - release a KFENCE heap object to KFENCE pool
* @addr: object to be freed
*
* Requires: is_kfence_address(addr)
*
* Release a KFENCE object and mark it as freed.
*/
void __kfence_free(void *addr);
/**
 * kfence_free() - try to release an arbitrary heap object to KFENCE pool
 * @addr: object to be freed
 *
 * Return:
 * * false - object doesn't belong to KFENCE pool and was ignored,
 * * true  - object was released to KFENCE pool.
 *
 * Release a KFENCE object and mark it as freed. May be called on any object,
 * even non-KFENCE objects, to simplify integration of the hooks into the
 * allocator's free codepath. The allocator must check the return value to
 * determine if it was a KFENCE object or not.
 */
static __always_inline __must_check bool kfence_free(void *addr)
{
	/* Objects inside the pool are handled here; everything else is the
	 * regular allocator's responsibility. */
	if (is_kfence_address(addr)) {
		__kfence_free(addr);
		return true;
	}
	return false;
}
/**
* kfence_handle_page_fault() - perform page fault handling for KFENCE pages
* @addr: faulting address
* @is_write: is access a write
* @regs: current struct pt_regs (can be NULL, but shows full stack trace)
*
* Return:
* * false - address outside KFENCE pool,
* * true - page fault handled by KFENCE, no additional handling required.
*
* A page fault inside KFENCE pool indicates a memory error, such as an
* out-of-bounds access, a use-after-free or an invalid memory access. In these
* cases KFENCE prints an error message and marks the offending page as
* present, so that the kernel can proceed.
*/
bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs);
#else /* CONFIG_KFENCE */
/* Stubs for !CONFIG_KFENCE: every hook is an inert no-op so callers need no
 * conditional compilation of their own. */
static inline bool is_kfence_address(const void *addr) { return false; }
static inline void kfence_alloc_pool(void) { }
static inline void kfence_init(void) { }
static inline void kfence_shutdown_cache(struct kmem_cache *s) { }
static inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) { return NULL; }
static inline size_t kfence_ksize(const void *addr) { return 0; }
static inline void *kfence_object_start(const void *addr) { return NULL; }
static inline void __kfence_free(void *addr) { }
static inline bool __must_check kfence_free(void *addr) { return false; }
static inline bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write,
							 struct pt_regs *regs)
{
	return false;
}
#endif
#endif /* _LINUX_KFENCE_H */

View File

@@ -359,9 +359,11 @@ extern atomic_t kgdb_active;
extern bool dbg_is_early;
extern void __init dbg_late_init(void);
extern void kgdb_panic(const char *msg);
extern void kgdb_free_init_mem(void);
#else /* ! CONFIG_KGDB */
#define in_dbg_master() (0)
#define dbg_late_init()
static inline void kgdb_panic(const char *msg) {}
static inline void kgdb_free_init_mem(void) { }
#endif /* ! CONFIG_KGDB */
#endif /* _KGDB_H_ */

View File

@@ -3,6 +3,7 @@
#define _LINUX_KHUGEPAGED_H
#include <linux/sched/coredump.h> /* MMF_VM_HUGEPAGE */
#include <linux/shmem_fs.h>
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -57,6 +58,7 @@ static inline int khugepaged_enter(struct vm_area_struct *vma,
{
if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
if ((khugepaged_always() ||
(shmem_file(vma->vm_file) && shmem_huge_enabled(vma)) ||
(khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) &&
!(vm_flags & VM_NOHUGEPAGE) &&
!test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))

View File

@@ -4,7 +4,7 @@
* Version: 0.1.0
* Description: ks0108 LCD Controller driver header
*
* Author: Copyright (C) Miguel Ojeda Sandonis
* Author: Copyright (C) Miguel Ojeda <ojeda@kernel.org>
* Date: 2006-10-31
*/

View File

@@ -11,6 +11,7 @@
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
@@ -506,6 +507,8 @@ struct kvm {
struct mmu_notifier mmu_notifier;
unsigned long mmu_notifier_seq;
long mmu_notifier_count;
unsigned long mmu_notifier_range_start;
unsigned long mmu_notifier_range_end;
#endif
long tlbs_dirty;
struct list_head devices;
@@ -733,7 +736,7 @@ kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
bool atomic, bool *async, bool write_fault,
bool *writable);
bool *writable, hva_t *hva);
void kvm_release_pfn_clean(kvm_pfn_t pfn);
void kvm_release_pfn_dirty(kvm_pfn_t pfn);
@@ -1207,6 +1210,26 @@ static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
return 1;
return 0;
}
/* Like mmu_notifier_retry(), but also retries when @hva falls inside a range
 * currently being invalidated. Caller must hold kvm->mmu_lock (enforced by
 * the lockdep assertion below). Returns 1 if the caller must retry, 0 if the
 * cached mmu_seq is still valid. */
static inline int mmu_notifier_retry_hva(struct kvm *kvm,
					 unsigned long mmu_seq,
					 unsigned long hva)
{
	lockdep_assert_held(&kvm->mmu_lock);
	/*
	 * If mmu_notifier_count is non-zero, then the range maintained by
	 * kvm_mmu_notifier_invalidate_range_start contains all addresses that
	 * might be being invalidated. Note that it may include some false
	 * positives, due to shortcuts when handling concurrent invalidations.
	 */
	if (unlikely(kvm->mmu_notifier_count) &&
	    hva >= kvm->mmu_notifier_range_start &&
	    hva < kvm->mmu_notifier_range_end)
		return 1;
	if (kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
#endif
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

View File

@@ -85,6 +85,7 @@ static inline struct led_classdev_flash *lcdev_to_flcdev(
return container_of(lcdev, struct led_classdev_flash, led_cdev);
}
#if IS_ENABLED(CONFIG_LEDS_CLASS_FLASH)
/**
* led_classdev_flash_register_ext - register a new object of LED class with
* init data and with support for flash LEDs
@@ -98,12 +99,6 @@ int led_classdev_flash_register_ext(struct device *parent,
struct led_classdev_flash *fled_cdev,
struct led_init_data *init_data);
static inline int led_classdev_flash_register(struct device *parent,
struct led_classdev_flash *fled_cdev)
{
return led_classdev_flash_register_ext(parent, fled_cdev, NULL);
}
/**
* led_classdev_flash_unregister - unregisters an object of led_classdev class
* with support for flash LEDs
@@ -118,15 +113,44 @@ int devm_led_classdev_flash_register_ext(struct device *parent,
struct led_init_data *init_data);
void devm_led_classdev_flash_unregister(struct device *parent,
struct led_classdev_flash *fled_cdev);
#else
static inline int led_classdev_flash_register_ext(struct device *parent,
struct led_classdev_flash *fled_cdev,
struct led_init_data *init_data)
{
return 0;
}
static inline void led_classdev_flash_unregister(struct led_classdev_flash *fled_cdev) {};
static inline int devm_led_classdev_flash_register_ext(struct device *parent,
struct led_classdev_flash *fled_cdev,
struct led_init_data *init_data)
{
return 0;
}
static inline void devm_led_classdev_flash_unregister(struct device *parent,
struct led_classdev_flash *fled_cdev)
{};
#endif /* IS_ENABLED(CONFIG_LEDS_CLASS_FLASH) */
static inline int led_classdev_flash_register(struct device *parent,
struct led_classdev_flash *fled_cdev)
{
return led_classdev_flash_register_ext(parent, fled_cdev, NULL);
}
static inline int devm_led_classdev_flash_register(struct device *parent,
struct led_classdev_flash *fled_cdev)
{
return devm_led_classdev_flash_register_ext(parent, fled_cdev, NULL);
}
void devm_led_classdev_flash_unregister(struct device *parent,
struct led_classdev_flash *fled_cdev);
/**
* led_set_flash_strobe - setup flash strobe
* @fled_cdev: the flash LED to set strobe on

View File

@@ -44,12 +44,6 @@ int led_classdev_multicolor_register_ext(struct device *parent,
struct led_classdev_mc *mcled_cdev,
struct led_init_data *init_data);
static inline int led_classdev_multicolor_register(struct device *parent,
struct led_classdev_mc *mcled_cdev)
{
return led_classdev_multicolor_register_ext(parent, mcled_cdev, NULL);
}
/**
* led_classdev_multicolor_unregister - unregisters an object of led_classdev
* class with support for multicolor LEDs
@@ -68,13 +62,6 @@ int devm_led_classdev_multicolor_register_ext(struct device *parent,
struct led_classdev_mc *mcled_cdev,
struct led_init_data *init_data);
static inline int devm_led_classdev_multicolor_register(struct device *parent,
struct led_classdev_mc *mcled_cdev)
{
return devm_led_classdev_multicolor_register_ext(parent, mcled_cdev,
NULL);
}
void devm_led_classdev_multicolor_unregister(struct device *parent,
struct led_classdev_mc *mcled_cdev);
#else
@@ -83,27 +70,33 @@ static inline int led_classdev_multicolor_register_ext(struct device *parent,
struct led_classdev_mc *mcled_cdev,
struct led_init_data *init_data)
{
return -EINVAL;
}
static inline int led_classdev_multicolor_register(struct device *parent,
struct led_classdev_mc *mcled_cdev)
{
return led_classdev_multicolor_register_ext(parent, mcled_cdev, NULL);
return 0;
}
static inline void led_classdev_multicolor_unregister(struct led_classdev_mc *mcled_cdev) {};
static inline int led_mc_calc_color_components(struct led_classdev_mc *mcled_cdev,
enum led_brightness brightness)
{
return -EINVAL;
return 0;
}
static inline int devm_led_classdev_multicolor_register_ext(struct device *parent,
struct led_classdev_mc *mcled_cdev,
struct led_init_data *init_data)
{
return -EINVAL;
return 0;
}
static inline void devm_led_classdev_multicolor_unregister(struct device *parent,
struct led_classdev_mc *mcled_cdev)
{};
#endif /* IS_ENABLED(CONFIG_LEDS_CLASS_MULTICOLOR) */
static inline int led_classdev_multicolor_register(struct device *parent,
struct led_classdev_mc *mcled_cdev)
{
return led_classdev_multicolor_register_ext(parent, mcled_cdev, NULL);
}
static inline int devm_led_classdev_multicolor_register(struct device *parent,
@@ -113,9 +106,4 @@ static inline int devm_led_classdev_multicolor_register(struct device *parent,
NULL);
}
static inline void devm_led_classdev_multicolor_unregister(struct device *parent,
struct led_classdev_mc *mcled_cdev)
{};
#endif /* IS_ENABLED(CONFIG_LEDS_CLASS_MULTICOLOR) */
#endif /* _LINUX_MULTICOLOR_LEDS_H_INCLUDED */

View File

@@ -63,8 +63,8 @@ struct led_hw_trigger_type {
struct led_classdev {
const char *name;
enum led_brightness brightness;
enum led_brightness max_brightness;
unsigned int brightness;
unsigned int max_brightness;
int flags;
/* Lower 16 bits reflect status */
@@ -253,8 +253,7 @@ void led_blink_set_oneshot(struct led_classdev *led_cdev,
* software blink timer that implements blinking when the
* hardware doesn't. This function is guaranteed not to sleep.
*/
void led_set_brightness(struct led_classdev *led_cdev,
enum led_brightness brightness);
void led_set_brightness(struct led_classdev *led_cdev, unsigned int brightness);
/**
* led_set_brightness_sync - set LED brightness synchronously
@@ -267,8 +266,7 @@ void led_set_brightness(struct led_classdev *led_cdev,
*
* Returns: 0 on success or negative error value on failure
*/
int led_set_brightness_sync(struct led_classdev *led_cdev,
enum led_brightness value);
int led_set_brightness_sync(struct led_classdev *led_cdev, unsigned int value);
/**
* led_update_brightness - update LED brightness
@@ -565,7 +563,7 @@ static inline void ledtrig_cpu(enum cpu_led_event evt)
#ifdef CONFIG_LEDS_BRIGHTNESS_HW_CHANGED
void led_classdev_notify_brightness_hw_changed(
struct led_classdev *led_cdev, enum led_brightness brightness);
struct led_classdev *led_cdev, unsigned int brightness);
#else
static inline void led_classdev_notify_brightness_hw_changed(
struct led_classdev *led_cdev, enum led_brightness brightness) { }

View File

@@ -3,9 +3,6 @@
* Common LiteX header providing
* helper functions for accessing CSRs.
*
* Implementation of the functions is provided by
* the LiteX SoC Controller driver.
*
* Copyright (C) 2019-2020 Antmicro <www.antmicro.com>
*/
@@ -13,90 +10,147 @@
#define _LINUX_LITEX_H
#include <linux/io.h>
#include <linux/types.h>
#include <linux/compiler_types.h>
/*
* The parameters below are true for LiteX SoCs configured for 8-bit CSR Bus,
* 32-bit aligned.
*
* Supporting other configurations will require extending the logic in this
* header and in the LiteX SoC controller driver.
*/
#define LITEX_REG_SIZE 0x4
#define LITEX_SUBREG_SIZE 0x1
/* LiteX SoCs support 8- or 32-bit CSR Bus data width (i.e., subreg. size) */
#if defined(CONFIG_LITEX_SUBREG_SIZE) && \
(CONFIG_LITEX_SUBREG_SIZE == 1 || CONFIG_LITEX_SUBREG_SIZE == 4)
#define LITEX_SUBREG_SIZE CONFIG_LITEX_SUBREG_SIZE
#else
#error LiteX subregister size (LITEX_SUBREG_SIZE) must be 4 or 1!
#endif
#define LITEX_SUBREG_SIZE_BIT (LITEX_SUBREG_SIZE * 8)
#define WRITE_LITEX_SUBREGISTER(val, base_offset, subreg_id) \
writel((u32 __force)cpu_to_le32(val), base_offset + (LITEX_REG_SIZE * subreg_id))
/* LiteX subregisters of any width are always aligned on a 4-byte boundary */
#define LITEX_SUBREG_ALIGN 0x4
#define READ_LITEX_SUBREGISTER(base_offset, subreg_id) \
le32_to_cpu((__le32 __force)readl(base_offset + (LITEX_REG_SIZE * subreg_id)))
/* Raw MMIO write of one 32-bit LiteX subregister; value is stored
 * little-endian on the bus (cpu_to_le32 before writel). */
static inline void _write_litex_subregister(u32 val, void __iomem *addr)
{
	writel((u32 __force)cpu_to_le32(val), addr);
}
void litex_set_reg(void __iomem *reg, unsigned long reg_sz, unsigned long val);
/* Raw MMIO read of one 32-bit LiteX subregister; bus value is little-endian
 * and converted to CPU order (le32_to_cpu after readl). */
static inline u32 _read_litex_subregister(void __iomem *addr)
{
	return le32_to_cpu((__le32 __force)readl(addr));
}
unsigned long litex_get_reg(void __iomem *reg, unsigned long reg_sz);
/*
* LiteX SoC Generator, depending on the configuration, can split a single
* logical CSR (Control&Status Register) into a series of consecutive physical
* registers.
*
* For example, in the configuration with 8-bit CSR Bus, a 32-bit aligned,
* 32-bit wide logical CSR will be laid out as four 32-bit physical
* subregisters, each one containing one byte of meaningful data.
*
* For details see: https://github.com/enjoy-digital/litex/wiki/CSR-Bus
*/
/* number of LiteX subregisters needed to store a register of given reg_size */
#define _litex_num_subregs(reg_size) \
(((reg_size) - 1) / LITEX_SUBREG_SIZE + 1)
/*
* since the number of 4-byte aligned subregisters required to store a single
* LiteX CSR (MMIO) register varies with LITEX_SUBREG_SIZE, the offset of the
* next adjacent LiteX CSR register w.r.t. the offset of the current one also
* depends on how many subregisters the latter is spread across
*/
#define _next_reg_off(off, size) \
((off) + _litex_num_subregs(size) * LITEX_SUBREG_ALIGN)
/*
* The purpose of `_litex_[set|get]_reg()` is to implement the logic of
* writing to/reading from the LiteX CSR in a single place that can be then
* reused by all LiteX drivers via the `litex_[write|read][8|16|32|64]()`
* accessors for the appropriate data width.
* NOTE: direct use of `_litex_[set|get]_reg()` by LiteX drivers is strongly
* discouraged, as they perform no error checking on the requested data width!
*/
/**
 * _litex_set_reg() - Writes a value to the LiteX CSR (Control&Status Register)
 * @reg: Address of the CSR
 * @reg_size: The width of the CSR expressed in the number of bytes
 * @val: Value to be written to the CSR
 *
 * This function splits a single (possibly multi-byte) LiteX CSR write into
 * a series of subregister writes with a proper offset.
 * NOTE: caller is responsible for ensuring (0 < reg_size <= sizeof(u64)).
 */
static inline void _litex_set_reg(void __iomem *reg, size_t reg_size, u64 val)
{
	u8 nsubregs = _litex_num_subregs(reg_size);
	u8 idx;

	/* Subregisters are written most-significant first. */
	for (idx = 0; idx < nsubregs; idx++) {
		u8 shift = (nsubregs - 1 - idx) * LITEX_SUBREG_SIZE_BIT;

		_write_litex_subregister(val >> shift, reg);
		reg += LITEX_SUBREG_ALIGN;
	}
}
/**
 * _litex_get_reg() - Reads a value of the LiteX CSR (Control&Status Register)
 * @reg: Address of the CSR
 * @reg_size: The width of the CSR expressed in the number of bytes
 *
 * Return: Value read from the CSR
 *
 * This function generates a series of subregister reads with a proper offset
 * and joins their results into a single (possibly multi-byte) LiteX CSR value.
 * NOTE: caller is responsible for ensuring (0 < reg_size <= sizeof(u64)).
 */
static inline u64 _litex_get_reg(void __iomem *reg, size_t reg_size)
{
	u64 value = _read_litex_subregister(reg);
	u8 idx;

	/* First subregister holds the most-significant bits. */
	for (idx = 1; idx < _litex_num_subregs(reg_size); idx++) {
		reg += LITEX_SUBREG_ALIGN;
		value = (value << LITEX_SUBREG_SIZE_BIT) | _read_litex_subregister(reg);
	}
	return value;
}
static inline void litex_write8(void __iomem *reg, u8 val)
{
WRITE_LITEX_SUBREGISTER(val, reg, 0);
_litex_set_reg(reg, sizeof(u8), val);
}
static inline void litex_write16(void __iomem *reg, u16 val)
{
WRITE_LITEX_SUBREGISTER(val >> 8, reg, 0);
WRITE_LITEX_SUBREGISTER(val, reg, 1);
_litex_set_reg(reg, sizeof(u16), val);
}
static inline void litex_write32(void __iomem *reg, u32 val)
{
WRITE_LITEX_SUBREGISTER(val >> 24, reg, 0);
WRITE_LITEX_SUBREGISTER(val >> 16, reg, 1);
WRITE_LITEX_SUBREGISTER(val >> 8, reg, 2);
WRITE_LITEX_SUBREGISTER(val, reg, 3);
_litex_set_reg(reg, sizeof(u32), val);
}
static inline void litex_write64(void __iomem *reg, u64 val)
{
WRITE_LITEX_SUBREGISTER(val >> 56, reg, 0);
WRITE_LITEX_SUBREGISTER(val >> 48, reg, 1);
WRITE_LITEX_SUBREGISTER(val >> 40, reg, 2);
WRITE_LITEX_SUBREGISTER(val >> 32, reg, 3);
WRITE_LITEX_SUBREGISTER(val >> 24, reg, 4);
WRITE_LITEX_SUBREGISTER(val >> 16, reg, 5);
WRITE_LITEX_SUBREGISTER(val >> 8, reg, 6);
WRITE_LITEX_SUBREGISTER(val, reg, 7);
_litex_set_reg(reg, sizeof(u64), val);
}
static inline u8 litex_read8(void __iomem *reg)
{
return READ_LITEX_SUBREGISTER(reg, 0);
return _litex_get_reg(reg, sizeof(u8));
}
static inline u16 litex_read16(void __iomem *reg)
{
return (READ_LITEX_SUBREGISTER(reg, 0) << 8)
| (READ_LITEX_SUBREGISTER(reg, 1));
return _litex_get_reg(reg, sizeof(u16));
}
static inline u32 litex_read32(void __iomem *reg)
{
return (READ_LITEX_SUBREGISTER(reg, 0) << 24)
| (READ_LITEX_SUBREGISTER(reg, 1) << 16)
| (READ_LITEX_SUBREGISTER(reg, 2) << 8)
| (READ_LITEX_SUBREGISTER(reg, 3));
return _litex_get_reg(reg, sizeof(u32));
}
static inline u64 litex_read64(void __iomem *reg)
{
return ((u64)READ_LITEX_SUBREGISTER(reg, 0) << 56)
| ((u64)READ_LITEX_SUBREGISTER(reg, 1) << 48)
| ((u64)READ_LITEX_SUBREGISTER(reg, 2) << 40)
| ((u64)READ_LITEX_SUBREGISTER(reg, 3) << 32)
| ((u64)READ_LITEX_SUBREGISTER(reg, 4) << 24)
| ((u64)READ_LITEX_SUBREGISTER(reg, 5) << 16)
| ((u64)READ_LITEX_SUBREGISTER(reg, 6) << 8)
| ((u64)READ_LITEX_SUBREGISTER(reg, 7));
return _litex_get_reg(reg, sizeof(u64));
}
#endif /* _LINUX_LITEX_H */

View File

@@ -42,7 +42,7 @@ struct device *mdev_get_iommu_device(struct device *dev);
* @mdev: mdev_device structure on of mediated device
* that is being created
* Returns integer: success (0) or error (< 0)
* @remove: Called to free resources in parent device's driver for a
* @remove: Called to free resources in parent device's driver for
* a mediated device. It is mandatory to provide 'remove'
* ops.
* @mdev: mdev_device device structure which is being

View File

@@ -68,7 +68,7 @@ struct mei_cl_driver {
int (*probe)(struct mei_cl_device *cldev,
const struct mei_cl_device_id *id);
int (*remove)(struct mei_cl_device *cldev);
void (*remove)(struct mei_cl_device *cldev);
};
int __mei_cldev_driver_register(struct mei_cl_driver *cldrv,

View File

@@ -92,6 +92,10 @@ struct lruvec_stat {
long count[NR_VM_NODE_STAT_ITEMS];
};
struct batched_lruvec_stat {
s32 count[NR_VM_NODE_STAT_ITEMS];
};
/*
* Bitmap of shrinker::id corresponding to memcg-aware shrinkers,
* which have elements charged to this memcg.
@@ -107,11 +111,17 @@ struct memcg_shrinker_map {
struct mem_cgroup_per_node {
struct lruvec lruvec;
/* Legacy local VM stats */
/*
* Legacy local VM stats. This should be struct lruvec_stat and
* cannot be optimized to struct batched_lruvec_stat. Because
* the threshold of the lruvec_stat_cpu can be as big as
* MEMCG_CHARGE_BATCH * PAGE_SIZE. It can fit into s32. But this
* field has no upper limit.
*/
struct lruvec_stat __percpu *lruvec_stat_local;
/* Subtree VM stats (batched updates) */
struct lruvec_stat __percpu *lruvec_stat_cpu;
struct batched_lruvec_stat __percpu *lruvec_stat_cpu;
atomic_long_t lruvec_stat[NR_VM_NODE_STAT_ITEMS];
unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
@@ -475,19 +485,6 @@ static inline struct obj_cgroup **page_objcgs_check(struct page *page)
return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}
/*
* set_page_objcgs - associate a page with a object cgroups vector
* @page: a pointer to the page struct
* @objcgs: a pointer to the object cgroups vector
*
* Atomically associates a page with a vector of object cgroups.
*/
static inline bool set_page_objcgs(struct page *page,
struct obj_cgroup **objcgs)
{
return !cmpxchg(&page->memcg_data, 0, (unsigned long)objcgs |
MEMCG_DATA_OBJCGS);
}
#else
static inline struct obj_cgroup **page_objcgs(struct page *page)
{
@@ -498,12 +495,6 @@ static inline struct obj_cgroup **page_objcgs_check(struct page *page)
{
return NULL;
}
static inline bool set_page_objcgs(struct page *page,
struct obj_cgroup **objcgs)
{
return true;
}
#endif
static __always_inline bool memcg_stat_item_in_bytes(int idx)
@@ -689,8 +680,6 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
struct mem_cgroup *get_mem_cgroup_from_page(struct page *page);
struct lruvec *lock_page_lruvec(struct page *page);
struct lruvec *lock_page_lruvec_irq(struct page *page);
struct lruvec *lock_page_lruvec_irqsave(struct page *page,
@@ -1200,11 +1189,6 @@ static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
return NULL;
}
static inline struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
{
return NULL;
}
static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}
@@ -1601,9 +1585,6 @@ static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
#endif
#ifdef CONFIG_MEMCG_KMEM
int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
unsigned int nr_pages);
void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages);
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge_page(struct page *page, int order);

View File

@@ -27,9 +27,8 @@ struct memory_block {
unsigned long start_section_nr;
unsigned long state; /* serialized by the dev->lock */
int online_type; /* for passing data to online routine */
int phys_device; /* to which fru does this belong? */
struct device dev;
int nid; /* NID for this memory block */
struct device dev;
};
int arch_get_memory_phys_device(unsigned long start_pfn);

View File

@@ -16,22 +16,7 @@ struct resource;
struct vmem_altmap;
#ifdef CONFIG_MEMORY_HOTPLUG
/*
* Return page for the valid pfn only if the page is online. All pfn
* walkers which rely on the fully initialized page->flags and others
* should use this rather than pfn_valid && pfn_to_page
*/
#define pfn_to_online_page(pfn) \
({ \
struct page *___page = NULL; \
unsigned long ___pfn = pfn; \
unsigned long ___nr = pfn_to_section_nr(___pfn); \
\
if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr) && \
pfn_valid_within(___pfn)) \
___page = pfn_to_page(___pfn); \
___page; \
})
struct page *pfn_to_online_page(unsigned long pfn);
/*
* Types for free bootmem stored in page->lru.next. These have to be in
@@ -68,7 +53,7 @@ typedef int __bitwise mhp_t;
* with this flag set, the resource pointer must no longer be used as it
* might be stale, or the resource might have changed.
*/
#define MEMHP_MERGE_RESOURCE ((__force mhp_t)BIT(0))
#define MHP_MERGE_RESOURCE ((__force mhp_t)BIT(0))
/*
* Extended parameters for memory hotplug:
@@ -81,6 +66,9 @@ struct mhp_params {
pgprot_t pgprot;
};
bool mhp_range_allowed(u64 start, u64 size, bool need_mapping);
struct range mhp_get_pluggable_range(bool need_mapping);
/*
* Zone resizing functions
*
@@ -131,10 +119,10 @@ extern int arch_add_memory(int nid, u64 start, u64 size,
struct mhp_params *params);
extern u64 max_mem_size;
extern int memhp_online_type_from_str(const char *str);
extern int mhp_online_type_from_str(const char *str);
/* Default online_type (MMOP_*) when new memory blocks are added. */
extern int memhp_default_online_type;
extern int mhp_default_online_type;
/* If movable_node boot option specified */
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
@@ -281,6 +269,13 @@ static inline bool movable_node_is_enabled(void)
}
#endif /* ! CONFIG_MEMORY_HOTPLUG */
/*
* Keep this declaration outside CONFIG_MEMORY_HOTPLUG as some
* platforms might override and use arch_get_mappable_range()
* for internal non memory hotplug purposes.
*/
struct range arch_get_mappable_range(void);
#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
* pgdat resizing functions

View File

@@ -137,6 +137,7 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap);
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
struct dev_pagemap *pgmap);
bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn);
unsigned long vmem_altmap_offset(struct vmem_altmap *altmap);
void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns);
@@ -165,6 +166,11 @@ static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
return NULL;
}
static inline bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
{
return false;
}
static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
return 0;

View File

@@ -1,453 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Core interface for Intel MSIC
*
* Copyright (C) 2011, Intel Corporation
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
*/
#ifndef __LINUX_MFD_INTEL_MSIC_H__
#define __LINUX_MFD_INTEL_MSIC_H__
/* ID */
#define INTEL_MSIC_ID0 0x000 /* RO */
#define INTEL_MSIC_ID1 0x001 /* RO */
/* IRQ */
#define INTEL_MSIC_IRQLVL1 0x002
#define INTEL_MSIC_ADC1INT 0x003
#define INTEL_MSIC_CCINT 0x004
#define INTEL_MSIC_PWRSRCINT 0x005
#define INTEL_MSIC_PWRSRCINT1 0x006
#define INTEL_MSIC_CHRINT 0x007
#define INTEL_MSIC_CHRINT1 0x008
#define INTEL_MSIC_RTCIRQ 0x009
#define INTEL_MSIC_GPIO0LVIRQ 0x00a
#define INTEL_MSIC_GPIO1LVIRQ 0x00b
#define INTEL_MSIC_GPIOHVIRQ 0x00c
#define INTEL_MSIC_VRINT 0x00d
#define INTEL_MSIC_OCAUDIO 0x00e
#define INTEL_MSIC_ACCDET 0x00f
#define INTEL_MSIC_RESETIRQ1 0x010
#define INTEL_MSIC_RESETIRQ2 0x011
#define INTEL_MSIC_MADC1INT 0x012
#define INTEL_MSIC_MCCINT 0x013
#define INTEL_MSIC_MPWRSRCINT 0x014
#define INTEL_MSIC_MPWRSRCINT1 0x015
#define INTEL_MSIC_MCHRINT 0x016
#define INTEL_MSIC_MCHRINT1 0x017
#define INTEL_MSIC_RTCIRQMASK 0x018
#define INTEL_MSIC_GPIO0LVIRQMASK 0x019
#define INTEL_MSIC_GPIO1LVIRQMASK 0x01a
#define INTEL_MSIC_GPIOHVIRQMASK 0x01b
#define INTEL_MSIC_VRINTMASK 0x01c
#define INTEL_MSIC_OCAUDIOMASK 0x01d
#define INTEL_MSIC_ACCDETMASK 0x01e
#define INTEL_MSIC_RESETIRQ1MASK 0x01f
#define INTEL_MSIC_RESETIRQ2MASK 0x020
#define INTEL_MSIC_IRQLVL1MSK 0x021
#define INTEL_MSIC_PBCONFIG 0x03e
#define INTEL_MSIC_PBSTATUS 0x03f /* RO */
/* GPIO */
/*
 * LV/HV denote the two GPIO banks (presumably low-/high-voltage rails);
 * CTLO/CTLI look like output/input control registers - NOTE(review):
 * confirm against the MSIC datasheet.
 */
#define INTEL_MSIC_GPIO0LV7CTLO 0x040
#define INTEL_MSIC_GPIO0LV6CTLO 0x041
#define INTEL_MSIC_GPIO0LV5CTLO 0x042
#define INTEL_MSIC_GPIO0LV4CTLO 0x043
#define INTEL_MSIC_GPIO0LV3CTLO 0x044
#define INTEL_MSIC_GPIO0LV2CTLO 0x045
#define INTEL_MSIC_GPIO0LV1CTLO 0x046
#define INTEL_MSIC_GPIO0LV0CTLO 0x047
/* NOTE(review): trailing 'S' in CTLOS/CTLIS below breaks the naming pattern
 * of the neighbouring registers; it may be intentional datasheet naming -
 * verify before "fixing" (renaming would break users of this header). */
#define INTEL_MSIC_GPIO1LV7CTLOS 0x048
#define INTEL_MSIC_GPIO1LV6CTLO 0x049
#define INTEL_MSIC_GPIO1LV5CTLO 0x04a
#define INTEL_MSIC_GPIO1LV4CTLO 0x04b
#define INTEL_MSIC_GPIO1LV3CTLO 0x04c
#define INTEL_MSIC_GPIO1LV2CTLO 0x04d
#define INTEL_MSIC_GPIO1LV1CTLO 0x04e
#define INTEL_MSIC_GPIO1LV0CTLO 0x04f
#define INTEL_MSIC_GPIO0LV7CTLI 0x050
#define INTEL_MSIC_GPIO0LV6CTLI 0x051
#define INTEL_MSIC_GPIO0LV5CTLI 0x052
#define INTEL_MSIC_GPIO0LV4CTLI 0x053
#define INTEL_MSIC_GPIO0LV3CTLI 0x054
#define INTEL_MSIC_GPIO0LV2CTLI 0x055
#define INTEL_MSIC_GPIO0LV1CTLI 0x056
#define INTEL_MSIC_GPIO0LV0CTLI 0x057
#define INTEL_MSIC_GPIO1LV7CTLIS 0x058
#define INTEL_MSIC_GPIO1LV6CTLI 0x059
#define INTEL_MSIC_GPIO1LV5CTLI 0x05a
#define INTEL_MSIC_GPIO1LV4CTLI 0x05b
#define INTEL_MSIC_GPIO1LV3CTLI 0x05c
#define INTEL_MSIC_GPIO1LV2CTLI 0x05d
#define INTEL_MSIC_GPIO1LV1CTLI 0x05e
#define INTEL_MSIC_GPIO1LV0CTLI 0x05f
/* PWM clock dividers and duty cycles for the three PWM outputs */
#define INTEL_MSIC_PWM0CLKDIV1 0x061
#define INTEL_MSIC_PWM0CLKDIV0 0x062
#define INTEL_MSIC_PWM1CLKDIV1 0x063
#define INTEL_MSIC_PWM1CLKDIV0 0x064
#define INTEL_MSIC_PWM2CLKDIV1 0x065
#define INTEL_MSIC_PWM2CLKDIV0 0x066
#define INTEL_MSIC_PWM0DUTYCYCLE 0x067
#define INTEL_MSIC_PWM1DUTYCYCLE 0x068
#define INTEL_MSIC_PWM2DUTYCYCLE 0x069
#define INTEL_MSIC_GPIO0HV3CTLO 0x06d
#define INTEL_MSIC_GPIO0HV2CTLO 0x06e
#define INTEL_MSIC_GPIO0HV1CTLO 0x06f
#define INTEL_MSIC_GPIO0HV0CTLO 0x070
#define INTEL_MSIC_GPIO1HV3CTLO 0x071
#define INTEL_MSIC_GPIO1HV2CTLO 0x072
#define INTEL_MSIC_GPIO1HV1CTLO 0x073
#define INTEL_MSIC_GPIO1HV0CTLO 0x074
#define INTEL_MSIC_GPIO0HV3CTLI 0x075
#define INTEL_MSIC_GPIO0HV2CTLI 0x076
#define INTEL_MSIC_GPIO0HV1CTLI 0x077
#define INTEL_MSIC_GPIO0HV0CTLI 0x078
#define INTEL_MSIC_GPIO1HV3CTLI 0x079
#define INTEL_MSIC_GPIO1HV2CTLI 0x07a
#define INTEL_MSIC_GPIO1HV1CTLI 0x07b
#define INTEL_MSIC_GPIO1HV0CTLI 0x07c
/* SVID */
#define INTEL_MSIC_SVIDCTRL0 0x080
#define INTEL_MSIC_SVIDCTRL1 0x081
#define INTEL_MSIC_SVIDCTRL2 0x082
#define INTEL_MSIC_SVIDTXLASTPKT3 0x083 /* RO */
#define INTEL_MSIC_SVIDTXLASTPKT2 0x084 /* RO */
#define INTEL_MSIC_SVIDTXLASTPKT1 0x085 /* RO */
#define INTEL_MSIC_SVIDTXLASTPKT0 0x086 /* RO */
#define INTEL_MSIC_SVIDPKTOUTBYTE3 0x087
#define INTEL_MSIC_SVIDPKTOUTBYTE2 0x088
#define INTEL_MSIC_SVIDPKTOUTBYTE1 0x089
#define INTEL_MSIC_SVIDPKTOUTBYTE0 0x08a
#define INTEL_MSIC_SVIDRXVPDEBUG1 0x08b
#define INTEL_MSIC_SVIDRXVPDEBUG0 0x08c
#define INTEL_MSIC_SVIDRXLASTPKT3 0x08d /* RO */
#define INTEL_MSIC_SVIDRXLASTPKT2 0x08e /* RO */
#define INTEL_MSIC_SVIDRXLASTPKT1 0x08f /* RO */
#define INTEL_MSIC_SVIDRXLASTPKT0 0x090 /* RO */
#define INTEL_MSIC_SVIDRXCHKSTATUS3 0x091 /* RO */
#define INTEL_MSIC_SVIDRXCHKSTATUS2 0x092 /* RO */
#define INTEL_MSIC_SVIDRXCHKSTATUS1 0x093 /* RO */
#define INTEL_MSIC_SVIDRXCHKSTATUS0 0x094 /* RO */
/* VREG */
/* Voltage regulator control/count registers - per-rail, named by rail */
#define INTEL_MSIC_VCCLATCH 0x0c0
#define INTEL_MSIC_VNNLATCH 0x0c1
#define INTEL_MSIC_VCCCNT 0x0c2
#define INTEL_MSIC_SMPSRAMP 0x0c3
#define INTEL_MSIC_VNNCNT 0x0c4
#define INTEL_MSIC_VNNAONCNT 0x0c5
#define INTEL_MSIC_VCC122AONCNT 0x0c6
#define INTEL_MSIC_V180AONCNT 0x0c7
#define INTEL_MSIC_V500CNT 0x0c8
#define INTEL_MSIC_VIHFCNT 0x0c9
#define INTEL_MSIC_LDORAMP1 0x0ca
#define INTEL_MSIC_LDORAMP2 0x0cb
#define INTEL_MSIC_VCC108AONCNT 0x0cc
#define INTEL_MSIC_VCC108ASCNT 0x0cd
#define INTEL_MSIC_VCC108CNT 0x0ce
#define INTEL_MSIC_VCCA100ASCNT 0x0cf
#define INTEL_MSIC_VCCA100CNT 0x0d0
#define INTEL_MSIC_VCC180AONCNT 0x0d1
#define INTEL_MSIC_VCC180CNT 0x0d2
#define INTEL_MSIC_VCC330CNT 0x0d3
#define INTEL_MSIC_VUSB330CNT 0x0d4
#define INTEL_MSIC_VCCSDIOCNT 0x0d5
#define INTEL_MSIC_VPROG1CNT 0x0d6
#define INTEL_MSIC_VPROG2CNT 0x0d7
#define INTEL_MSIC_VEMMCSCNT 0x0d8
#define INTEL_MSIC_VEMMC1CNT 0x0d9
#define INTEL_MSIC_VEMMC2CNT 0x0da
#define INTEL_MSIC_VAUDACNT 0x0db
#define INTEL_MSIC_VHSPCNT 0x0dc
#define INTEL_MSIC_VHSNCNT 0x0dd
#define INTEL_MSIC_VHDMICNT 0x0de
#define INTEL_MSIC_VOTGCNT 0x0df
#define INTEL_MSIC_V1P35CNT 0x0e0
#define INTEL_MSIC_V330AONCNT 0x0e1
/* RESET */
#define INTEL_MSIC_CHIPCNTRL 0x100 /* WO */
#define INTEL_MSIC_ERCONFIG 0x101
/* BURST */
#define INTEL_MSIC_BATCURRENTLIMIT12 0x102
#define INTEL_MSIC_BATTIMELIMIT12 0x103
#define INTEL_MSIC_BATTIMELIMIT3 0x104
#define INTEL_MSIC_BATTIMEDB 0x105
#define INTEL_MSIC_BRSTCONFIGOUTPUTS 0x106
#define INTEL_MSIC_BRSTCONFIGACTIONS 0x107
#define INTEL_MSIC_BURSTCONTROLSTATUS 0x108
/* RTC */
/*
 * B1..B4 appear to be the 4-byte time counter, OB the "old"/shadow value,
 * AB the alarm and WAB the wake alarm - NOTE(review): naming inferred,
 * confirm against the datasheet.
 */
#define INTEL_MSIC_RTCB1 0x140 /* RO */
#define INTEL_MSIC_RTCB2 0x141 /* RO */
#define INTEL_MSIC_RTCB3 0x142 /* RO */
#define INTEL_MSIC_RTCB4 0x143 /* RO */
#define INTEL_MSIC_RTCOB1 0x144
#define INTEL_MSIC_RTCOB2 0x145
#define INTEL_MSIC_RTCOB3 0x146
#define INTEL_MSIC_RTCOB4 0x147
#define INTEL_MSIC_RTCAB1 0x148
#define INTEL_MSIC_RTCAB2 0x149
#define INTEL_MSIC_RTCAB3 0x14a
#define INTEL_MSIC_RTCAB4 0x14b
#define INTEL_MSIC_RTCWAB1 0x14c
#define INTEL_MSIC_RTCWAB2 0x14d
#define INTEL_MSIC_RTCWAB3 0x14e
#define INTEL_MSIC_RTCWAB4 0x14f
#define INTEL_MSIC_RTCSC1 0x150
#define INTEL_MSIC_RTCSC2 0x151
#define INTEL_MSIC_RTCSC3 0x152
#define INTEL_MSIC_RTCSC4 0x153
#define INTEL_MSIC_RTCSTATUS 0x154 /* RO */
#define INTEL_MSIC_RTCCONFIG1 0x155
#define INTEL_MSIC_RTCCONFIG2 0x156
/* CHARGER */
#define INTEL_MSIC_BDTIMER 0x180
#define INTEL_MSIC_BATTRMV 0x181
#define INTEL_MSIC_VBUSDET 0x182
#define INTEL_MSIC_VBUSDET1 0x183
#define INTEL_MSIC_ADPHVDET 0x184
#define INTEL_MSIC_ADPLVDET 0x185
#define INTEL_MSIC_ADPDETDBDM 0x186
#define INTEL_MSIC_LOWBATTDET 0x187
#define INTEL_MSIC_CHRCTRL 0x188
#define INTEL_MSIC_CHRCVOLTAGE 0x189
#define INTEL_MSIC_CHRCCURRENT 0x18a
#define INTEL_MSIC_SPCHARGER 0x18b
#define INTEL_MSIC_CHRTTIME 0x18c
#define INTEL_MSIC_CHRCTRL1 0x18d
#define INTEL_MSIC_PWRSRCLMT 0x18e
#define INTEL_MSIC_CHRSTWDT 0x18f
#define INTEL_MSIC_WDTWRITE 0x190 /* WO */
#define INTEL_MSIC_CHRSAFELMT 0x191
#define INTEL_MSIC_SPWRSRCINT 0x192 /* RO */
#define INTEL_MSIC_SPWRSRCINT1 0x193 /* RO */
#define INTEL_MSIC_CHRLEDPWM 0x194
#define INTEL_MSIC_CHRLEDCTRL 0x195
/* ADC */
/*
 * ADC1ADDR0..14 presumably select the sampled channels and the paired
 * ...H/...L registers hold each result split into high/low bytes -
 * NOTE(review): confirm result width/alignment in the datasheet.
 */
#define INTEL_MSIC_ADC1CNTL1 0x1c0
#define INTEL_MSIC_ADC1CNTL2 0x1c1
#define INTEL_MSIC_ADC1CNTL3 0x1c2
#define INTEL_MSIC_ADC1OFFSETH 0x1c3 /* RO */
#define INTEL_MSIC_ADC1OFFSETL 0x1c4 /* RO */
#define INTEL_MSIC_ADC1ADDR0 0x1c5
#define INTEL_MSIC_ADC1ADDR1 0x1c6
#define INTEL_MSIC_ADC1ADDR2 0x1c7
#define INTEL_MSIC_ADC1ADDR3 0x1c8
#define INTEL_MSIC_ADC1ADDR4 0x1c9
#define INTEL_MSIC_ADC1ADDR5 0x1ca
#define INTEL_MSIC_ADC1ADDR6 0x1cb
#define INTEL_MSIC_ADC1ADDR7 0x1cc
#define INTEL_MSIC_ADC1ADDR8 0x1cd
#define INTEL_MSIC_ADC1ADDR9 0x1ce
#define INTEL_MSIC_ADC1ADDR10 0x1cf
#define INTEL_MSIC_ADC1ADDR11 0x1d0
#define INTEL_MSIC_ADC1ADDR12 0x1d1
#define INTEL_MSIC_ADC1ADDR13 0x1d2
#define INTEL_MSIC_ADC1ADDR14 0x1d3
#define INTEL_MSIC_ADC1SNS0H 0x1d4 /* RO */
#define INTEL_MSIC_ADC1SNS0L 0x1d5 /* RO */
#define INTEL_MSIC_ADC1SNS1H 0x1d6 /* RO */
#define INTEL_MSIC_ADC1SNS1L 0x1d7 /* RO */
#define INTEL_MSIC_ADC1SNS2H 0x1d8 /* RO */
#define INTEL_MSIC_ADC1SNS2L 0x1d9 /* RO */
#define INTEL_MSIC_ADC1SNS3H 0x1da /* RO */
#define INTEL_MSIC_ADC1SNS3L 0x1db /* RO */
#define INTEL_MSIC_ADC1SNS4H 0x1dc /* RO */
#define INTEL_MSIC_ADC1SNS4L 0x1dd /* RO */
#define INTEL_MSIC_ADC1SNS5H 0x1de /* RO */
#define INTEL_MSIC_ADC1SNS5L 0x1df /* RO */
#define INTEL_MSIC_ADC1SNS6H 0x1e0 /* RO */
#define INTEL_MSIC_ADC1SNS6L 0x1e1 /* RO */
#define INTEL_MSIC_ADC1SNS7H 0x1e2 /* RO */
#define INTEL_MSIC_ADC1SNS7L 0x1e3 /* RO */
#define INTEL_MSIC_ADC1SNS8H 0x1e4 /* RO */
#define INTEL_MSIC_ADC1SNS8L 0x1e5 /* RO */
#define INTEL_MSIC_ADC1SNS9H 0x1e6 /* RO */
#define INTEL_MSIC_ADC1SNS9L 0x1e7 /* RO */
#define INTEL_MSIC_ADC1SNS10H 0x1e8 /* RO */
#define INTEL_MSIC_ADC1SNS10L 0x1e9 /* RO */
#define INTEL_MSIC_ADC1SNS11H 0x1ea /* RO */
#define INTEL_MSIC_ADC1SNS11L 0x1eb /* RO */
#define INTEL_MSIC_ADC1SNS12H 0x1ec /* RO */
#define INTEL_MSIC_ADC1SNS12L 0x1ed /* RO */
#define INTEL_MSIC_ADC1SNS13H 0x1ee /* RO */
#define INTEL_MSIC_ADC1SNS13L 0x1ef /* RO */
#define INTEL_MSIC_ADC1SNS14H 0x1f0 /* RO */
#define INTEL_MSIC_ADC1SNS14L 0x1f1 /* RO */
#define INTEL_MSIC_ADC1BV0H 0x1f2 /* RO */
#define INTEL_MSIC_ADC1BV0L 0x1f3 /* RO */
#define INTEL_MSIC_ADC1BV1H 0x1f4 /* RO */
#define INTEL_MSIC_ADC1BV1L 0x1f5 /* RO */
#define INTEL_MSIC_ADC1BV2H 0x1f6 /* RO */
#define INTEL_MSIC_ADC1BV2L 0x1f7 /* RO */
#define INTEL_MSIC_ADC1BV3H 0x1f8 /* RO */
#define INTEL_MSIC_ADC1BV3L 0x1f9 /* RO */
#define INTEL_MSIC_ADC1BI0H 0x1fa /* RO */
#define INTEL_MSIC_ADC1BI0L 0x1fb /* RO */
#define INTEL_MSIC_ADC1BI1H 0x1fc /* RO */
#define INTEL_MSIC_ADC1BI1L 0x1fd /* RO */
#define INTEL_MSIC_ADC1BI2H 0x1fe /* RO */
#define INTEL_MSIC_ADC1BI2L 0x1ff /* RO */
#define INTEL_MSIC_ADC1BI3H 0x200 /* RO */
#define INTEL_MSIC_ADC1BI3L 0x201 /* RO */
#define INTEL_MSIC_CCCNTL 0x202
#define INTEL_MSIC_CCOFFSETH 0x203 /* RO */
#define INTEL_MSIC_CCOFFSETL 0x204 /* RO */
#define INTEL_MSIC_CCADCHA 0x205 /* RO */
#define INTEL_MSIC_CCADCLA 0x206 /* RO */
/* AUDIO */
#define INTEL_MSIC_AUDPLLCTRL 0x240
#define INTEL_MSIC_DMICBUF0123 0x241
#define INTEL_MSIC_DMICBUF45 0x242
#define INTEL_MSIC_DMICGPO 0x244
#define INTEL_MSIC_DMICMUX 0x245
#define INTEL_MSIC_DMICCLK 0x246
#define INTEL_MSIC_MICBIAS 0x247
#define INTEL_MSIC_ADCCONFIG 0x248
#define INTEL_MSIC_MICAMP1 0x249
#define INTEL_MSIC_MICAMP2 0x24a
#define INTEL_MSIC_NOISEMUX 0x24b
#define INTEL_MSIC_AUDIOMUX12 0x24c
#define INTEL_MSIC_AUDIOMUX34 0x24d
#define INTEL_MSIC_AUDIOSINC 0x24e
#define INTEL_MSIC_AUDIOTXEN 0x24f
#define INTEL_MSIC_HSEPRXCTRL 0x250
#define INTEL_MSIC_IHFRXCTRL 0x251
#define INTEL_MSIC_VOICETXVOL 0x252
#define INTEL_MSIC_SIDETONEVOL 0x253
#define INTEL_MSIC_MUSICSHARVOL 0x254
#define INTEL_MSIC_VOICETXCTRL 0x255
#define INTEL_MSIC_HSMIXER 0x256
#define INTEL_MSIC_DACCONFIG 0x257
#define INTEL_MSIC_SOFTMUTE 0x258
#define INTEL_MSIC_HSLVOLCTRL 0x259
#define INTEL_MSIC_HSRVOLCTRL 0x25a
#define INTEL_MSIC_IHFLVOLCTRL 0x25b
#define INTEL_MSIC_IHFRVOLCTRL 0x25c
#define INTEL_MSIC_DRIVEREN 0x25d
#define INTEL_MSIC_LINEOUTCTRL 0x25e
/* Vibra motor drivers 1 and 2 */
#define INTEL_MSIC_VIB1CTRL1 0x25f
#define INTEL_MSIC_VIB1CTRL2 0x260
#define INTEL_MSIC_VIB1CTRL3 0x261
#define INTEL_MSIC_VIB1SPIPCM_1 0x262
#define INTEL_MSIC_VIB1SPIPCM_2 0x263
#define INTEL_MSIC_VIB1CTRL5 0x264
#define INTEL_MSIC_VIB2CTRL1 0x265
#define INTEL_MSIC_VIB2CTRL2 0x266
#define INTEL_MSIC_VIB2CTRL3 0x267
#define INTEL_MSIC_VIB2SPIPCM_1 0x268
#define INTEL_MSIC_VIB2SPIPCM_2 0x269
#define INTEL_MSIC_VIB2CTRL5 0x26a
#define INTEL_MSIC_BTNCTRL1 0x26b
#define INTEL_MSIC_BTNCTRL2 0x26c
/* PCM interface slot/control registers */
#define INTEL_MSIC_PCM1TXSLOT01 0x26d
#define INTEL_MSIC_PCM1TXSLOT23 0x26e
#define INTEL_MSIC_PCM1TXSLOT45 0x26f
#define INTEL_MSIC_PCM1RXSLOT0123 0x270
#define INTEL_MSIC_PCM1RXSLOT045 0x271
#define INTEL_MSIC_PCM2TXSLOT01 0x272
#define INTEL_MSIC_PCM2TXSLOT23 0x273
#define INTEL_MSIC_PCM2TXSLOT45 0x274
#define INTEL_MSIC_PCM2RXSLOT01 0x275
#define INTEL_MSIC_PCM2RXSLOT23 0x276
#define INTEL_MSIC_PCM2RXSLOT45 0x277
#define INTEL_MSIC_PCM1CTRL1 0x278
#define INTEL_MSIC_PCM1CTRL2 0x279
#define INTEL_MSIC_PCM1CTRL3 0x27a
#define INTEL_MSIC_PCM2CTRL1 0x27b
#define INTEL_MSIC_PCM2CTRL2 0x27c
/* HDMI */
#define INTEL_MSIC_HDMIPUEN 0x280
#define INTEL_MSIC_HDMISTATUS 0x281 /* RO */
/* Physical address of the start of the MSIC interrupt tree in SRAM */
#define INTEL_MSIC_IRQ_PHYS_BASE 0xffff7fc0
/**
 * struct intel_msic_gpio_pdata - platform data for the MSIC GPIO driver
 * @gpio_base: base number for the GPIOs, i.e. the global GPIO number
 *             assigned to the first MSIC GPIO pin
 */
struct intel_msic_gpio_pdata {
	unsigned gpio_base;
};
/**
 * struct intel_msic_ocd_pdata - platform data for the MSIC OCD driver
 * @gpio: GPIO number used for OCD interrupts
 *
 * The MSIC MFD driver converts @gpio into an IRQ number and passes it to
 * the OCD driver as %IORESOURCE_IRQ.
 *
 * (OCD: presumably over-current detection - NOTE(review): confirm.)
 */
struct intel_msic_ocd_pdata {
	unsigned gpio;
};
/*
 * MSIC embedded blocks (subdevices). Values are used to index
 * intel_msic_platform_data.irq[].
 */
enum intel_msic_block {
	INTEL_MSIC_BLOCK_TOUCH,
	INTEL_MSIC_BLOCK_ADC,
	INTEL_MSIC_BLOCK_BATTERY,
	INTEL_MSIC_BLOCK_GPIO,
	INTEL_MSIC_BLOCK_AUDIO,
	INTEL_MSIC_BLOCK_HDMI,
	INTEL_MSIC_BLOCK_THERMAL,
	INTEL_MSIC_BLOCK_POWER_BTN,
	INTEL_MSIC_BLOCK_OCD,
	INTEL_MSIC_BLOCK_LAST,	/* block count sentinel, not a real block */
};
/**
 * struct intel_msic_platform_data - platform data for the MSIC driver
 * @irq: array of interrupt numbers, one per device, indexed by
 *	 &enum intel_msic_block. If @irq is set to %0 for a given block,
 *	 the corresponding platform device is not created. For devices
 *	 which don't have an interrupt, use %0xff (this is same as in SFI
 *	 spec).
 * @gpio: platform data for the MSIC GPIO driver
 * @ocd: platform data for the MSIC OCD driver
 *
 * Once the MSIC driver is initialized, the register interface is ready to
 * use. All the platform devices for subdevices are created after the
 * register interface is ready so that we can guarantee its availability to
 * the subdevice drivers.
 *
 * Interrupt numbers are passed to the subdevices via %IORESOURCE_IRQ
 * resources of the created platform device.
 */
struct intel_msic_platform_data {
	int irq[INTEL_MSIC_BLOCK_LAST];
	struct intel_msic_gpio_pdata *gpio;
	struct intel_msic_ocd_pdata *ocd;
};
/* Opaque MSIC instance; the definition is private to the MSIC MFD driver. */
struct intel_msic;
/*
 * Register access helpers. @reg is an address from the register map above;
 * each register holds one byte. The bulk variants take an array of @count
 * register addresses and transfer one byte per address. Return value is 0
 * on success, negative on failure (kernel errno convention - NOTE(review):
 * confirm exact error codes in the MFD driver).
 */
extern int intel_msic_reg_read(unsigned short reg, u8 *val);
extern int intel_msic_reg_write(unsigned short reg, u8 val);
extern int intel_msic_reg_update(unsigned short reg, u8 val, u8 mask);
extern int intel_msic_bulk_read(unsigned short *reg, u8 *buf, size_t count);
extern int intel_msic_bulk_write(unsigned short *reg, u8 *buf, size_t count);
/*
 * pdev_to_intel_msic - gets an MSIC instance from the platform device
 * @pdev: platform device pointer
 *
 * The client drivers need to have pointer to the MSIC instance if they
 * want to call intel_msic_irq_read(). This macro can be used for
 * convenience to get the MSIC pointer from @pdev where needed. This is
 * _only_ valid for devices which are managed by the MSIC.
 */
/* Parenthesize the macro argument so non-trivial expressions (casts,
 * conditional expressions) expand safely. */
#define pdev_to_intel_msic(pdev) (dev_get_drvdata((pdev)->dev.parent))
/* Read MSIC interrupt tree register @reg of @msic into @val - presumably
 * from the SRAM copy at INTEL_MSIC_IRQ_PHYS_BASE; verify in the driver. */
extern int intel_msic_irq_read(struct intel_msic *msic, unsigned short reg,
			       u8 *val);
#endif /* __LINUX_MFD_INTEL_MSIC_H__ */

View File

@@ -279,7 +279,7 @@ struct mhi_controller_config {
u32 num_channels;
const struct mhi_channel_config *ch_cfg;
u32 num_events;
const struct mhi_event_config *event_cfg;
struct mhi_event_config *event_cfg;
bool use_bounce_buf;
bool m2_no_db;
};
@@ -347,12 +347,14 @@ struct mhi_controller_config {
* @unmap_single: CB function to destroy TRE buffer
* @read_reg: Read a MHI register via the physical link (required)
* @write_reg: Write a MHI register via the physical link (required)
* @reset: Controller specific reset function (optional)
* @buffer_len: Bounce buffer length
* @index: Index of the MHI controller instance
* @bounce_buf: Use of bounce buffer
* @fbc_download: MHI host needs to do complete image transfer (optional)
* @pre_init: MHI host needs to do pre-initialization before power up
* @wake_set: Device wakeup set flag
* @irq_flags: irq flags passed to request_irq (optional)
*
* Fields marked as (required) need to be populated by the controller driver
* before calling mhi_register_controller(). For the fields marked as (optional)
@@ -437,6 +439,7 @@ struct mhi_controller {
u32 *out);
void (*write_reg)(struct mhi_controller *mhi_cntrl, void __iomem *addr,
u32 val);
void (*reset)(struct mhi_controller *mhi_cntrl);
size_t buffer_len;
int index;
@@ -444,6 +447,7 @@ struct mhi_controller {
bool fbc_download;
bool pre_init;
bool wake_set;
unsigned long irq_flags;
};
/**
@@ -681,6 +685,13 @@ enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl);
*/
enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl);
/**
* mhi_soc_reset - Trigger a device reset. This can be used as a last resort
* to reset and recover a device.
* @mhi_cntrl: MHI controller
*/
void mhi_soc_reset(struct mhi_controller *mhi_cntrl);
/**
* mhi_device_get - Disable device low power mode
* @mhi_dev: Device associated with the channel

View File

@@ -89,7 +89,7 @@ extern int PageMovable(struct page *page);
extern void __SetPageMovable(struct page *page, struct address_space *mapping);
extern void __ClearPageMovable(struct page *page);
#else
static inline int PageMovable(struct page *page) { return 0; };
static inline int PageMovable(struct page *page) { return 0; }
static inline void __SetPageMovable(struct page *page,
struct address_space *mapping)
{

View File

@@ -1187,6 +1187,9 @@ static inline void get_page(struct page *page)
}
bool __must_check try_grab_page(struct page *page, unsigned int flags);
__maybe_unused struct page *try_grab_compound_head(struct page *page, int refs,
unsigned int flags);
static inline __must_check bool try_get_page(struct page *page)
{
@@ -2310,32 +2313,20 @@ extern void free_initmem(void);
extern unsigned long free_reserved_area(void *start, void *end,
int poison, const char *s);
#ifdef CONFIG_HIGHMEM
/*
* Free a highmem page into the buddy system, adjusting totalhigh_pages
* and totalram_pages.
*/
extern void free_highmem_page(struct page *page);
#endif
extern void adjust_managed_page_count(struct page *page, long count);
extern void mem_init_print_info(const char *str);
extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end);
/* Free the reserved page into the buddy system, so it gets managed. */
static inline void __free_reserved_page(struct page *page)
static inline void free_reserved_page(struct page *page)
{
ClearPageReserved(page);
init_page_count(page);
__free_page(page);
}
static inline void free_reserved_page(struct page *page)
{
__free_reserved_page(page);
adjust_managed_page_count(page, 1);
}
#define free_highmem_page(page) free_reserved_page(page)
static inline void mark_page_reserved(struct page *page)
{
@@ -2405,9 +2396,10 @@ extern int __meminit early_pfn_to_nid(unsigned long pfn);
#endif
extern void set_dma_reserve(unsigned long new_dma_reserve);
extern void memmap_init_zone(unsigned long, int, unsigned long,
extern void memmap_init_range(unsigned long, int, unsigned long,
unsigned long, unsigned long, enum meminit_context,
struct vmem_altmap *, int migratetype);
extern void memmap_init_zone(struct zone *zone);
extern void setup_per_zone_wmarks(void);
extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void);

View File

@@ -24,7 +24,7 @@ static inline int page_is_file_lru(struct page *page)
return !PageSwapBacked(page);
}
static __always_inline void __update_lru_size(struct lruvec *lruvec,
static __always_inline void update_lru_size(struct lruvec *lruvec,
enum lru_list lru, enum zone_type zid,
int nr_pages)
{
@@ -33,76 +33,27 @@ static __always_inline void __update_lru_size(struct lruvec *lruvec,
__mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
__mod_zone_page_state(&pgdat->node_zones[zid],
NR_ZONE_LRU_BASE + lru, nr_pages);
}
static __always_inline void update_lru_size(struct lruvec *lruvec,
enum lru_list lru, enum zone_type zid,
int nr_pages)
{
__update_lru_size(lruvec, lru, zid, nr_pages);
#ifdef CONFIG_MEMCG
mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
#endif
}
static __always_inline void add_page_to_lru_list(struct page *page,
struct lruvec *lruvec, enum lru_list lru)
{
update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
list_add(&page->lru, &lruvec->lists[lru]);
}
static __always_inline void add_page_to_lru_list_tail(struct page *page,
struct lruvec *lruvec, enum lru_list lru)
{
update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
list_add_tail(&page->lru, &lruvec->lists[lru]);
}
static __always_inline void del_page_from_lru_list(struct page *page,
struct lruvec *lruvec, enum lru_list lru)
{
list_del(&page->lru);
update_lru_size(lruvec, lru, page_zonenum(page), -thp_nr_pages(page));
}
/**
* page_lru_base_type - which LRU list type should a page be on?
* @page: the page to test
*
* Used for LRU list index arithmetic.
*
* Returns the base LRU type - file or anon - @page should be on.
* __clear_page_lru_flags - clear page lru flags before releasing a page
* @page: the page that was on lru and now has a zero reference
*/
static inline enum lru_list page_lru_base_type(struct page *page)
static __always_inline void __clear_page_lru_flags(struct page *page)
{
if (page_is_file_lru(page))
return LRU_INACTIVE_FILE;
return LRU_INACTIVE_ANON;
}
VM_BUG_ON_PAGE(!PageLRU(page), page);
/**
* page_off_lru - which LRU list was page on? clearing its lru flags.
* @page: the page to test
*
* Returns the LRU list a page was on, as an index into the array of LRU
* lists; and clears its Unevictable or Active flags, ready for freeing.
*/
static __always_inline enum lru_list page_off_lru(struct page *page)
{
enum lru_list lru;
__ClearPageLRU(page);
if (PageUnevictable(page)) {
__ClearPageUnevictable(page);
lru = LRU_UNEVICTABLE;
} else {
lru = page_lru_base_type(page);
if (PageActive(page)) {
__ClearPageActive(page);
lru += LRU_ACTIVE;
}
}
return lru;
/* this shouldn't happen, so leave the flags to bad_page() */
if (PageActive(page) && PageUnevictable(page))
return;
__ClearPageActive(page);
__ClearPageUnevictable(page);
}
/**
@@ -116,13 +67,41 @@ static __always_inline enum lru_list page_lru(struct page *page)
{
enum lru_list lru;
VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
if (PageUnevictable(page))
lru = LRU_UNEVICTABLE;
else {
lru = page_lru_base_type(page);
if (PageActive(page))
lru += LRU_ACTIVE;
}
return LRU_UNEVICTABLE;
lru = page_is_file_lru(page) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
if (PageActive(page))
lru += LRU_ACTIVE;
return lru;
}
static __always_inline void add_page_to_lru_list(struct page *page,
struct lruvec *lruvec)
{
enum lru_list lru = page_lru(page);
update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
list_add(&page->lru, &lruvec->lists[lru]);
}
static __always_inline void add_page_to_lru_list_tail(struct page *page,
struct lruvec *lruvec)
{
enum lru_list lru = page_lru(page);
update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
list_add_tail(&page->lru, &lruvec->lists[lru]);
}
static __always_inline void del_page_from_lru_list(struct page *page,
struct lruvec *lruvec)
{
list_del(&page->lru);
update_lru_size(lruvec, page_lru(page), page_zonenum(page),
-thp_nr_pages(page));
}
#endif

View File

@@ -206,9 +206,29 @@ enum node_stat_item {
NR_KERNEL_SCS_KB, /* measured in KiB */
#endif
NR_PAGETABLE, /* used for pagetables */
#ifdef CONFIG_SWAP
NR_SWAPCACHE,
#endif
NR_VM_NODE_STAT_ITEMS
};
/*
* Returns true if the item should be printed in THPs (/proc/vmstat
* currently prints number of anon, file and shmem THPs. But the item
* is charged in pages).
*/
static __always_inline bool vmstat_item_print_in_thp(enum node_stat_item item)
{
if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
return false;
return item == NR_ANON_THPS ||
item == NR_FILE_THPS ||
item == NR_SHMEM_THPS ||
item == NR_SHMEM_PMDMAPPED ||
item == NR_FILE_PMDMAPPED;
}
/*
* Returns true if the value is measured in bytes (most vmstat values are
* measured in pages). This defines the API part, the internal representation
@@ -483,6 +503,9 @@ struct zone {
* bootmem allocator):
* managed_pages = present_pages - reserved_pages;
*
* cma pages is present pages that are assigned for CMA use
* (MIGRATE_CMA).
*
* So present_pages may be used by memory hotplug or memory power
* management logic to figure out unmanaged pages by checking
* (present_pages - managed_pages). And managed_pages should be used
@@ -507,6 +530,9 @@ struct zone {
atomic_long_t managed_pages;
unsigned long spanned_pages;
unsigned long present_pages;
#ifdef CONFIG_CMA
unsigned long cma_pages;
#endif
const char *name;
@@ -604,6 +630,15 @@ static inline unsigned long zone_managed_pages(struct zone *zone)
return (unsigned long)atomic_long_read(&zone->managed_pages);
}
static inline unsigned long zone_cma_pages(struct zone *zone)
{
#ifdef CONFIG_CMA
return zone->cma_pages;
#else
return 0;
#endif
}
static inline unsigned long zone_end_pfn(const struct zone *zone)
{
return zone->zone_start_pfn + zone->spanned_pages;
@@ -872,8 +907,6 @@ static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
#endif
}
extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx);
#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
@@ -885,6 +918,18 @@ static inline int local_memory_node(int node_id) { return node_id; };
*/
#define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)
#ifdef CONFIG_ZONE_DEVICE
static inline bool zone_is_zone_device(struct zone *zone)
{
return zone_idx(zone) == ZONE_DEVICE;
}
#else
static inline bool zone_is_zone_device(struct zone *zone)
{
return false;
}
#endif
/*
* Returns true if a zone has pages managed by the buddy allocator.
* All the reclaim decisions have to use this function rather than
@@ -1273,13 +1318,14 @@ extern size_t mem_section_usage_size(void);
* which results in PFN_SECTION_SHIFT equal 6.
* To sum it up, at least 6 bits are available.
*/
#define SECTION_MARKED_PRESENT (1UL<<0)
#define SECTION_HAS_MEM_MAP (1UL<<1)
#define SECTION_IS_ONLINE (1UL<<2)
#define SECTION_IS_EARLY (1UL<<3)
#define SECTION_MAP_LAST_BIT (1UL<<4)
#define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT 3
#define SECTION_MARKED_PRESENT (1UL<<0)
#define SECTION_HAS_MEM_MAP (1UL<<1)
#define SECTION_IS_ONLINE (1UL<<2)
#define SECTION_IS_EARLY (1UL<<3)
#define SECTION_TAINT_ZONE_DEVICE (1UL<<4)
#define SECTION_MAP_LAST_BIT (1UL<<5)
#define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT 3
static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
@@ -1318,6 +1364,13 @@ static inline int online_section(struct mem_section *section)
return (section && (section->section_mem_map & SECTION_IS_ONLINE));
}
static inline int online_device_section(struct mem_section *section)
{
unsigned long flags = SECTION_IS_ONLINE | SECTION_TAINT_ZONE_DEVICE;
return section && ((section->section_mem_map & flags) == flags);
}
static inline int online_section_nr(unsigned long nr)
{
return online_section(__nr_to_section(nr));

View File

@@ -864,4 +864,28 @@ struct ssam_device_id {
kernel_ulong_t driver_data;
};
/*
* DFL (Device Feature List)
*
* DFL defines a linked list of feature headers within the device MMIO space to
* provide an extensible way of adding features. Software can walk through these
* predefined data structures to enumerate features. It is now used in the FPGA.
* See Documentation/fpga/dfl.rst for more information.
*
* The dfl bus type is introduced to match the individual feature devices (dfl
* devices) for specific dfl drivers.
*/
/**
* struct dfl_device_id - dfl device identifier
* @type: DFL FIU type of the device. See enum dfl_id_type.
* @feature_id: feature identifier local to its DFL FIU type.
* @driver_data: driver specific data.
*/
struct dfl_device_id {
__u16 type;
__u16 feature_id;
kernel_ulong_t driver_data;
};
#endif /* LINUX_MOD_DEVICETABLE_H */

View File

@@ -86,7 +86,6 @@ struct path;
extern int mnt_want_write(struct vfsmount *mnt);
extern int mnt_want_write_file(struct file *file);
extern int mnt_clone_write(struct vfsmount *mnt);
extern void mnt_drop_write(struct vfsmount *mnt);
extern void mnt_drop_write_file(struct file *file);
extern void mntput(struct vfsmount *mnt);

View File

@@ -26,7 +26,7 @@ struct nd_device_driver {
struct device_driver drv;
unsigned long type;
int (*probe)(struct device *dev);
int (*remove)(struct device *dev);
void (*remove)(struct device *dev);
void (*shutdown)(struct device *dev);
void (*notify)(struct device *dev, enum nvdimm_event event);
};

View File

@@ -42,8 +42,6 @@ struct net;
#define SOCK_PASSCRED 3
#define SOCK_PASSSEC 4
#define PROTO_CMSG_DATA_ONLY 0x0001
#ifndef ARCH_HAS_SOCKET_TYPES
/**
* enum sock_type - Socket types
@@ -138,7 +136,6 @@ typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
struct proto_ops {
int family;
unsigned int flags;
struct module *owner;
int (*release) (struct socket *sock);
int (*bind) (struct socket *sock,

View File

@@ -1584,6 +1584,12 @@ enum netdev_priv_flags {
#define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER
#define IFF_LIVE_RENAME_OK IFF_LIVE_RENAME_OK
/* Specifies the type of the struct net_device::ml_priv pointer */
enum netdev_ml_priv_type {
ML_PRIV_NONE,
ML_PRIV_CAN,
};
/**
* struct net_device - The DEVICE structure.
*
@@ -1779,6 +1785,7 @@ enum netdev_priv_flags {
* @nd_net: Network namespace this network device is inside
*
* @ml_priv: Mid-layer private
* @ml_priv_type: Mid-layer private type
* @lstats: Loopback statistics
* @tstats: Tunnel statistics
* @dstats: Dummy statistics
@@ -2094,8 +2101,10 @@ struct net_device {
possible_net_t nd_net;
/* mid-layer private */
void *ml_priv;
enum netdev_ml_priv_type ml_priv_type;
union {
void *ml_priv;
struct pcpu_lstats __percpu *lstats;
struct pcpu_sw_netstats __percpu *tstats;
struct pcpu_dstats __percpu *dstats;
@@ -2286,6 +2295,29 @@ static inline void netdev_reset_rx_headroom(struct net_device *dev)
netdev_set_rx_headroom(dev, -1);
}
static inline void *netdev_get_ml_priv(struct net_device *dev,
enum netdev_ml_priv_type type)
{
if (dev->ml_priv_type != type)
return NULL;
return dev->ml_priv;
}
static inline void netdev_set_ml_priv(struct net_device *dev,
void *ml_priv,
enum netdev_ml_priv_type type)
{
WARN(dev->ml_priv_type && dev->ml_priv_type != type,
"Overwriting already set ml_priv_type (%u) with different ml_priv_type (%u)!\n",
dev->ml_priv_type, type);
WARN(!dev->ml_priv_type && dev->ml_priv,
"Overwriting already set ml_priv and ml_priv_type is ML_PRIV_NONE!\n");
dev->ml_priv = ml_priv;
dev->ml_priv_type = type;
}
/*
* Net namespace inlines
*/

View File

@@ -388,6 +388,7 @@ extern int nfs_open(struct inode *, struct file *);
extern int nfs_attribute_cache_expired(struct inode *inode);
extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode);
extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *);
extern int nfs_clear_invalid_mapping(struct address_space *mapping);
extern bool nfs_mapping_need_revalidate_inode(struct inode *inode);
extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping);
extern int nfs_revalidate_mapping_rcu(struct inode *inode);
@@ -571,8 +572,6 @@ nfs_have_writebacks(struct inode *inode)
extern int nfs_readpage(struct file *, struct page *);
extern int nfs_readpages(struct file *, struct address_space *,
struct list_head *, unsigned);
extern int nfs_readpage_async(struct nfs_open_context *, struct inode *,
struct page *);
/*
* inline functions

View File

@@ -142,7 +142,7 @@ struct nfs_server {
struct nlm_host *nlm_host; /* NLM client handle */
struct nfs_iostats __percpu *io_stats; /* I/O statistics */
atomic_long_t writeback; /* number of writeback pages */
int flags; /* various flags */
unsigned int flags; /* various flags */
/* The following are for internal use only. Also see uapi/linux/nfs_mount.h */
#define NFS_MOUNT_LOOKUP_CACHE_NONEG 0x10000
@@ -153,6 +153,8 @@ struct nfs_server {
#define NFS_MOUNT_LOCAL_FCNTL 0x200000
#define NFS_MOUNT_SOFTERR 0x400000
#define NFS_MOUNT_SOFTREVAL 0x800000
#define NFS_MOUNT_WRITE_EAGER 0x01000000
#define NFS_MOUNT_WRITE_WAIT 0x02000000
unsigned int caps; /* server capabilities */
unsigned int rsize; /* read size */

View File

@@ -33,8 +33,6 @@ static inline int of_irq_parse_oldworld(struct device_node *device, int index,
#endif /* CONFIG_PPC32 && CONFIG_PPC_PMAC */
extern int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq);
extern int of_irq_parse_one(struct device_node *device, int index,
struct of_phandle_args *out_irq);
extern unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data);
extern int of_irq_to_resource(struct device_node *dev, int index,
struct resource *r);
@@ -42,6 +40,8 @@ extern int of_irq_to_resource(struct device_node *dev, int index,
extern void of_irq_init(const struct of_device_id *matches);
#ifdef CONFIG_OF_IRQ
extern int of_irq_parse_one(struct device_node *device, int index,
struct of_phandle_args *out_irq);
extern int of_irq_count(struct device_node *dev);
extern int of_irq_get(struct device_node *dev, int index);
extern int of_irq_get_byname(struct device_node *dev, const char *name);
@@ -57,6 +57,11 @@ extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev,
extern void of_msi_configure(struct device *dev, struct device_node *np);
u32 of_msi_map_id(struct device *dev, struct device_node *msi_np, u32 id_in);
#else
static inline int of_irq_parse_one(struct device_node *device, int index,
struct of_phandle_args *out_irq)
{
return -EINVAL;
}
static inline int of_irq_count(struct device_node *dev)
{
return 0;

View File

@@ -592,15 +592,9 @@ static inline void ClearPageCompound(struct page *page)
#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
int PageHeadHuge(struct page *page);
bool page_huge_active(struct page *page);
#else
TESTPAGEFLAG_FALSE(Huge)
TESTPAGEFLAG_FALSE(HeadHuge)
static inline bool page_huge_active(struct page *page)
{
return 0;
}
#endif
@@ -816,7 +810,7 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
/*
* Flags checked when a page is freed. Pages being freed should not have
* these flags set. It they are, there is a problem.
* these flags set. If they are, there is a problem.
*/
#define PAGE_FLAGS_CHECK_AT_FREE \
(1UL << PG_lru | 1UL << PG_locked | \
@@ -827,7 +821,7 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
/*
* Flags checked when a page is prepped for return by the page allocator.
* Pages being prepped should not have these flags set. It they are set,
* Pages being prepped should not have these flags set. If they are set,
* there has been a kernel bug or struct page corruption.
*
* __PG_HWPOISON is exceptional because it needs to be kept beyond page's

View File

@@ -12,7 +12,6 @@ struct page_counter {
unsigned long low;
unsigned long high;
unsigned long max;
struct page_counter *parent;
/* effective memory.min and memory.min usage tracking */
unsigned long emin;
@@ -27,6 +26,14 @@ struct page_counter {
/* legacy */
unsigned long watermark;
unsigned long failcnt;
/*
* 'parent' is placed here to be far from 'usage' to reduce
* cache false sharing, as 'usage' is written mostly while
* parent is frequently read for cgroup's hierarchical
* counting nature.
*/
struct page_counter *parent;
};
#if BITS_PER_LONG == 32

View File

@@ -315,6 +315,7 @@ pgoff_t page_cache_prev_miss(struct address_space *mapping,
#define FGP_NOWAIT 0x00000020
#define FGP_FOR_MMAP 0x00000040
#define FGP_HEAD 0x00000080
#define FGP_ENTRY 0x00000100
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
int fgp_flags, gfp_t cache_gfp_mask);
@@ -450,8 +451,7 @@ static inline struct page *find_subpage(struct page *head, pgoff_t index)
}
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
unsigned int nr_entries, struct page **entries,
pgoff_t *indices);
pgoff_t end, struct pagevec *pvec, pgoff_t *indices);
unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
pgoff_t end, unsigned int nr_pages,
struct page **pages);
@@ -681,8 +681,7 @@ static inline int wait_on_page_locked_killable(struct page *page)
return wait_on_page_bit_killable(compound_head(page), PG_locked);
}
extern void put_and_wait_on_page_locked(struct page *page);
int put_and_wait_on_page_locked(struct page *page, int state);
void wait_on_page_writeback(struct page *page);
extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);
@@ -757,9 +756,11 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
void replace_page_cache_page(struct page *old, struct page *new);
void delete_from_page_cache_batch(struct address_space *mapping,
struct pagevec *pvec);
loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end,
int whence);
/*
* Like add_to_page_cache_locked, but used to add newly allocated pages:

View File

@@ -25,10 +25,6 @@ struct pagevec {
void __pagevec_release(struct pagevec *pvec);
void __pagevec_lru_add(struct pagevec *pvec);
unsigned pagevec_lookup_entries(struct pagevec *pvec,
struct address_space *mapping,
pgoff_t start, unsigned nr_entries,
pgoff_t *indices);
void pagevec_remove_exceptionals(struct pagevec *pvec);
unsigned pagevec_lookup_range(struct pagevec *pvec,
struct address_space *mapping,

View File

@@ -13,6 +13,12 @@
struct pci_epc;
enum pci_epc_interface_type {
UNKNOWN_INTERFACE = -1,
PRIMARY_INTERFACE,
SECONDARY_INTERFACE,
};
enum pci_epc_irq_type {
PCI_EPC_IRQ_UNKNOWN,
PCI_EPC_IRQ_LEGACY,
@@ -20,6 +26,19 @@ enum pci_epc_irq_type {
PCI_EPC_IRQ_MSIX,
};
static inline const char *
pci_epc_interface_string(enum pci_epc_interface_type type)
{
switch (type) {
case PRIMARY_INTERFACE:
return "primary";
case SECONDARY_INTERFACE:
return "secondary";
default:
return "UNKNOWN interface";
}
}
/**
* struct pci_epc_ops - set of function pointers for performing EPC operations
* @write_header: ops to populate configuration space header
@@ -36,6 +55,7 @@ enum pci_epc_irq_type {
* @get_msix: ops to get the number of MSI-X interrupts allocated by the RC
* from the MSI-X capability register
* @raise_irq: ops to raise a legacy, MSI or MSI-X interrupt
* @map_msi_irq: ops to map physical address to MSI address and return MSI data
* @start: ops to start the PCI link
* @stop: ops to stop the PCI link
* @owner: the module owner containing the ops
@@ -58,6 +78,10 @@ struct pci_epc_ops {
int (*get_msix)(struct pci_epc *epc, u8 func_no);
int (*raise_irq)(struct pci_epc *epc, u8 func_no,
enum pci_epc_irq_type type, u16 interrupt_num);
int (*map_msi_irq)(struct pci_epc *epc, u8 func_no,
phys_addr_t phys_addr, u8 interrupt_num,
u32 entry_size, u32 *msi_data,
u32 *msi_addr_offset);
int (*start)(struct pci_epc *epc);
void (*stop)(struct pci_epc *epc);
const struct pci_epc_features* (*get_features)(struct pci_epc *epc,
@@ -175,10 +199,12 @@ __pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
struct module *owner);
void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc);
void pci_epc_destroy(struct pci_epc *epc);
int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf);
int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
enum pci_epc_interface_type type);
void pci_epc_linkup(struct pci_epc *epc);
void pci_epc_init_notify(struct pci_epc *epc);
void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf);
void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf,
enum pci_epc_interface_type type);
int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
struct pci_epf_header *hdr);
int pci_epc_set_bar(struct pci_epc *epc, u8 func_no,
@@ -195,14 +221,19 @@ int pci_epc_get_msi(struct pci_epc *epc, u8 func_no);
int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts,
enum pci_barno, u32 offset);
int pci_epc_get_msix(struct pci_epc *epc, u8 func_no);
int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no,
phys_addr_t phys_addr, u8 interrupt_num,
u32 entry_size, u32 *msi_data, u32 *msi_addr_offset);
int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
enum pci_epc_irq_type type, u16 interrupt_num);
int pci_epc_start(struct pci_epc *epc);
void pci_epc_stop(struct pci_epc *epc);
const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
u8 func_no);
unsigned int pci_epc_get_first_free_bar(const struct pci_epc_features
*epc_features);
enum pci_barno
pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features);
enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features
*epc_features, enum pci_barno bar);
struct pci_epc *pci_epc_get(const char *epc_name);
void pci_epc_put(struct pci_epc *epc);

View File

@@ -9,11 +9,13 @@
#ifndef __LINUX_PCI_EPF_H
#define __LINUX_PCI_EPF_H
#include <linux/configfs.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/pci.h>
struct pci_epf;
enum pci_epc_interface_type;
enum pci_notify_event {
CORE_INIT,
@@ -21,6 +23,7 @@ enum pci_notify_event {
};
enum pci_barno {
NO_BAR = -1,
BAR_0,
BAR_1,
BAR_2,
@@ -60,10 +63,13 @@ struct pci_epf_header {
* @bind: ops to perform when a EPC device has been bound to EPF device
* @unbind: ops to perform when a binding has been lost between a EPC device
* and EPF device
* @add_cfs: ops to initialize function specific configfs attributes
*/
struct pci_epf_ops {
int (*bind)(struct pci_epf *epf);
void (*unbind)(struct pci_epf *epf);
struct config_group *(*add_cfs)(struct pci_epf *epf,
struct config_group *group);
};
/**
@@ -118,6 +124,12 @@ struct pci_epf_bar {
* @list: to add pci_epf as a list of PCI endpoint functions to pci_epc
* @nb: notifier block to notify EPF of any EPC events (like linkup)
* @lock: mutex to protect pci_epf_ops
* @sec_epc: the secondary EPC device to which this EPF device is bound
* @sec_epc_list: to add pci_epf as list of PCI endpoint functions to secondary
* EPC device
* @sec_epc_bar: represents the BAR of EPF device associated with secondary EPC
* @sec_epc_func_no: unique (physical) function number within the secondary EPC
* @group: configfs group associated with the EPF device
*/
struct pci_epf {
struct device dev;
@@ -134,6 +146,13 @@ struct pci_epf {
struct notifier_block nb;
/* mutex to protect against concurrent access of pci_epf_ops */
struct mutex lock;
/* Below members are to attach secondary EPC to an endpoint function */
struct pci_epc *sec_epc;
struct list_head sec_epc_list;
struct pci_epf_bar sec_epc_bar[6];
u8 sec_epc_func_no;
struct config_group *group;
};
/**
@@ -164,16 +183,17 @@ static inline void *epf_get_drvdata(struct pci_epf *epf)
return dev_get_drvdata(&epf->dev);
}
const struct pci_epf_device_id *
pci_epf_match_device(const struct pci_epf_device_id *id, struct pci_epf *epf);
struct pci_epf *pci_epf_create(const char *name);
void pci_epf_destroy(struct pci_epf *epf);
int __pci_epf_register_driver(struct pci_epf_driver *driver,
struct module *owner);
void pci_epf_unregister_driver(struct pci_epf_driver *driver);
void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
size_t align);
void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar);
size_t align, enum pci_epc_interface_type type);
void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar,
enum pci_epc_interface_type type);
int pci_epf_bind(struct pci_epf *epf);
void pci_epf_unbind(struct pci_epf *epf);
struct config_group *pci_epf_type_add_cfs(struct pci_epf *epf,
struct config_group *group);
#endif /* __LINUX_PCI_EPF_H */

View File

@@ -51,6 +51,7 @@
#define PCI_BASE_CLASS_MEMORY 0x05
#define PCI_CLASS_MEMORY_RAM 0x0500
#define PCI_CLASS_MEMORY_FLASH 0x0501
#define PCI_CLASS_MEMORY_CXL 0x0502
#define PCI_CLASS_MEMORY_OTHER 0x0580
#define PCI_BASE_CLASS_BRIDGE 0x06
@@ -881,6 +882,7 @@
#define PCI_DEVICE_ID_TI_X620 0xac8d
#define PCI_DEVICE_ID_TI_X420 0xac8e
#define PCI_DEVICE_ID_TI_XX20_FM 0xac8f
#define PCI_DEVICE_ID_TI_J721E 0xb00d
#define PCI_DEVICE_ID_TI_DRA74x 0xb500
#define PCI_DEVICE_ID_TI_DRA72x 0xb501
@@ -2588,6 +2590,8 @@
#define PCI_VENDOR_ID_REDHAT 0x1b36
#define PCI_VENDOR_ID_SILICOM_DENMARK 0x1c2c
#define PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS 0x1c36
#define PCI_VENDOR_ID_CIRCUITCO 0x1cc8

View File

@@ -432,14 +432,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
* To be differentiate with macro pte_mkyoung, this macro is used on platforms
* where software maintains page access bit.
*/
#ifndef pte_sw_mkyoung
static inline pte_t pte_sw_mkyoung(pte_t pte)
{
return pte;
}
#define pte_sw_mkyoung pte_sw_mkyoung
#endif
#ifndef pte_savedwrite
#define pte_savedwrite pte_write
#endif

View File

@@ -1,61 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Header file for the Atmel AHB DMA Controller driver
*
* Copyright (C) 2008 Atmel Corporation
*/
#ifndef AT_HDMAC_H
#define AT_HDMAC_H
#include <linux/dmaengine.h>
/**
* struct at_dma_platform_data - Controller configuration parameters
* @nr_channels: Number of channels supported by hardware (max 8)
* @cap_mask: dma_capability flags supported by the platform
*/
struct at_dma_platform_data {
unsigned int nr_channels;
dma_cap_mask_t cap_mask;
};
/**
* struct at_dma_slave - Controller-specific information about a slave
* @dma_dev: required DMA master device
* @cfg: Platform-specific initializer for the CFG register
*/
struct at_dma_slave {
struct device *dma_dev;
u32 cfg;
};
/* Platform-configurable bits in CFG */
#define ATC_PER_MSB(h) ((0x30U & (h)) >> 4) /* Extract most significant bits of a handshaking identifier */
#define ATC_SRC_PER(h) (0xFU & (h)) /* Channel src rq associated with periph handshaking ifc h */
#define ATC_DST_PER(h) ((0xFU & (h)) << 4) /* Channel dst rq associated with periph handshaking ifc h */
#define ATC_SRC_REP (0x1 << 8) /* Source Replay Mod */
#define ATC_SRC_H2SEL (0x1 << 9) /* Source Handshaking Mod */
#define ATC_SRC_H2SEL_SW (0x0 << 9)
#define ATC_SRC_H2SEL_HW (0x1 << 9)
#define ATC_SRC_PER_MSB(h) (ATC_PER_MSB(h) << 10) /* Channel src rq (most significant bits) */
#define ATC_DST_REP (0x1 << 12) /* Destination Replay Mod */
#define ATC_DST_H2SEL (0x1 << 13) /* Destination Handshaking Mod */
#define ATC_DST_H2SEL_SW (0x0 << 13)
#define ATC_DST_H2SEL_HW (0x1 << 13)
#define ATC_DST_PER_MSB(h) (ATC_PER_MSB(h) << 14) /* Channel dst rq (most significant bits) */
#define ATC_SOD (0x1 << 16) /* Stop On Done */
#define ATC_LOCK_IF (0x1 << 20) /* Interface Lock */
#define ATC_LOCK_B (0x1 << 21) /* AHB Bus Lock */
#define ATC_LOCK_IF_L (0x1 << 22) /* Master Interface Arbiter Lock */
#define ATC_LOCK_IF_L_CHUNK (0x0 << 22)
#define ATC_LOCK_IF_L_BUFFER (0x1 << 22)
#define ATC_AHB_PROT_MASK (0x7 << 24) /* AHB Protection */
#define ATC_FIFOCFG_MASK (0x3 << 28) /* FIFO Request Configuration */
#define ATC_FIFOCFG_LARGESTBURST (0x0 << 28)
#define ATC_FIFOCFG_HALFFIFO (0x1 << 28)
#define ATC_FIFOCFG_ENOUGHSPACE (0x2 << 28)
#endif /* AT_HDMAC_H */

View File

@@ -1,72 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Platform data for the COH901318 DMA controller
* Copyright (C) 2007-2013 ST-Ericsson
*/
#ifndef PLAT_COH901318_H
#define PLAT_COH901318_H
#ifdef CONFIG_COH901318
/* We only support the U300 DMA channels */
#define U300_DMA_MSL_TX_0 0
#define U300_DMA_MSL_TX_1 1
#define U300_DMA_MSL_TX_2 2
#define U300_DMA_MSL_TX_3 3
#define U300_DMA_MSL_TX_4 4
#define U300_DMA_MSL_TX_5 5
#define U300_DMA_MSL_TX_6 6
#define U300_DMA_MSL_RX_0 7
#define U300_DMA_MSL_RX_1 8
#define U300_DMA_MSL_RX_2 9
#define U300_DMA_MSL_RX_3 10
#define U300_DMA_MSL_RX_4 11
#define U300_DMA_MSL_RX_5 12
#define U300_DMA_MSL_RX_6 13
#define U300_DMA_MMCSD_RX_TX 14
#define U300_DMA_MSPRO_TX 15
#define U300_DMA_MSPRO_RX 16
#define U300_DMA_UART0_TX 17
#define U300_DMA_UART0_RX 18
#define U300_DMA_APEX_TX 19
#define U300_DMA_APEX_RX 20
#define U300_DMA_PCM_I2S0_TX 21
#define U300_DMA_PCM_I2S0_RX 22
#define U300_DMA_PCM_I2S1_TX 23
#define U300_DMA_PCM_I2S1_RX 24
#define U300_DMA_XGAM_CDI 25
#define U300_DMA_XGAM_PDI 26
#define U300_DMA_SPI_TX 27
#define U300_DMA_SPI_RX 28
#define U300_DMA_GENERAL_PURPOSE_0 29
#define U300_DMA_GENERAL_PURPOSE_1 30
#define U300_DMA_GENERAL_PURPOSE_2 31
#define U300_DMA_GENERAL_PURPOSE_3 32
#define U300_DMA_GENERAL_PURPOSE_4 33
#define U300_DMA_GENERAL_PURPOSE_5 34
#define U300_DMA_GENERAL_PURPOSE_6 35
#define U300_DMA_GENERAL_PURPOSE_7 36
#define U300_DMA_GENERAL_PURPOSE_8 37
#define U300_DMA_UART1_TX 38
#define U300_DMA_UART1_RX 39
#define U300_DMA_DEVICE_CHANNELS 32
#define U300_DMA_CHANNELS 40
/**
* coh901318_filter_id() - DMA channel filter function
* @chan: dma channel handle
* @chan_id: id of dma channel to be filter out
*
* In dma_request_channel() it specifies what channel id to be requested
*/
bool coh901318_filter_id(struct dma_chan *chan, void *chan_id);
#else
static inline bool coh901318_filter_id(struct dma_chan *chan, void *chan_id)
{
return false;
}
#endif
#endif /* PLAT_COH901318_H */

View File

@@ -57,15 +57,4 @@ struct sdma_script_start_addrs {
/* End of v4 array */
};
/**
* struct sdma_platform_data - platform specific data for SDMA engine
*
* @fw_name The firmware name
* @script_addrs SDMA scripts addresses in SDMA ROM
*/
struct sdma_platform_data {
char *fw_name;
struct sdma_script_start_addrs *script_addrs;
};
#endif /* __MACH_MXC_SDMA_H__ */

View File

@@ -12,9 +12,8 @@
#include <linux/bitops.h>
/*
* If more options are added please update profile_names
* array in platform-profile.c and sysfs-platform-profile.rst
* documentation.
* If more options are added please update profile_names array in
* platform_profile.c and sysfs-platform_profile documentation.
*/
enum platform_profile_option {
@@ -22,6 +21,7 @@ enum platform_profile_option {
PLATFORM_PROFILE_COOL,
PLATFORM_PROFILE_QUIET,
PLATFORM_PROFILE_BALANCED,
PLATFORM_PROFILE_BALANCED_PERFORMANCE,
PLATFORM_PROFILE_PERFORMANCE,
PLATFORM_PROFILE_LAST, /*must always be last */
};

View File

@@ -171,7 +171,7 @@ static inline void ptrace_event(int event, unsigned long message)
*
* Check whether @event is enabled and, if so, report @event and @pid
* to the ptrace parent. @pid is reported as the pid_t seen from the
* the ptrace parent's pid namespace.
* ptrace parent's pid namespace.
*
* Called without locks.
*/

Some files were not shown because too many files have changed in this diff Show More