Merge tag 'reset-gpio-for-v6.19' of https://git.pengutronix.de/git/pza/linux into gpio/for-next

Reset/GPIO/swnode changes for v6.19

* Extend the software node implementation, allowing its properties to reference
  existing firmware nodes (a brief consumer-side sketch follows this list).
* Update the GPIO property interface to use reworked swnode macros.
* Rework reset-gpio code to use GPIO lookup via swnode.
* Fix spi-cs42l43 driver to work with swnode changes.
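
As a quick orientation for the first item above, here is a minimal consumer-side
sketch. It relies only on the existing fwnode property API
(fwnode_property_get_reference_args() and fwnode_handle_put()), which the swnode
backend services regardless of whether the reference target is another software
node or a pre-existing firmware node. The "board_node" handle and the
"example-gpios"/"#gpio-cells" property names are made-up placeholders for
illustration, not identifiers defined by this series.

#include <linux/property.h>

/* Hedged sketch: resolve reference 0 of a hypothetical "example-gpios" property. */
static int example_resolve_ref(struct fwnode_handle *board_node)
{
	struct fwnode_reference_args args;
	int ret;

	/* Works the same whether the target is a swnode or a firmware node. */
	ret = fwnode_property_get_reference_args(board_node, "example-gpios",
						 "#gpio-cells", 0, 0, &args);
	if (ret)
		return ret;

	/* args.fwnode is the referenced node; args.args[] holds its cells. */
	fwnode_handle_put(args.fwnode);
	return 0;
}
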
Bartosz Golaszewski committed on 2025-11-20 19:17:47 +01:00
484 changed files with 4207 additions and 1888 deletions


@@ -644,6 +644,7 @@ Qais Yousef <qyousef@layalina.io> <qais.yousef@arm.com>
Quentin Monnet <qmo@kernel.org> <quentin.monnet@netronome.com>
Quentin Monnet <qmo@kernel.org> <quentin@isovalent.com>
Quentin Perret <qperret@qperret.net> <quentin.perret@arm.com>
Rae Moar <raemoar63@gmail.com> <rmoar@google.com>
Rafael J. Wysocki <rjw@rjwysocki.net> <rjw@sisk.pl>
Rajeev Nandan <quic_rajeevny@quicinc.com> <rajeevny@codeaurora.org>
Rajendra Nayak <quic_rjendra@quicinc.com> <rnayak@codeaurora.org>


@@ -2036,6 +2036,10 @@ S: Botanicka' 68a
S: 602 00 Brno
S: Czech Republic
N: Karsten Keil
E: isdn@linux-pingi.de
D: ISDN subsystem maintainer
N: Jakob Kemi
E: jakob.kemi@telia.com
D: V4L W9966 Webcam driver


@@ -1,7 +1,7 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/ti,twl4030-gpio.yaml#
$id: http://devicetree.org/schemas/gpio/ti,twl4030-gpio.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: TI TWL4030 GPIO controller


@@ -180,9 +180,9 @@ allOf:
then:
properties:
reg:
minItems: 2
maxItems: 2
reg-names:
minItems: 2
maxItems: 2
else:
properties:
reg:


@@ -32,7 +32,7 @@ properties:
$ref: /schemas/types.yaml#/definitions/uint32-array
minItems: 2
maxItems: 2
maxItems: 4
items:
enum: [1, 2, 3, 4]
@@ -48,7 +48,7 @@ properties:
$ref: /schemas/types.yaml#/definitions/uint32-array
minItems: 2
maxItems: 2
maxItems: 5
items:
enum: [1, 2, 3, 4, 5]


@@ -37,8 +37,8 @@ which corresponds to the following ASL (in the scope of \_SB)::
Name (_HID, ...)
Name (_CRS, ResourceTemplate () {
I2cSerialBus (0x50, ControllerInitiated, I2C_SPEED,
AddressingMode7Bit, "\\_SB.SMB1.CH00", 0x00,
ResourceConsumer,,)
AddressingMode7Bit, "\\_SB.SMB1.MUX0.CH00",
0x00, ResourceConsumer,,)
}
}
}
@@ -52,8 +52,8 @@ which corresponds to the following ASL (in the scope of \_SB)::
Name (_HID, ...)
Name (_CRS, ResourceTemplate () {
I2cSerialBus (0x50, ControllerInitiated, I2C_SPEED,
AddressingMode7Bit, "\\_SB.SMB1.CH01", 0x00,
ResourceConsumer,,)
AddressingMode7Bit, "\\_SB.SMB1.MUX0.CH01",
0x00, ResourceConsumer,,)
}
}
}


@@ -605,6 +605,8 @@ operations:
reply: &pin-attrs
attributes:
- id
- module-name
- clock-id
- board-label
- panel-label
- package-label


@@ -19,9 +19,6 @@ Userdata append support by Matthew Wood <thepacketgeek@gmail.com>, Jan 22 2024
Sysdata append support by Breno Leitao <leitao@debian.org>, Jan 15 2025
Please send bug reports to Matt Mackall <mpm@selenic.com>
Satyam Sharma <satyam.sharma@gmail.com>, and Cong Wang <xiyou.wangcong@gmail.com>
Introduction:
=============


@@ -4818,6 +4818,7 @@ F: drivers/net/dsa/b53/*
F: drivers/net/dsa/bcm_sf2*
F: include/linux/dsa/brcm.h
F: include/linux/platform_data/b53.h
F: net/dsa/tag_brcm.c
BROADCOM BCM2711/BCM2835 ARM ARCHITECTURE
M: Florian Fainelli <florian.fainelli@broadcom.com>
@@ -12528,6 +12529,7 @@ F: include/linux/avf/virtchnl.h
F: include/linux/net/intel/*/
INTEL ETHERNET PROTOCOL DRIVER FOR RDMA
M: Krzysztof Czurylo <krzysztof.czurylo@intel.com>
M: Tatyana Nikolova <tatyana.e.nikolova@intel.com>
L: linux-rdma@vger.kernel.org
S: Supported
@@ -12868,7 +12870,8 @@ F: tools/testing/selftests/sgx/*
K: \bSGX_
INTEL SKYLAKE INT3472 ACPI DEVICE DRIVER
M: Daniel Scally <djrscally@gmail.com>
M: Daniel Scally <dan.scally@ideasonboard.com>
M: Sakari Ailus <sakari.ailus@linux.intel.com>
S: Maintained
F: drivers/platform/x86/intel/int3472/
F: include/linux/platform_data/x86/int3472.h
@@ -13267,10 +13270,8 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git mast
F: drivers/infiniband/ulp/isert
ISDN/CMTP OVER BLUETOOTH
M: Karsten Keil <isdn@linux-pingi.de>
L: isdn4linux@listserv.isdn4linux.de (subscribers-only)
L: netdev@vger.kernel.org
S: Odd Fixes
S: Orphan
W: http://www.isdn4linux.de
F: Documentation/isdn/
F: drivers/isdn/capi/
@@ -13279,10 +13280,8 @@ F: include/uapi/linux/isdn/
F: net/bluetooth/cmtp/
ISDN/mISDN SUBSYSTEM
M: Karsten Keil <isdn@linux-pingi.de>
L: isdn4linux@listserv.isdn4linux.de (subscribers-only)
L: netdev@vger.kernel.org
S: Maintained
S: Orphan
W: http://www.isdn4linux.de
F: drivers/isdn/Kconfig
F: drivers/isdn/Makefile
@@ -13436,9 +13435,12 @@ F: mm/kasan/
F: scripts/Makefile.kasan
KCONFIG
M: Nathan Chancellor <nathan@kernel.org>
M: Nicolas Schier <nsc@kernel.org>
L: linux-kbuild@vger.kernel.org
S: Orphan
S: Odd Fixes
Q: https://patchwork.kernel.org/project/linux-kbuild/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kbuild/linux.git
F: Documentation/kbuild/kconfig*
F: scripts/Kconfig.include
F: scripts/kconfig/
@@ -13623,7 +13625,7 @@ F: fs/smb/server/
KERNEL UNIT TESTING FRAMEWORK (KUnit)
M: Brendan Higgins <brendan.higgins@linux.dev>
M: David Gow <davidgow@google.com>
R: Rae Moar <rmoar@google.com>
R: Rae Moar <raemoar63@gmail.com>
L: linux-kselftest@vger.kernel.org
L: kunit-dev@googlegroups.com
S: Maintained
@@ -20168,6 +20170,7 @@ R: Alexander Shishkin <alexander.shishkin@linux.intel.com>
R: Jiri Olsa <jolsa@kernel.org>
R: Ian Rogers <irogers@google.com>
R: Adrian Hunter <adrian.hunter@intel.com>
R: James Clark <james.clark@linaro.org>
L: linux-perf-users@vger.kernel.org
L: linux-kernel@vger.kernel.org
S: Supported
@@ -21339,6 +21342,7 @@ F: drivers/media/platform/qcom/venus/
QUALCOMM WCN36XX WIRELESS DRIVER
M: Loic Poulain <loic.poulain@oss.qualcomm.com>
L: wcn36xx@lists.infradead.org
L: linux-wireless@vger.kernel.org
S: Supported
W: https://wireless.wiki.kernel.org/en/users/Drivers/wcn36xx
F: drivers/net/wireless/ath/wcn36xx/


@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 18
SUBLEVEL = 0
EXTRAVERSION = -rc3
EXTRAVERSION = -rc5
NAME = Baby Opossum Posse
# *DOCUMENTATION*


@@ -917,6 +917,13 @@ config ARCH_USES_CFI_TRAPS
An architecture should select this option if it requires the
.kcfi_traps section for KCFI trap handling.
config ARCH_USES_CFI_GENERIC_LLVM_PASS
bool
help
An architecture should select this option if it uses the generic
KCFIPass in LLVM to expand kCFI bundles instead of architecture-specific
lowering.
config CFI
bool "Use Kernel Control Flow Integrity (kCFI)"
default CFI_CLANG


@@ -44,6 +44,8 @@ config ARM
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_USE_MEMTEST
# https://github.com/llvm/llvm-project/commit/d130f402642fba3d065aacb506cb061c899558de
select ARCH_USES_CFI_GENERIC_LLVM_PASS if CLANG_VERSION < 220000
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
select ARCH_WANT_GENERAL_HUGETLB
select ARCH_WANT_IPC_PARSE_VERSION


@@ -1213,6 +1213,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
u8 src = bpf2a64[insn->src_reg];
const u8 tmp = bpf2a64[TMP_REG_1];
const u8 tmp2 = bpf2a64[TMP_REG_2];
const u8 tmp3 = bpf2a64[TMP_REG_3];
const u8 fp = bpf2a64[BPF_REG_FP];
const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
const u8 priv_sp = bpf2a64[PRIVATE_SP];
@@ -1757,8 +1758,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:
if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) {
emit(A64_ADD(1, tmp2, dst, arena_vm_base), ctx);
dst = tmp2;
emit(A64_ADD(1, tmp3, dst, arena_vm_base), ctx);
dst = tmp3;
}
if (dst == fp) {
dst_adj = ctx->priv_sp_used ? priv_sp : A64_SP;


@@ -109,7 +109,7 @@ endif
ifdef CONFIG_RUSTC_HAS_ANNOTATE_TABLEJUMP
KBUILD_RUSTFLAGS += -Cllvm-args=--loongarch-annotate-tablejump
else
KBUILD_RUSTFLAGS += -Zno-jump-tables # keep compatibility with older compilers
KBUILD_RUSTFLAGS += $(if $(call rustc-min-version,109300),-Cjump-tables=n,-Zno-jump-tables) # keep compatibility with older compilers
endif
ifdef CONFIG_LTO_CLANG
# The annotate-tablejump option can not be passed to LLVM backend when LTO is enabled.


@@ -35,6 +35,8 @@
#define KERNEL_START (KERNEL_BINARY_TEXT_START)
#define ALIGNMENT_OK(ptr, type) (((ptr) & (sizeof(type) - 1)) == 0)
extern struct unwind_table_entry __start___unwind[];
extern struct unwind_table_entry __stop___unwind[];
@@ -257,12 +259,15 @@ static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int
if (pc_is_kernel_fn(pc, _switch_to) ||
pc == (unsigned long)&_switch_to_ret) {
info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
if (ALIGNMENT_OK(info->prev_sp, long))
info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
else
info->prev_ip = info->prev_sp = 0;
return 1;
}
#ifdef CONFIG_IRQSTACKS
if (pc == (unsigned long)&_call_on_stack) {
if (pc == (unsigned long)&_call_on_stack && ALIGNMENT_OK(info->sp, long)) {
info->prev_sp = *(unsigned long *)(info->sp - FRAME_SIZE - REG_SZ);
info->prev_ip = *(unsigned long *)(info->sp - FRAME_SIZE - RP_OFFSET);
return 1;
@@ -370,8 +375,10 @@ static void unwind_frame_regs(struct unwind_frame_info *info)
info->prev_sp = info->sp - frame_size;
if (e->Millicode)
info->rp = info->r31;
else if (rpoffset)
else if (rpoffset && ALIGNMENT_OK(info->prev_sp, long))
info->rp = *(unsigned long *)(info->prev_sp - rpoffset);
else
info->rp = 0;
info->prev_ip = info->rp;
info->rp = 0;
}


@@ -12,6 +12,12 @@
#define __ASM_STR(x) #x
#endif
#ifdef CONFIG_AS_HAS_INSN
#define ASM_INSN_I(__x) ".insn " __x
#else
#define ASM_INSN_I(__x) ".4byte " __x
#endif
#if __riscv_xlen == 64
#define __REG_SEL(a, b) __ASM_STR(a)
#elif __riscv_xlen == 32


@@ -256,10 +256,10 @@
INSN_S(OPCODE_OP_IMM, FUNC3(6), __RS2(3), \
SIMM12((offset) & 0xfe0), RS1(base))
#define RISCV_PAUSE ".4byte 0x100000f"
#define ZAWRS_WRS_NTO ".4byte 0x00d00073"
#define ZAWRS_WRS_STO ".4byte 0x01d00073"
#define RISCV_NOP4 ".4byte 0x00000013"
#define RISCV_PAUSE ASM_INSN_I("0x100000f")
#define ZAWRS_WRS_NTO ASM_INSN_I("0x00d00073")
#define ZAWRS_WRS_STO ASM_INSN_I("0x01d00073")
#define RISCV_NOP4 ASM_INSN_I("0x00000013")
#define RISCV_INSN_NOP4 _AC(0x00000013, U)


@@ -30,8 +30,8 @@ extern struct riscv_isa_vendor_ext_data_list riscv_isa_vendor_ext_list_mips;
* allowing any subsequent instructions to fetch.
*/
#define MIPS_PAUSE ".4byte 0x00501013\n\t"
#define MIPS_EHB ".4byte 0x00301013\n\t"
#define MIPS_IHB ".4byte 0x00101013\n\t"
#define MIPS_PAUSE ASM_INSN_I("0x00501013\n\t")
#define MIPS_EHB ASM_INSN_I("0x00301013\n\t")
#define MIPS_IHB ASM_INSN_I("0x00101013\n\t")
#endif // _ASM_RISCV_VENDOR_EXTENSIONS_MIPS_H


@@ -265,10 +265,10 @@ void kgdb_arch_handle_qxfer_pkt(char *remcom_in_buffer,
{
if (!strncmp(remcom_in_buffer, gdb_xfer_read_target,
sizeof(gdb_xfer_read_target)))
strcpy(remcom_out_buffer, riscv_gdb_stub_target_desc);
strscpy(remcom_out_buffer, riscv_gdb_stub_target_desc, BUFMAX);
else if (!strncmp(remcom_in_buffer, gdb_xfer_read_cpuxml,
sizeof(gdb_xfer_read_cpuxml)))
strcpy(remcom_out_buffer, riscv_gdb_stub_cpuxml);
strscpy(remcom_out_buffer, riscv_gdb_stub_cpuxml, BUFMAX);
}
static inline void kgdb_arch_update_addr(struct pt_regs *regs,


@@ -119,6 +119,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
unsigned int num_plts = 0;
unsigned int num_gots = 0;
Elf_Rela *scratch = NULL;
Elf_Rela *new_scratch;
size_t scratch_size = 0;
int i;
@@ -168,9 +169,12 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
scratch_size_needed = (num_scratch_relas + num_relas) * sizeof(*scratch);
if (scratch_size_needed > scratch_size) {
scratch_size = scratch_size_needed;
scratch = kvrealloc(scratch, scratch_size, GFP_KERNEL);
if (!scratch)
new_scratch = kvrealloc(scratch, scratch_size, GFP_KERNEL);
if (!new_scratch) {
kvfree(scratch);
return -ENOMEM;
}
scratch = new_scratch;
}
for (size_t j = 0; j < num_relas; j++)


@@ -16,6 +16,22 @@
#ifdef CONFIG_FRAME_POINTER
/*
* This disables KASAN checking when reading a value from another task's stack,
* since the other task could be running on another CPU and could have poisoned
* the stack in the meantime.
*/
#define READ_ONCE_TASK_STACK(task, x) \
({ \
unsigned long val; \
unsigned long addr = x; \
if ((task) == current) \
val = READ_ONCE(addr); \
else \
val = READ_ONCE_NOCHECK(addr); \
val; \
})
extern asmlinkage void handle_exception(void);
extern unsigned long ret_from_exception_end;
@@ -69,8 +85,9 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
fp = frame->ra;
pc = regs->ra;
} else {
fp = frame->fp;
pc = ftrace_graph_ret_addr(current, &graph_idx, frame->ra,
fp = READ_ONCE_TASK_STACK(task, frame->fp);
pc = READ_ONCE_TASK_STACK(task, frame->ra);
pc = ftrace_graph_ret_addr(current, &graph_idx, pc,
&frame->ra);
if (pc >= (unsigned long)handle_exception &&
pc < (unsigned long)&ret_from_exception_end) {


@@ -31,7 +31,7 @@ config RISCV_MODULE_LINKING_KUNIT
If unsure, say N.
config RISCV_KPROBES_KUNIT
bool "KUnit test for riscv kprobes" if !KUNIT_ALL_TESTS
tristate "KUnit test for riscv kprobes" if !KUNIT_ALL_TESTS
depends on KUNIT
depends on KPROBES
default KUNIT_ALL_TESTS


@@ -1 +1,3 @@
obj-y += test-kprobes.o test-kprobes-asm.o
obj-$(CONFIG_RISCV_KPROBES_KUNIT) += kprobes_riscv_kunit.o
kprobes_riscv_kunit-objs := test-kprobes.o test-kprobes-asm.o


@@ -49,8 +49,11 @@ static struct kunit_case kprobes_testcases[] = {
};
static struct kunit_suite kprobes_test_suite = {
.name = "kprobes_test_riscv",
.name = "kprobes_riscv",
.test_cases = kprobes_testcases,
};
kunit_test_suites(&kprobes_test_suite);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("KUnit test for riscv kprobes");


@@ -21,7 +21,7 @@
#define pt_dump_seq_puts(m, fmt) \
({ \
if (m) \
seq_printf(m, fmt); \
seq_puts(m, fmt); \
})
/*


@@ -158,7 +158,6 @@ config S390
select ARCH_WANT_IRQS_OFF_ACTIVATE_MM
select ARCH_WANT_KERNEL_PMD_MKWRITE
select ARCH_WANT_LD_ORPHAN_WARN
select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
select ARCH_WANTS_THP_SWAP
select BUILDTIME_TABLE_SORT
select CLONE_BACKWARDS2


@@ -101,6 +101,7 @@ CONFIG_SLUB_STATS=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_PERSISTENT_HUGE_ZERO_FOLIO=y
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CMA_DEBUGFS=y
CONFIG_CMA_SYSFS=y
@@ -123,12 +124,12 @@ CONFIG_TLS_DEVICE=y
CONFIG_TLS_TOE=y
CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m
CONFIG_XDP_SOCKETS=y
CONFIG_XDP_SOCKETS_DIAG=m
CONFIG_DIBS=y
CONFIG_DIBS_LO=y
CONFIG_SMC=m
CONFIG_SMC_DIAG=m
CONFIG_DIBS=y
CONFIG_DIBS_LO=y
CONFIG_XDP_SOCKETS=y
CONFIG_XDP_SOCKETS_DIAG=m
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
@@ -472,6 +473,7 @@ CONFIG_SCSI_DH_EMC=m
CONFIG_SCSI_DH_ALUA=m
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
CONFIG_MD_LLBITMAP=y
# CONFIG_MD_BITMAP_FILE is not set
CONFIG_MD_LINEAR=m
CONFIG_MD_CLUSTER=m
@@ -654,9 +656,12 @@ CONFIG_JFS_POSIX_ACL=y
CONFIG_JFS_SECURITY=y
CONFIG_JFS_STATISTICS=y
CONFIG_XFS_FS=y
CONFIG_XFS_SUPPORT_V4=y
CONFIG_XFS_SUPPORT_ASCII_CI=y
CONFIG_XFS_QUOTA=y
CONFIG_XFS_POSIX_ACL=y
CONFIG_XFS_RT=y
# CONFIG_XFS_ONLINE_SCRUB is not set
CONFIG_XFS_DEBUG=y
CONFIG_GFS2_FS=m
CONFIG_GFS2_FS_LOCKING_DLM=y
@@ -666,7 +671,6 @@ CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_BTRFS_DEBUG=y
CONFIG_BTRFS_ASSERT=y
CONFIG_NILFS2_FS=m
CONFIG_FS_DAX=y
CONFIG_EXPORTFS_BLOCK_OPS=y
CONFIG_FS_ENCRYPTION=y
CONFIG_FS_VERITY=y


@@ -94,6 +94,7 @@ CONFIG_SLAB_BUCKETS=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_PERSISTENT_HUGE_ZERO_FOLIO=y
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CMA_SYSFS=y
CONFIG_CMA_AREAS=7
@@ -114,12 +115,12 @@ CONFIG_TLS_DEVICE=y
CONFIG_TLS_TOE=y
CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m
CONFIG_XDP_SOCKETS=y
CONFIG_XDP_SOCKETS_DIAG=m
CONFIG_DIBS=y
CONFIG_DIBS_LO=y
CONFIG_SMC=m
CONFIG_SMC_DIAG=m
CONFIG_DIBS=y
CONFIG_DIBS_LO=y
CONFIG_XDP_SOCKETS=y
CONFIG_XDP_SOCKETS_DIAG=m
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
@@ -462,6 +463,7 @@ CONFIG_SCSI_DH_EMC=m
CONFIG_SCSI_DH_ALUA=m
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
CONFIG_MD_LLBITMAP=y
# CONFIG_MD_BITMAP_FILE is not set
CONFIG_MD_LINEAR=m
CONFIG_MD_CLUSTER=m
@@ -644,16 +646,18 @@ CONFIG_JFS_POSIX_ACL=y
CONFIG_JFS_SECURITY=y
CONFIG_JFS_STATISTICS=y
CONFIG_XFS_FS=y
CONFIG_XFS_SUPPORT_V4=y
CONFIG_XFS_SUPPORT_ASCII_CI=y
CONFIG_XFS_QUOTA=y
CONFIG_XFS_POSIX_ACL=y
CONFIG_XFS_RT=y
# CONFIG_XFS_ONLINE_SCRUB is not set
CONFIG_GFS2_FS=m
CONFIG_GFS2_FS_LOCKING_DLM=y
CONFIG_OCFS2_FS=m
CONFIG_BTRFS_FS=y
CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_NILFS2_FS=m
CONFIG_FS_DAX=y
CONFIG_EXPORTFS_BLOCK_OPS=y
CONFIG_FS_ENCRYPTION=y
CONFIG_FS_VERITY=y


@@ -33,7 +33,6 @@ CONFIG_NET=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_SAFE=y
CONFIG_BLK_DEV_RAM=y
# CONFIG_DCSSBLK is not set
# CONFIG_DASD is not set
CONFIG_ENCLOSURE_SERVICES=y
CONFIG_SCSI=y


@@ -169,11 +169,18 @@ struct kmac_sha2_ctx {
u64 buflen[2];
};
enum async_op {
OP_NOP = 0,
OP_UPDATE,
OP_FINAL,
OP_FINUP,
};
/* phmac request context */
struct phmac_req_ctx {
struct hash_walk_helper hwh;
struct kmac_sha2_ctx kmac_ctx;
bool final;
enum async_op async_op;
};
/*
@@ -610,6 +617,7 @@ static int phmac_update(struct ahash_request *req)
* using engine to serialize requests.
*/
if (rc == 0 || rc == -EKEYEXPIRED) {
req_ctx->async_op = OP_UPDATE;
atomic_inc(&tfm_ctx->via_engine_ctr);
rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req);
if (rc != -EINPROGRESS)
@@ -647,8 +655,7 @@ static int phmac_final(struct ahash_request *req)
* using engine to serialize requests.
*/
if (rc == 0 || rc == -EKEYEXPIRED) {
req->nbytes = 0;
req_ctx->final = true;
req_ctx->async_op = OP_FINAL;
atomic_inc(&tfm_ctx->via_engine_ctr);
rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req);
if (rc != -EINPROGRESS)
@@ -676,13 +683,16 @@ static int phmac_finup(struct ahash_request *req)
if (rc)
goto out;
req_ctx->async_op = OP_FINUP;
/* Try synchronous operations if no active engine usage */
if (!atomic_read(&tfm_ctx->via_engine_ctr)) {
rc = phmac_kmac_update(req, false);
if (rc == 0)
req->nbytes = 0;
req_ctx->async_op = OP_FINAL;
}
if (!rc && !req->nbytes && !atomic_read(&tfm_ctx->via_engine_ctr)) {
if (!rc && req_ctx->async_op == OP_FINAL &&
!atomic_read(&tfm_ctx->via_engine_ctr)) {
rc = phmac_kmac_final(req, false);
if (rc == 0)
goto out;
@@ -694,7 +704,7 @@ static int phmac_finup(struct ahash_request *req)
* using engine to serialize requests.
*/
if (rc == 0 || rc == -EKEYEXPIRED) {
req_ctx->final = true;
/* req->async_op has been set to either OP_FINUP or OP_FINAL */
atomic_inc(&tfm_ctx->via_engine_ctr);
rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req);
if (rc != -EINPROGRESS)
@@ -855,15 +865,16 @@ static int phmac_do_one_request(struct crypto_engine *engine, void *areq)
/*
* Three kinds of requests come in here:
* update when req->nbytes > 0 and req_ctx->final is false
* final when req->nbytes = 0 and req_ctx->final is true
* finup when req->nbytes > 0 and req_ctx->final is true
* For update and finup the hwh walk needs to be prepared and
* up to date but the actual nr of bytes in req->nbytes may be
* any non zero number. For final there is no hwh walk needed.
* 1. req->async_op == OP_UPDATE with req->nbytes > 0
* 2. req->async_op == OP_FINUP with req->nbytes > 0
* 3. req->async_op == OP_FINAL
* For update and finup the hwh walk has already been prepared
* by the caller. For final there is no hwh walk needed.
*/
if (req->nbytes) {
switch (req_ctx->async_op) {
case OP_UPDATE:
case OP_FINUP:
rc = phmac_kmac_update(req, true);
if (rc == -EKEYEXPIRED) {
/*
@@ -880,10 +891,11 @@ static int phmac_do_one_request(struct crypto_engine *engine, void *areq)
hwh_advance(hwh, rc);
goto out;
}
req->nbytes = 0;
}
if (req_ctx->final) {
if (req_ctx->async_op == OP_UPDATE)
break;
req_ctx->async_op = OP_FINAL;
fallthrough;
case OP_FINAL:
rc = phmac_kmac_final(req, true);
if (rc == -EKEYEXPIRED) {
/*
@@ -897,10 +909,14 @@ static int phmac_do_one_request(struct crypto_engine *engine, void *areq)
cond_resched();
return -ENOSPC;
}
break;
default:
/* unknown/unsupported/unimplemented asynch op */
return -EOPNOTSUPP;
}
out:
if (rc || req_ctx->final)
if (rc || req_ctx->async_op == OP_FINAL)
memzero_explicit(kmac_ctx, sizeof(*kmac_ctx));
pr_debug("request complete with rc=%d\n", rc);
local_bh_disable();


@@ -145,7 +145,6 @@ struct zpci_dev {
u8 has_resources : 1;
u8 is_physfn : 1;
u8 util_str_avail : 1;
u8 irqs_registered : 1;
u8 tid_avail : 1;
u8 rtr_avail : 1; /* Relaxed translation allowed */
unsigned int devfn; /* DEVFN part of the RID*/


@@ -291,16 +291,14 @@ static int ptdump_cmp(const void *a, const void *b)
static int add_marker(unsigned long start, unsigned long end, const char *name)
{
size_t oldsize, newsize;
struct addr_marker *new;
size_t newsize;
oldsize = markers_cnt * sizeof(*markers);
newsize = oldsize + 2 * sizeof(*markers);
if (!oldsize)
markers = kvmalloc(newsize, GFP_KERNEL);
else
markers = kvrealloc(markers, newsize, GFP_KERNEL);
if (!markers)
goto error;
newsize = (markers_cnt + 2) * sizeof(*markers);
new = kvrealloc(markers, newsize, GFP_KERNEL);
if (!new)
return -ENOMEM;
markers = new;
markers[markers_cnt].is_start = 1;
markers[markers_cnt].start_address = start;
markers[markers_cnt].size = end - start;
@@ -312,9 +310,6 @@ static int add_marker(unsigned long start, unsigned long end, const char *name)
markers[markers_cnt].name = name;
markers_cnt++;
return 0;
error:
markers_cnt = 0;
return -ENOMEM;
}
static int pt_dump_init(void)


@@ -188,7 +188,7 @@ static pci_ers_result_t zpci_event_attempt_error_recovery(struct pci_dev *pdev)
* is unbound or probed and that userspace can't access its
* configuration space while we perform recovery.
*/
pci_dev_lock(pdev);
device_lock(&pdev->dev);
if (pdev->error_state == pci_channel_io_perm_failure) {
ers_res = PCI_ERS_RESULT_DISCONNECT;
goto out_unlock;
@@ -257,7 +257,7 @@ static pci_ers_result_t zpci_event_attempt_error_recovery(struct pci_dev *pdev)
driver->err_handler->resume(pdev);
pci_uevent_ers(pdev, PCI_ERS_RESULT_RECOVERED);
out_unlock:
pci_dev_unlock(pdev);
device_unlock(&pdev->dev);
zpci_report_status(zdev, "recovery", status_str);
return ers_res;


@@ -107,9 +107,6 @@ static int zpci_set_irq(struct zpci_dev *zdev)
else
rc = zpci_set_airq(zdev);
if (!rc)
zdev->irqs_registered = 1;
return rc;
}
@@ -123,9 +120,6 @@ static int zpci_clear_irq(struct zpci_dev *zdev)
else
rc = zpci_clear_airq(zdev);
if (!rc)
zdev->irqs_registered = 0;
return rc;
}
@@ -427,8 +421,7 @@ bool arch_restore_msi_irqs(struct pci_dev *pdev)
{
struct zpci_dev *zdev = to_zpci(pdev);
if (!zdev->irqs_registered)
zpci_set_irq(zdev);
zpci_set_irq(zdev);
return true;
}


@@ -75,7 +75,7 @@ export BITS
#
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=53383
#
KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx
KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx -mno-sse4a
KBUILD_RUSTFLAGS += --target=$(objtree)/scripts/target.json
KBUILD_RUSTFLAGS += -Ctarget-feature=-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-avx,-avx2
@@ -98,7 +98,7 @@ ifeq ($(CONFIG_X86_KERNEL_IBT),y)
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=104816
#
KBUILD_CFLAGS += $(call cc-option,-fcf-protection=branch -fno-jump-tables)
KBUILD_RUSTFLAGS += -Zcf-protection=branch -Zno-jump-tables
KBUILD_RUSTFLAGS += -Zcf-protection=branch $(if $(call rustc-min-version,109300),-Cjump-tables=n,-Zno-jump-tables)
else
KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
endif


@@ -7596,6 +7596,7 @@ __init int intel_pmu_init(void)
break;
case INTEL_PANTHERLAKE_L:
case INTEL_WILDCATLAKE_L:
pr_cont("Pantherlake Hybrid events, ");
name = "pantherlake_hybrid";
goto lnl_common;


@@ -317,7 +317,8 @@ static u64 __grt_latency_data(struct perf_event *event, u64 status,
{
u64 val;
WARN_ON_ONCE(hybrid_pmu(event->pmu)->pmu_type == hybrid_big);
WARN_ON_ONCE(is_hybrid() &&
hybrid_pmu(event->pmu)->pmu_type == hybrid_big);
dse &= PERF_PEBS_DATA_SOURCE_GRT_MASK;
val = hybrid_var(event->pmu, pebs_data_source)[dse];


@@ -1895,6 +1895,7 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
X86_MATCH_VFM(INTEL_ARROWLAKE_H, &mtl_uncore_init),
X86_MATCH_VFM(INTEL_LUNARLAKE_M, &lnl_uncore_init),
X86_MATCH_VFM(INTEL_PANTHERLAKE_L, &ptl_uncore_init),
X86_MATCH_VFM(INTEL_WILDCATLAKE_L, &ptl_uncore_init),
X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &spr_uncore_init),
X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &spr_uncore_init),
X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, &gnr_uncore_init),


@@ -23,7 +23,6 @@
#define AMD_NODE0_PCI_SLOT 0x18
struct pci_dev *amd_node_get_func(u16 node, u8 func);
struct pci_dev *amd_node_get_root(u16 node);
static inline u16 amd_num_nodes(void)
{


@@ -150,12 +150,12 @@
#define INTEL_LUNARLAKE_M IFM(6, 0xBD) /* Lion Cove / Skymont */
#define INTEL_PANTHERLAKE_L IFM(6, 0xCC) /* Cougar Cove / Crestmont */
#define INTEL_PANTHERLAKE_L IFM(6, 0xCC) /* Cougar Cove / Darkmont */
#define INTEL_WILDCATLAKE_L IFM(6, 0xD5)
#define INTEL_NOVALAKE IFM(18, 0x01)
#define INTEL_NOVALAKE_L IFM(18, 0x03)
#define INTEL_NOVALAKE IFM(18, 0x01) /* Coyote Cove / Arctic Wolf */
#define INTEL_NOVALAKE_L IFM(18, 0x03) /* Coyote Cove / Arctic Wolf */
/* "Small Core" Processors (Atom/E-Core) */


@@ -43,6 +43,9 @@ extern unsigned long __phys_addr_symbol(unsigned long);
void clear_page_orig(void *page);
void clear_page_rep(void *page);
void clear_page_erms(void *page);
KCFI_REFERENCE(clear_page_orig);
KCFI_REFERENCE(clear_page_rep);
KCFI_REFERENCE(clear_page_erms);
static inline void clear_page(void *page)
{


@@ -2,6 +2,10 @@
#ifndef _ASM_RUNTIME_CONST_H
#define _ASM_RUNTIME_CONST_H
#ifdef MODULE
#error "Cannot use runtime-const infrastructure from modules"
#endif
#ifdef __ASSEMBLY__
.macro RUNTIME_CONST_PTR sym reg


@@ -12,12 +12,12 @@
#include <asm/cpufeatures.h>
#include <asm/page.h>
#include <asm/percpu.h>
#include <asm/runtime-const.h>
/*
* Virtual variable: there's no actual backing store for this,
* it can purely be used as 'runtime_const_ptr(USER_PTR_MAX)'
*/
#ifdef MODULE
#define runtime_const_ptr(sym) (sym)
#else
#include <asm/runtime-const.h>
#endif
extern unsigned long USER_PTR_MAX;
#ifdef CONFIG_ADDRESS_MASKING


@@ -34,62 +34,6 @@ struct pci_dev *amd_node_get_func(u16 node, u8 func)
return pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(AMD_NODE0_PCI_SLOT + node, func));
}
#define DF_BLK_INST_CNT 0x040
#define DF_CFG_ADDR_CNTL_LEGACY 0x084
#define DF_CFG_ADDR_CNTL_DF4 0xC04
#define DF_MAJOR_REVISION GENMASK(27, 24)
static u16 get_cfg_addr_cntl_offset(struct pci_dev *df_f0)
{
u32 reg;
/*
* Revision fields added for DF4 and later.
*
* Major revision of '0' is found pre-DF4. Field is Read-as-Zero.
*/
if (pci_read_config_dword(df_f0, DF_BLK_INST_CNT, &reg))
return 0;
if (reg & DF_MAJOR_REVISION)
return DF_CFG_ADDR_CNTL_DF4;
return DF_CFG_ADDR_CNTL_LEGACY;
}
struct pci_dev *amd_node_get_root(u16 node)
{
struct pci_dev *root;
u16 cntl_off;
u8 bus;
if (!cpu_feature_enabled(X86_FEATURE_ZEN))
return NULL;
/*
* D18F0xXXX [Config Address Control] (DF::CfgAddressCntl)
* Bits [7:0] (SecBusNum) holds the bus number of the root device for
* this Data Fabric instance. The segment, device, and function will be 0.
*/
struct pci_dev *df_f0 __free(pci_dev_put) = amd_node_get_func(node, 0);
if (!df_f0)
return NULL;
cntl_off = get_cfg_addr_cntl_offset(df_f0);
if (!cntl_off)
return NULL;
if (pci_read_config_byte(df_f0, cntl_off, &bus))
return NULL;
/* Grab the pointer for the actual root device instance. */
root = pci_get_domain_bus_and_slot(0, bus, 0);
pci_dbg(root, "is root for AMD node %u\n", node);
return root;
}
static struct pci_dev **amd_roots;
/* Protect the PCI config register pairs used for SMN. */
@@ -274,51 +218,21 @@ DEFINE_SHOW_STORE_ATTRIBUTE(smn_node);
DEFINE_SHOW_STORE_ATTRIBUTE(smn_address);
DEFINE_SHOW_STORE_ATTRIBUTE(smn_value);
static int amd_cache_roots(void)
static struct pci_dev *get_next_root(struct pci_dev *root)
{
u16 node, num_nodes = amd_num_nodes();
amd_roots = kcalloc(num_nodes, sizeof(*amd_roots), GFP_KERNEL);
if (!amd_roots)
return -ENOMEM;
for (node = 0; node < num_nodes; node++)
amd_roots[node] = amd_node_get_root(node);
return 0;
}
static int reserve_root_config_spaces(void)
{
struct pci_dev *root = NULL;
struct pci_bus *bus = NULL;
while ((bus = pci_find_next_bus(bus))) {
/* Root device is Device 0 Function 0 on each Primary Bus. */
root = pci_get_slot(bus, 0);
if (!root)
while ((root = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, root))) {
/* Root device is Device 0 Function 0. */
if (root->devfn)
continue;
if (root->vendor != PCI_VENDOR_ID_AMD &&
root->vendor != PCI_VENDOR_ID_HYGON)
continue;
pci_dbg(root, "Reserving PCI config space\n");
/*
* There are a few SMN index/data pairs and other registers
* that shouldn't be accessed by user space.
* So reserve the entire PCI config space for simplicity rather
* than covering specific registers piecemeal.
*/
if (!pci_request_config_region_exclusive(root, 0, PCI_CFG_SPACE_SIZE, NULL)) {
pci_err(root, "Failed to reserve config space\n");
return -EEXIST;
}
break;
}
smn_exclusive = true;
return 0;
return root;
}
static bool enable_dfs;
@@ -332,7 +246,8 @@ __setup("amd_smn_debugfs_enable", amd_smn_enable_dfs);
static int __init amd_smn_init(void)
{
int err;
u16 count, num_roots, roots_per_node, node, num_nodes;
struct pci_dev *root;
if (!cpu_feature_enabled(X86_FEATURE_ZEN))
return 0;
@@ -342,13 +257,48 @@ static int __init amd_smn_init(void)
if (amd_roots)
return 0;
err = amd_cache_roots();
if (err)
return err;
num_roots = 0;
root = NULL;
while ((root = get_next_root(root))) {
pci_dbg(root, "Reserving PCI config space\n");
err = reserve_root_config_spaces();
if (err)
return err;
/*
* There are a few SMN index/data pairs and other registers
* that shouldn't be accessed by user space. So reserve the
* entire PCI config space for simplicity rather than covering
* specific registers piecemeal.
*/
if (!pci_request_config_region_exclusive(root, 0, PCI_CFG_SPACE_SIZE, NULL)) {
pci_err(root, "Failed to reserve config space\n");
return -EEXIST;
}
num_roots++;
}
pr_debug("Found %d AMD root devices\n", num_roots);
if (!num_roots)
return -ENODEV;
num_nodes = amd_num_nodes();
amd_roots = kcalloc(num_nodes, sizeof(*amd_roots), GFP_KERNEL);
if (!amd_roots)
return -ENOMEM;
roots_per_node = num_roots / num_nodes;
count = 0;
node = 0;
root = NULL;
while (node < num_nodes && (root = get_next_root(root))) {
/* Use one root for each node and skip the rest. */
if (count++ % roots_per_node)
continue;
pci_dbg(root, "is root for AMD node %u\n", node);
amd_roots[node++] = root;
}
if (enable_dfs) {
debugfs_dir = debugfs_create_dir("amd_smn", arch_debugfs_dir);
@@ -358,6 +308,8 @@ static int __init amd_smn_init(void)
debugfs_create_file("value", 0600, debugfs_dir, NULL, &smn_value_fops);
}
smn_exclusive = true;
return 0;
}


@@ -516,7 +516,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
setup_force_cpu_cap(X86_FEATURE_ZEN5);
break;
case 0x50 ... 0x5f:
case 0x90 ... 0xaf:
case 0x80 ... 0xaf:
case 0xc0 ... 0xcf:
setup_force_cpu_cap(X86_FEATURE_ZEN6);
break;
@@ -1035,8 +1035,19 @@ static void init_amd_zen4(struct cpuinfo_x86 *c)
}
}
static const struct x86_cpu_id zen5_rdseed_microcode[] = {
ZEN_MODEL_STEP_UCODE(0x1a, 0x02, 0x1, 0x0b00215a),
ZEN_MODEL_STEP_UCODE(0x1a, 0x11, 0x0, 0x0b101054),
{},
};
static void init_amd_zen5(struct cpuinfo_x86 *c)
{
if (!x86_match_min_microcode_rev(zen5_rdseed_microcode)) {
clear_cpu_cap(c, X86_FEATURE_RDSEED);
msr_clear_bit(MSR_AMD64_CPUID_FN_7, 18);
pr_emerg_once("RDSEED32 is broken. Disabling the corresponding CPUID bit.\n");
}
}
static void init_amd(struct cpuinfo_x86 *c)


@@ -78,6 +78,10 @@
DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);
/* Used for modules: built-in code uses runtime constants */
unsigned long USER_PTR_MAX;
EXPORT_SYMBOL(USER_PTR_MAX);
u32 elf_hwcap2 __read_mostly;
/* Number of siblings per CPU package */
@@ -2579,7 +2583,7 @@ void __init arch_cpu_finalize_init(void)
alternative_instructions();
if (IS_ENABLED(CONFIG_X86_64)) {
unsigned long USER_PTR_MAX = TASK_SIZE_MAX;
USER_PTR_MAX = TASK_SIZE_MAX;
/*
* Enable this when LAM is gated on LASS support


@@ -220,10 +220,12 @@ static bool need_sha_check(u32 cur_rev)
case 0xaa001: return cur_rev <= 0xaa00116; break;
case 0xaa002: return cur_rev <= 0xaa00218; break;
case 0xb0021: return cur_rev <= 0xb002146; break;
case 0xb0081: return cur_rev <= 0xb008111; break;
case 0xb1010: return cur_rev <= 0xb101046; break;
case 0xb2040: return cur_rev <= 0xb204031; break;
case 0xb4040: return cur_rev <= 0xb404031; break;
case 0xb6000: return cur_rev <= 0xb600031; break;
case 0xb6080: return cur_rev <= 0xb608031; break;
case 0xb7000: return cur_rev <= 0xb700031; break;
default: break;
}
@@ -233,13 +235,31 @@ static bool need_sha_check(u32 cur_rev)
return true;
}
static bool cpu_has_entrysign(void)
{
unsigned int fam = x86_family(bsp_cpuid_1_eax);
unsigned int model = x86_model(bsp_cpuid_1_eax);
if (fam == 0x17 || fam == 0x19)
return true;
if (fam == 0x1a) {
if (model <= 0x2f ||
(0x40 <= model && model <= 0x4f) ||
(0x60 <= model && model <= 0x6f))
return true;
}
return false;
}
static bool verify_sha256_digest(u32 patch_id, u32 cur_rev, const u8 *data, unsigned int len)
{
struct patch_digest *pd = NULL;
u8 digest[SHA256_DIGEST_SIZE];
int i;
if (x86_family(bsp_cpuid_1_eax) < 0x17)
if (!cpu_has_entrysign())
return true;
if (!need_sha_check(cur_rev))


@@ -825,6 +825,9 @@ void fpu__clear_user_states(struct fpu *fpu)
!fpregs_state_valid(fpu, smp_processor_id()))
os_xrstor_supervisor(fpu->fpstate);
/* Ensure XFD state is in sync before reloading XSTATE */
xfd_update_state(fpu->fpstate);
/* Reset user states in registers. */
restore_fpregs_from_init_fpstate(XFEATURE_MASK_USER_RESTORE);


@@ -2701,7 +2701,7 @@ st: if (is_imm8(insn->off))
/* Update cleanup_addr */
ctx->cleanup_addr = proglen;
if (bpf_prog_was_classic(bpf_prog) &&
!capable(CAP_SYS_ADMIN)) {
!ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN)) {
u8 *ip = image + addrs[i - 1];
if (emit_spectre_bhb_barrier(&prog, ip, bpf_prog))


@@ -292,7 +292,7 @@ bool __blk_crypto_bio_prep(struct bio **bio_ptr)
}
if (!bio_crypt_check_alignment(bio)) {
bio->bi_status = BLK_STS_IOERR;
bio->bi_status = BLK_STS_INVAL;
goto fail;
}


@@ -63,6 +63,9 @@ static __init int acpi_parse_mrrm(struct acpi_table_header *table)
if (!mrrm)
return -ENODEV;
if (mrrm->header.revision != 1)
return -EINVAL;
if (mrrm->flags & ACPI_MRRM_FLAGS_REGION_ASSIGNMENT_OS)
return -EOPNOTSUPP;


@@ -1959,8 +1959,10 @@ static void acpi_video_bus_remove_notify_handler(struct acpi_video_bus *video)
struct acpi_video_device *dev;
mutex_lock(&video->device_list_lock);
list_for_each_entry(dev, &video->video_device_list, entry)
list_for_each_entry(dev, &video->video_device_list, entry) {
acpi_video_dev_remove_notify_handler(dev);
cancel_delayed_work_sync(&dev->switch_brightness_work);
}
mutex_unlock(&video->device_list_lock);
acpi_video_bus_stop_devices(video);


@@ -619,8 +619,10 @@ static int acpi_button_add(struct acpi_device *device)
input_set_drvdata(input, device);
error = input_register_device(input);
if (error)
if (error) {
input_free_device(input);
goto err_remove_fs;
}
switch (device->device_type) {
case ACPI_BUS_TYPE_POWER_BUTTON:


@@ -750,7 +750,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
}
/*
* Disregard _CPC if the number of entries in the return pachage is not
* Disregard _CPC if the number of entries in the return package is not
* as expected, but support future revisions being proper supersets of
* the v3 and only causing more entries to be returned by _CPC.
*/


@@ -49,6 +49,7 @@ struct acpi_fan_fst {
};
struct acpi_fan {
acpi_handle handle;
bool acpi4;
bool has_fst;
struct acpi_fan_fif fif;
@@ -59,14 +60,14 @@ struct acpi_fan {
struct device_attribute fine_grain_control;
};
int acpi_fan_get_fst(struct acpi_device *device, struct acpi_fan_fst *fst);
int acpi_fan_get_fst(acpi_handle handle, struct acpi_fan_fst *fst);
int acpi_fan_create_attributes(struct acpi_device *device);
void acpi_fan_delete_attributes(struct acpi_device *device);
#if IS_REACHABLE(CONFIG_HWMON)
int devm_acpi_fan_create_hwmon(struct acpi_device *device);
int devm_acpi_fan_create_hwmon(struct device *dev);
#else
static inline int devm_acpi_fan_create_hwmon(struct acpi_device *device) { return 0; };
static inline int devm_acpi_fan_create_hwmon(struct device *dev) { return 0; };
#endif
#endif


@@ -55,7 +55,7 @@ static ssize_t show_fan_speed(struct device *dev, struct device_attribute *attr,
struct acpi_fan_fst fst;
int status;
status = acpi_fan_get_fst(acpi_dev, &fst);
status = acpi_fan_get_fst(acpi_dev->handle, &fst);
if (status)
return status;


@@ -44,25 +44,30 @@ static int fan_get_max_state(struct thermal_cooling_device *cdev, unsigned long
return 0;
}
int acpi_fan_get_fst(struct acpi_device *device, struct acpi_fan_fst *fst)
int acpi_fan_get_fst(acpi_handle handle, struct acpi_fan_fst *fst)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
acpi_status status;
int ret = 0;
status = acpi_evaluate_object(device->handle, "_FST", NULL, &buffer);
if (ACPI_FAILURE(status)) {
dev_err(&device->dev, "Get fan state failed\n");
return -ENODEV;
}
status = acpi_evaluate_object(handle, "_FST", NULL, &buffer);
if (ACPI_FAILURE(status))
return -EIO;
obj = buffer.pointer;
if (!obj || obj->type != ACPI_TYPE_PACKAGE ||
obj->package.count != 3 ||
obj->package.elements[1].type != ACPI_TYPE_INTEGER) {
dev_err(&device->dev, "Invalid _FST data\n");
ret = -EINVAL;
if (!obj)
return -ENODATA;
if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count != 3) {
ret = -EPROTO;
goto err;
}
if (obj->package.elements[0].type != ACPI_TYPE_INTEGER ||
obj->package.elements[1].type != ACPI_TYPE_INTEGER ||
obj->package.elements[2].type != ACPI_TYPE_INTEGER) {
ret = -EPROTO;
goto err;
}
@@ -81,7 +86,7 @@ static int fan_get_state_acpi4(struct acpi_device *device, unsigned long *state)
struct acpi_fan_fst fst;
int status, i;
status = acpi_fan_get_fst(device, &fst);
status = acpi_fan_get_fst(device->handle, &fst);
if (status)
return status;
@@ -311,11 +316,16 @@ static int acpi_fan_probe(struct platform_device *pdev)
struct acpi_device *device = ACPI_COMPANION(&pdev->dev);
char *name;
if (!device)
return -ENODEV;
fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL);
if (!fan) {
dev_err(&device->dev, "No memory for fan\n");
return -ENOMEM;
}
fan->handle = device->handle;
device->driver_data = fan;
platform_set_drvdata(pdev, fan);
@@ -337,7 +347,7 @@ static int acpi_fan_probe(struct platform_device *pdev)
}
if (fan->has_fst) {
result = devm_acpi_fan_create_hwmon(device);
result = devm_acpi_fan_create_hwmon(&pdev->dev);
if (result)
return result;


@@ -93,13 +93,12 @@ static umode_t acpi_fan_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_
static int acpi_fan_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
int channel, long *val)
{
struct acpi_device *adev = to_acpi_device(dev->parent);
struct acpi_fan *fan = dev_get_drvdata(dev);
struct acpi_fan_fps *fps;
struct acpi_fan_fst fst;
int ret;
ret = acpi_fan_get_fst(adev, &fst);
ret = acpi_fan_get_fst(fan->handle, &fst);
if (ret < 0)
return ret;
@@ -167,12 +166,12 @@ static const struct hwmon_chip_info acpi_fan_hwmon_chip_info = {
.info = acpi_fan_hwmon_info,
};
int devm_acpi_fan_create_hwmon(struct acpi_device *device)
int devm_acpi_fan_create_hwmon(struct device *dev)
{
struct acpi_fan *fan = acpi_driver_data(device);
struct acpi_fan *fan = dev_get_drvdata(dev);
struct device *hdev;
hdev = devm_hwmon_device_register_with_info(&device->dev, "acpi_fan", fan,
&acpi_fan_hwmon_chip_info, NULL);
hdev = devm_hwmon_device_register_with_info(dev, "acpi_fan", fan, &acpi_fan_hwmon_chip_info,
NULL);
return PTR_ERR_OR_ZERO(hdev);
}


@@ -487,7 +487,7 @@ static int acpi_battery_read(struct acpi_battery *battery)
if (result)
return result;
battery->present = state & (1 << battery->id);
battery->present = !!(state & (1 << battery->id));
if (!battery->present)
return 0;


@@ -155,7 +155,7 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console)
* Baud Rate field. If this field is zero or not present, Configured
* Baud Rate is used.
*/
if (table->precise_baudrate)
if (table->header.revision >= 4 && table->precise_baudrate)
baud_rate = table->precise_baudrate;
else switch (table->baud_rate) {
case 0:


@@ -48,8 +48,7 @@ struct regmap *__regmap_init_slimbus(struct slim_device *slimbus,
if (IS_ERR(bus))
return ERR_CAST(bus);
return __regmap_init(&slimbus->dev, bus, &slimbus->dev, config,
lock_key, lock_name);
return __regmap_init(&slimbus->dev, bus, slimbus, config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__regmap_init_slimbus);
@@ -63,8 +62,7 @@ struct regmap *__devm_regmap_init_slimbus(struct slim_device *slimbus,
if (IS_ERR(bus))
return ERR_CAST(bus);
return __devm_regmap_init(&slimbus->dev, bus, &slimbus, config,
lock_key, lock_name);
return __devm_regmap_init(&slimbus->dev, bus, slimbus, config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_slimbus);


@@ -535,14 +535,29 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode,
ref_array = prop->pointer;
ref = &ref_array[index];
refnode = software_node_fwnode(ref->node);
/*
* A software node can reference other software nodes or firmware
* nodes (which are the abstraction layer sitting on top of them).
* This is done to ensure we can create references to static software
* nodes before they're registered with the firmware node framework.
* At the time the reference is being resolved, we expect the swnodes
* in question to already have been registered and to be backed by
* a firmware node. This is why we use the fwnode API below to read the
* relevant properties and bump the reference count.
*/
if (ref->swnode)
refnode = software_node_fwnode(ref->swnode);
else if (ref->fwnode)
refnode = ref->fwnode;
else
return -EINVAL;
if (!refnode)
return -ENOENT;
if (nargs_prop) {
error = property_entry_read_int_array(ref->node->properties,
nargs_prop, sizeof(u32),
&nargs_prop_val, 1);
error = fwnode_property_read_u32(refnode, nargs_prop, &nargs_prop_val);
if (error)
return error;
@@ -555,7 +570,7 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode,
if (!args)
return 0;
args->fwnode = software_node_get(refnode);
args->fwnode = fwnode_handle_get(refnode);
args->nargs = nargs;
for (i = 0; i < nargs; i++)
@@ -635,7 +650,10 @@ software_node_graph_get_remote_endpoint(const struct fwnode_handle *fwnode)
ref = prop->pointer;
return software_node_get(software_node_fwnode(ref[0].node));
if (!ref->swnode)
return NULL;
return software_node_get(software_node_fwnode(ref->swnode));
}
static struct fwnode_handle *


@@ -294,6 +294,8 @@ static int bcma_register_devices(struct bcma_bus *bus)
int err;
list_for_each_entry(core, &bus->cores, list) {
struct device_node *np;
/* We support that core ourselves */
switch (core->id.id) {
case BCMA_CORE_4706_CHIPCOMMON:
@@ -311,6 +313,10 @@ static int bcma_register_devices(struct bcma_bus *bus)
if (bcma_is_core_needed_early(core->id.id))
continue;
np = core->dev.of_node;
if (np && !of_device_is_available(np))
continue;
/* Only first GMAC core on BCM4706 is connected and working */
if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
core->core_unit > 0)


@@ -1949,6 +1949,7 @@ static int null_add_dev(struct nullb_device *dev)
.logical_block_size = dev->blocksize,
.physical_block_size = dev->blocksize,
.max_hw_sectors = dev->max_sectors,
.dma_alignment = dev->blocksize - 1,
};
struct nullb *nullb;


@@ -41,6 +41,7 @@ struct bpa10x_data {
struct usb_anchor rx_anchor;
struct sk_buff *rx_skb[2];
struct hci_uart hu;
};
static void bpa10x_tx_complete(struct urb *urb)
@@ -96,7 +97,7 @@ static void bpa10x_rx_complete(struct urb *urb)
if (urb->status == 0) {
bool idx = usb_pipebulk(urb->pipe);
data->rx_skb[idx] = h4_recv_buf(hdev, data->rx_skb[idx],
data->rx_skb[idx] = h4_recv_buf(&data->hu, data->rx_skb[idx],
urb->transfer_buffer,
urb->actual_length,
bpa10x_recv_pkts,
@@ -388,6 +389,7 @@ static int bpa10x_probe(struct usb_interface *intf,
hci_set_drvdata(hdev, data);
data->hdev = hdev;
data->hu.hdev = hdev;
SET_HCIDEV_DEV(hdev, &intf->dev);


@@ -1467,11 +1467,6 @@ static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP1)
btintel_pcie_msix_gp1_handler(data);
/* This interrupt is triggered by the firmware after updating
* boot_stage register and image_response register
*/
if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0)
btintel_pcie_msix_gp0_handler(data);
/* For TX */
if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0) {
@@ -1487,6 +1482,12 @@ static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
btintel_pcie_msix_tx_handle(data);
}
/* This interrupt is triggered by the firmware after updating
* boot_stage register and image_response register
*/
if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0)
btintel_pcie_msix_gp0_handler(data);
/*
* Before sending the interrupt the HW disables it to prevent a nested
* interrupt. This is done by writing 1 to the corresponding bit in


@@ -1270,6 +1270,12 @@ static void btmtksdio_reset(struct hci_dev *hdev)
sdio_claim_host(bdev->func);
/* set drv_pmctrl if BT is closed before doing reset */
if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state)) {
sdio_enable_func(bdev->func);
btmtksdio_drv_pmctrl(bdev);
}
sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, NULL);
skb_queue_purge(&bdev->txq);
cancel_work_sync(&bdev->txrx_work);
@@ -1285,6 +1291,12 @@ static void btmtksdio_reset(struct hci_dev *hdev)
goto err;
}
/* set fw_pmctrl back if BT is closed after doing reset */
if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state)) {
btmtksdio_fw_pmctrl(bdev);
sdio_disable_func(bdev->func);
}
clear_bit(BTMTKSDIO_PATCH_ENABLED, &bdev->tx_state);
err:
sdio_release_host(bdev->func);


@@ -79,6 +79,7 @@ struct btmtkuart_dev {
u16 stp_dlen;
const struct btmtkuart_data *data;
struct hci_uart hu;
};
#define btmtkuart_is_standalone(bdev) \
@@ -368,7 +369,7 @@ static void btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
sz_left -= adv;
p_left += adv;
bdev->rx_skb = h4_recv_buf(bdev->hdev, bdev->rx_skb, p_h4,
bdev->rx_skb = h4_recv_buf(&bdev->hu, bdev->rx_skb, p_h4,
sz_h4, mtk_recv_pkts,
ARRAY_SIZE(mtk_recv_pkts));
if (IS_ERR(bdev->rx_skb)) {
@@ -858,6 +859,7 @@ static int btmtkuart_probe(struct serdev_device *serdev)
}
bdev->hdev = hdev;
bdev->hu.hdev = hdev;
hdev->bus = HCI_UART;
hci_set_drvdata(hdev, bdev);


@@ -212,6 +212,7 @@ struct btnxpuart_dev {
struct ps_data psdata;
struct btnxpuart_data *nxp_data;
struct reset_control *pdn;
struct hci_uart hu;
};
#define NXP_V1_FW_REQ_PKT 0xa5
@@ -1756,7 +1757,7 @@ static size_t btnxpuart_receive_buf(struct serdev_device *serdev,
ps_start_timer(nxpdev);
nxpdev->rx_skb = h4_recv_buf(nxpdev->hdev, nxpdev->rx_skb, data, count,
nxpdev->rx_skb = h4_recv_buf(&nxpdev->hu, nxpdev->rx_skb, data, count,
nxp_recv_pkts, ARRAY_SIZE(nxp_recv_pkts));
if (IS_ERR(nxpdev->rx_skb)) {
int err = PTR_ERR(nxpdev->rx_skb);
@@ -1875,6 +1876,7 @@ static int nxp_serdev_probe(struct serdev_device *serdev)
reset_control_deassert(nxpdev->pdn);
nxpdev->hdev = hdev;
nxpdev->hu.hdev = hdev;
hdev->bus = HCI_UART;
hci_set_drvdata(hdev, nxpdev);


@@ -625,8 +625,10 @@ static int rtlbt_parse_firmware_v2(struct hci_dev *hdev,
len += entry->len;
}
if (!len)
if (!len) {
kvfree(ptr);
return -EPERM;
}
*_buf = ptr;
return len;


@@ -105,7 +105,7 @@ static int ag6xx_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
ag6xx->rx_skb = h4_recv_buf(hu->hdev, ag6xx->rx_skb, data, count,
ag6xx->rx_skb = h4_recv_buf(hu, ag6xx->rx_skb, data, count,
ag6xx_recv_pkts,
ARRAY_SIZE(ag6xx_recv_pkts));
if (IS_ERR(ag6xx->rx_skb)) {


@@ -650,7 +650,7 @@ static int aml_recv(struct hci_uart *hu, const void *data, int count)
struct aml_data *aml_data = hu->priv;
int err;
aml_data->rx_skb = h4_recv_buf(hu->hdev, aml_data->rx_skb, data, count,
aml_data->rx_skb = h4_recv_buf(hu, aml_data->rx_skb, data, count,
aml_recv_pkts,
ARRAY_SIZE(aml_recv_pkts));
if (IS_ERR(aml_data->rx_skb)) {


@@ -191,7 +191,7 @@ static int ath_recv(struct hci_uart *hu, const void *data, int count)
{
struct ath_struct *ath = hu->priv;
ath->rx_skb = h4_recv_buf(hu->hdev, ath->rx_skb, data, count,
ath->rx_skb = h4_recv_buf(hu, ath->rx_skb, data, count,
ath_recv_pkts, ARRAY_SIZE(ath_recv_pkts));
if (IS_ERR(ath->rx_skb)) {
int err = PTR_ERR(ath->rx_skb);


@@ -698,7 +698,7 @@ static int bcm_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
bcm->rx_skb = h4_recv_buf(hu->hdev, bcm->rx_skb, data, count,
bcm->rx_skb = h4_recv_buf(hu, bcm->rx_skb, data, count,
bcm_recv_pkts, ARRAY_SIZE(bcm_recv_pkts));
if (IS_ERR(bcm->rx_skb)) {
int err = PTR_ERR(bcm->rx_skb);


@@ -112,7 +112,7 @@ static int h4_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
h4->rx_skb = h4_recv_buf(hu->hdev, h4->rx_skb, data, count,
h4->rx_skb = h4_recv_buf(hu, h4->rx_skb, data, count,
h4_recv_pkts, ARRAY_SIZE(h4_recv_pkts));
if (IS_ERR(h4->rx_skb)) {
int err = PTR_ERR(h4->rx_skb);
@@ -151,12 +151,12 @@ int __exit h4_deinit(void)
return hci_uart_unregister_proto(&h4p);
}
struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
struct sk_buff *h4_recv_buf(struct hci_uart *hu, struct sk_buff *skb,
const unsigned char *buffer, int count,
const struct h4_recv_pkt *pkts, int pkts_count)
{
struct hci_uart *hu = hci_get_drvdata(hdev);
u8 alignment = hu->alignment ? hu->alignment : 1;
struct hci_dev *hdev = hu->hdev;
/* Check for error from previous call */
if (IS_ERR(skb))


@@ -972,7 +972,7 @@ static int intel_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
intel->rx_skb = h4_recv_buf(hu->hdev, intel->rx_skb, data, count,
intel->rx_skb = h4_recv_buf(hu, intel->rx_skb, data, count,
intel_recv_pkts,
ARRAY_SIZE(intel_recv_pkts));
if (IS_ERR(intel->rx_skb)) {


@@ -429,7 +429,7 @@ static int ll_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
ll->rx_skb = h4_recv_buf(hu->hdev, ll->rx_skb, data, count,
ll->rx_skb = h4_recv_buf(hu, ll->rx_skb, data, count,
ll_recv_pkts, ARRAY_SIZE(ll_recv_pkts));
if (IS_ERR(ll->rx_skb)) {
int err = PTR_ERR(ll->rx_skb);


@@ -264,9 +264,9 @@ static int mrvl_recv(struct hci_uart *hu, const void *data, int count)
!test_bit(STATE_FW_LOADED, &mrvl->flags))
return count;
mrvl->rx_skb = h4_recv_buf(hu->hdev, mrvl->rx_skb, data, count,
mrvl_recv_pkts,
ARRAY_SIZE(mrvl_recv_pkts));
mrvl->rx_skb = h4_recv_buf(hu, mrvl->rx_skb, data, count,
mrvl_recv_pkts,
ARRAY_SIZE(mrvl_recv_pkts));
if (IS_ERR(mrvl->rx_skb)) {
int err = PTR_ERR(mrvl->rx_skb);
bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);


@@ -624,8 +624,8 @@ static int nokia_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
btdev->rx_skb = h4_recv_buf(hu->hdev, btdev->rx_skb, data, count,
nokia_recv_pkts, ARRAY_SIZE(nokia_recv_pkts));
btdev->rx_skb = h4_recv_buf(hu, btdev->rx_skb, data, count,
nokia_recv_pkts, ARRAY_SIZE(nokia_recv_pkts));
if (IS_ERR(btdev->rx_skb)) {
err = PTR_ERR(btdev->rx_skb);
dev_err(dev, "Frame reassembly failed (%d)", err);


@@ -1277,7 +1277,7 @@ static int qca_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count,
qca->rx_skb = h4_recv_buf(hu, qca->rx_skb, data, count,
qca_recv_pkts, ARRAY_SIZE(qca_recv_pkts));
if (IS_ERR(qca->rx_skb)) {
int err = PTR_ERR(qca->rx_skb);


@@ -162,7 +162,7 @@ struct h4_recv_pkt {
int h4_init(void);
int h4_deinit(void);
struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
struct sk_buff *h4_recv_buf(struct hci_uart *hu, struct sk_buff *skb,
const unsigned char *buffer, int count,
const struct h4_recv_pkt *pkts, int pkts_count);
#endif


@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
@@ -303,8 +304,8 @@ static int sbi_cpuidle_init_cpu(struct device *dev, int cpu)
drv->states[0].exit_latency = 1;
drv->states[0].target_residency = 1;
drv->states[0].power_usage = UINT_MAX;
strcpy(drv->states[0].name, "WFI");
strcpy(drv->states[0].desc, "RISC-V WFI");
strscpy(drv->states[0].name, "WFI");
strscpy(drv->states[0].desc, "RISC-V WFI");
/*
* If no DT idle states are detected (ret == 0) let the driver


@@ -318,10 +318,13 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
/*
* Use a physical idle state, not busy polling, unless a timer
* is going to trigger soon enough.
* is going to trigger soon enough or the exit latency of the
* idle state in question is greater than the predicted idle
* duration.
*/
if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
s->target_residency_ns <= data->next_timer_ns) {
s->target_residency_ns <= data->next_timer_ns &&
s->exit_latency_ns <= predicted_ns) {
predicted_ns = s->target_residency_ns;
idx = i;
break;


@@ -787,7 +787,6 @@ static int aspeed_acry_probe(struct platform_device *pdev)
err_engine_rsa_start:
crypto_engine_exit(acry_dev->crypt_engine_rsa);
clk_exit:
clk_disable_unprepare(acry_dev->clk);
return rc;
}
@@ -799,7 +798,6 @@ static void aspeed_acry_remove(struct platform_device *pdev)
aspeed_acry_unregister(acry_dev);
crypto_engine_exit(acry_dev->crypt_engine_rsa);
tasklet_kill(&acry_dev->done_task);
clk_disable_unprepare(acry_dev->clk);
}
MODULE_DEVICE_TABLE(of, aspeed_acry_of_matches);


@@ -1141,7 +1141,7 @@ const char __rcu *dma_fence_timeline_name(struct dma_fence *fence)
"RCU protection is required for safe access to returned string");
if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return fence->ops->get_driver_name(fence);
return fence->ops->get_timeline_name(fence);
else
return "signaled-timeline";
}


@@ -1559,16 +1559,18 @@ int dpll_nl_pin_id_get_doit(struct sk_buff *skb, struct genl_info *info)
return -EMSGSIZE;
}
pin = dpll_pin_find_from_nlattr(info);
if (!IS_ERR(pin)) {
if (!dpll_pin_available(pin)) {
nlmsg_free(msg);
return -ENODEV;
}
ret = dpll_msg_add_pin_handle(msg, pin);
if (ret) {
nlmsg_free(msg);
return ret;
}
if (IS_ERR(pin)) {
nlmsg_free(msg);
return PTR_ERR(pin);
}
if (!dpll_pin_available(pin)) {
nlmsg_free(msg);
return -ENODEV;
}
ret = dpll_msg_add_pin_handle(msg, pin);
if (ret) {
nlmsg_free(msg);
return ret;
}
genlmsg_end(msg, hdr);
@@ -1735,12 +1737,14 @@ int dpll_nl_device_id_get_doit(struct sk_buff *skb, struct genl_info *info)
}
dpll = dpll_device_find_from_nlattr(info);
if (!IS_ERR(dpll)) {
ret = dpll_msg_add_dev_handle(msg, dpll);
if (ret) {
nlmsg_free(msg);
return ret;
}
if (IS_ERR(dpll)) {
nlmsg_free(msg);
return PTR_ERR(dpll);
}
ret = dpll_msg_add_dev_handle(msg, dpll);
if (ret) {
nlmsg_free(msg);
return ret;
}
genlmsg_end(msg, hdr);


@@ -1904,7 +1904,7 @@ zl3073x_dpll_pin_is_registrable(struct zl3073x_dpll *zldpll,
}
is_diff = zl3073x_out_is_diff(zldev, out);
is_enabled = zl3073x_out_is_enabled(zldev, out);
is_enabled = zl3073x_output_pin_is_enabled(zldev, index);
}
/* Skip N-pin if the corresponding input/output is differential */


@@ -433,7 +433,7 @@ static void handle_error(struct mc_priv *priv, struct ecc_status *stat,
phys_addr_t pfn;
int err;
if (WARN_ON_ONCE(ctl_num > NUM_CONTROLLERS))
if (WARN_ON_ONCE(ctl_num >= NUM_CONTROLLERS))
return;
mci = priv->mci[ctl_num];


@@ -723,6 +723,7 @@ struct gpiochip_fwd *devm_gpiochip_fwd_alloc(struct device *dev,
chip->get_multiple = gpio_fwd_get_multiple_locked;
chip->set = gpio_fwd_set;
chip->set_multiple = gpio_fwd_set_multiple_locked;
chip->set_config = gpio_fwd_set_config;
chip->to_irq = gpio_fwd_to_irq;
chip->base = -1;
chip->ngpio = ngpios;


@@ -50,25 +50,6 @@ static inline u32 tb10x_reg_read(struct tb10x_gpio *gpio, unsigned int offs)
return ioread32(gpio->base + offs);
}
static inline void tb10x_reg_write(struct tb10x_gpio *gpio, unsigned int offs,
u32 val)
{
iowrite32(val, gpio->base + offs);
}
static inline void tb10x_set_bits(struct tb10x_gpio *gpio, unsigned int offs,
u32 mask, u32 val)
{
u32 r;
guard(gpio_generic_lock_irqsave)(&gpio->chip);
r = tb10x_reg_read(gpio, offs);
r = (r & ~mask) | (val & mask);
tb10x_reg_write(gpio, offs, r);
}
static int tb10x_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
struct tb10x_gpio *tb10x_gpio = gpiochip_get_data(chip);


@@ -31,7 +31,7 @@ static struct gpio_device *swnode_get_gpio_device(struct fwnode_handle *fwnode)
gdev_node = to_software_node(fwnode);
if (!gdev_node || !gdev_node->name)
return ERR_PTR(-EINVAL);
goto fwnode_lookup;
/*
* Check for a special node that identifies undefined GPIOs, this is
@@ -41,7 +41,8 @@ static struct gpio_device *swnode_get_gpio_device(struct fwnode_handle *fwnode)
!strcmp(gdev_node->name, GPIOLIB_SWNODE_UNDEFINED_NAME))
return ERR_PTR(-ENOENT);
gdev = gpio_device_find_by_label(gdev_node->name);
fwnode_lookup:
gdev = gpio_device_find_by_fwnode(fwnode);
return gdev ?: ERR_PTR(-EPROBE_DEFER);
}


@@ -5355,6 +5355,8 @@ static void *gpiolib_seq_start(struct seq_file *s, loff_t *pos)
struct gpio_device *gdev;
loff_t index = *pos;
s->private = NULL;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return NULL;
@@ -5388,7 +5390,11 @@ static void *gpiolib_seq_next(struct seq_file *s, void *v, loff_t *pos)
static void gpiolib_seq_stop(struct seq_file *s, void *v)
{
struct gpiolib_seq_priv *priv = s->private;
struct gpiolib_seq_priv *priv;
priv = s->private;
if (!priv)
return;
srcu_read_unlock(&gpio_devices_srcu, priv->idx);
kfree(priv);


@@ -245,7 +245,7 @@ always-$(CONFIG_DRM_HEADER_TEST) += \
quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@)
cmd_hdrtest = \
$(CC) $(c_flags) -fsyntax-only -x c /dev/null -include $< -include $<; \
PYTHONDONTWRITEBYTECODE=1 $(KERNELDOC) -none $(if $(CONFIG_WERROR)$(CONFIG_DRM_WERROR),-Werror) $<; \
PYTHONDONTWRITEBYTECODE=1 $(PYTHON3) $(KERNELDOC) -none $(if $(CONFIG_WERROR)$(CONFIG_DRM_WERROR),-Werror) $<; \
touch $@
$(obj)/%.hdrtest: $(src)/%.h FORCE


@@ -1267,6 +1267,10 @@ static int unmap_bo_from_gpuvm(struct kgd_mem *mem,
(void)amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
/* VM entity stopped if process killed, don't clear freed pt bo */
if (!amdgpu_vm_ready(vm))
return 0;
(void)amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
(void)amdgpu_sync_fence(sync, bo_va->last_pt_update, GFP_KERNEL);


@@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
// SPDX-License-Identifier: MIT
/*
* Copyright 2025 Advanced Micro Devices, Inc.
*


@@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* SPDX-License-Identifier: MIT */
/*
* Copyright 2025 Advanced Micro Devices, Inc.
*


@@ -5243,10 +5243,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
if (amdgpu_sriov_vf(adev))
amdgpu_virt_release_full_gpu(adev, false);
r = amdgpu_dpm_notify_rlc_state(adev, false);
if (r)
return r;
return 0;
}


@@ -2632,9 +2632,14 @@ static int amdgpu_pmops_suspend_noirq(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
int r;
if (amdgpu_acpi_should_gpu_reset(adev))
return amdgpu_asic_reset(adev);
if (amdgpu_acpi_should_gpu_reset(adev)) {
amdgpu_device_lock_reset_domain(adev->reset_domain);
r = amdgpu_asic_reset(adev);
amdgpu_device_unlock_reset_domain(adev->reset_domain);
return r;
}
return 0;
}


@@ -2355,8 +2355,11 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
if (!ret && !psp->securedisplay_context.context.resp_status) {
psp->securedisplay_context.context.initialized = true;
mutex_init(&psp->securedisplay_context.mutex);
} else
} else {
/* don't try again */
psp->securedisplay_context.context.bin_desc.size_bytes = 0;
return ret;
}
mutex_lock(&psp->securedisplay_context.mutex);


@@ -322,6 +322,26 @@ static int vpe_early_init(struct amdgpu_ip_block *ip_block)
return 0;
}
static bool vpe_need_dpm0_at_power_down(struct amdgpu_device *adev)
{
switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
case IP_VERSION(6, 1, 1):
return adev->pm.fw_version < 0x0a640500;
default:
return false;
}
}
static int vpe_get_dpm_level(struct amdgpu_device *adev)
{
struct amdgpu_vpe *vpe = &adev->vpe;
if (!adev->pm.dpm_enabled)
return 0;
return RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_request_lv));
}
static void vpe_idle_work_handler(struct work_struct *work)
{
struct amdgpu_device *adev =
@@ -329,11 +349,17 @@ static void vpe_idle_work_handler(struct work_struct *work)
unsigned int fences = 0;
fences += amdgpu_fence_count_emitted(&adev->vpe.ring);
if (fences)
goto reschedule;
if (fences == 0)
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE);
else
schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
if (vpe_need_dpm0_at_power_down(adev) && vpe_get_dpm_level(adev) != 0)
goto reschedule;
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE);
return;
reschedule:
schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
}
static int vpe_common_init(struct amdgpu_vpe *vpe)

Some files were not shown because too many files have changed in this diff.