Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf after 6.18-rc4

Cross-merge BPF and other fixes after downstream PR.
No conflicts.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Alexei Starovoitov
2025-11-03 14:59:55 -08:00
500 changed files with 4677 additions and 2435 deletions

View File

@@ -27,6 +27,7 @@ Alan Cox <alan@lxorguk.ukuu.org.uk>
Alan Cox <root@hraefn.swansea.linux.org.uk>
Aleksandar Markovic <aleksandar.markovic@mips.com> <aleksandar.markovic@imgtec.com>
Aleksey Gorelov <aleksey_gorelov@phoenix.com>
Alex Williamson <alex@shazbot.org> <alex.williamson@redhat.com>
Alexander Lobakin <alobakin@pm.me> <alobakin@dlink.ru>
Alexander Lobakin <alobakin@pm.me> <alobakin@marvell.com>
Alexander Lobakin <alobakin@pm.me> <bloodyreaper@yandex.ru>
@@ -643,6 +644,7 @@ Qais Yousef <qyousef@layalina.io> <qais.yousef@arm.com>
Quentin Monnet <qmo@kernel.org> <quentin.monnet@netronome.com>
Quentin Monnet <qmo@kernel.org> <quentin@isovalent.com>
Quentin Perret <qperret@qperret.net> <quentin.perret@arm.com>
Rae Moar <raemoar63@gmail.com> <rmoar@google.com>
Rafael J. Wysocki <rjw@rjwysocki.net> <rjw@sisk.pl>
Rajeev Nandan <quic_rajeevny@quicinc.com> <rajeevny@codeaurora.org>
Rajendra Nayak <quic_rjendra@quicinc.com> <rnayak@codeaurora.org>

View File

@@ -2036,6 +2036,10 @@ S: Botanicka' 68a
S: 602 00 Brno
S: Czech Republic
N: Karsten Keil
E: isdn@linux-pingi.de
D: ISDN subsystem maintainer
N: Jakob Kemi
E: jakob.kemi@telia.com
D: V4L W9966 Webcam driver

View File

@@ -180,9 +180,9 @@ allOf:
then:
properties:
reg:
minItems: 2
maxItems: 2
reg-names:
minItems: 2
maxItems: 2
else:
properties:
reg:

View File

@@ -142,7 +142,9 @@ allOf:
required:
- orientation-switch
then:
$ref: /schemas/usb/usb-switch.yaml#
allOf:
- $ref: /schemas/usb/usb-switch.yaml#
- $ref: /schemas/usb/usb-switch-ports.yaml#
unevaluatedProperties: false

View File

@@ -24,6 +24,10 @@ properties:
- enum:
- qcom,qcs8300-qmp-ufs-phy
- const: qcom,sa8775p-qmp-ufs-phy
- items:
- enum:
- qcom,kaanapali-qmp-ufs-phy
- const: qcom,sm8750-qmp-ufs-phy
- enum:
- qcom,msm8996-qmp-ufs-phy
- qcom,msm8998-qmp-ufs-phy

View File

@@ -125,7 +125,9 @@ allOf:
contains:
const: google,gs101-usb31drd-phy
then:
$ref: /schemas/usb/usb-switch.yaml#
allOf:
- $ref: /schemas/usb/usb-switch.yaml#
- $ref: /schemas/usb/usb-switch-ports.yaml#
properties:
clocks:

View File

@@ -197,6 +197,7 @@ allOf:
- renesas,rcar-gen2-scif
- renesas,rcar-gen3-scif
- renesas,rcar-gen4-scif
- renesas,rcar-gen5-scif
then:
properties:
interrupts:

View File

@@ -32,7 +32,7 @@ properties:
$ref: /schemas/types.yaml#/definitions/uint32-array
minItems: 2
maxItems: 2
maxItems: 4
items:
enum: [1, 2, 3, 4]
@@ -48,7 +48,7 @@ properties:
$ref: /schemas/types.yaml#/definitions/uint32-array
minItems: 2
maxItems: 2
maxItems: 5
items:
enum: [1, 2, 3, 4, 5]

View File

@@ -14,9 +14,14 @@ allOf:
properties:
compatible:
enum:
- cdns,spi-r1p6
- xlnx,zynq-spi-r1p6
oneOf:
- enum:
- xlnx,zynq-spi-r1p6
- items:
- enum:
- xlnx,zynqmp-spi-r1p6
- xlnx,versal-net-spi-r1p6
- const: cdns,spi-r1p6
reg:
maxItems: 1

View File

@@ -34,6 +34,7 @@ properties:
- rockchip,rk3328-spi
- rockchip,rk3368-spi
- rockchip,rk3399-spi
- rockchip,rk3506-spi
- rockchip,rk3528-spi
- rockchip,rk3562-spi
- rockchip,rk3568-spi

View File

@@ -15,6 +15,7 @@ select:
compatible:
contains:
enum:
- qcom,kaanapali-ufshc
- qcom,sm8650-ufshc
- qcom,sm8750-ufshc
required:
@@ -24,6 +25,7 @@ properties:
compatible:
items:
- enum:
- qcom,kaanapali-ufshc
- qcom,sm8650-ufshc
- qcom,sm8750-ufshc
- const: qcom,ufshc

View File

@@ -76,6 +76,7 @@ required:
allOf:
- $ref: usb-switch.yaml#
- $ref: usb-switch-ports.yaml#
additionalProperties: false

View File

@@ -89,13 +89,21 @@ required:
- reg
- "#address-cells"
- "#size-cells"
- dma-ranges
- ranges
- clocks
- clock-names
- interrupts
- power-domains
allOf:
- if:
properties:
compatible:
const: fsl,imx8mp-dwc3
then:
required:
- dma-ranges
additionalProperties: false
examples:

View File

@@ -52,6 +52,7 @@ required:
allOf:
- $ref: usb-switch.yaml#
- $ref: usb-switch-ports.yaml#
- if:
required:
- mode-switch

View File

@@ -46,6 +46,7 @@ required:
allOf:
- $ref: usb-switch.yaml#
- $ref: usb-switch-ports.yaml#
additionalProperties: false

View File

@@ -91,6 +91,7 @@ required:
allOf:
- $ref: usb-switch.yaml#
- $ref: usb-switch-ports.yaml#
additionalProperties: false

View File

@@ -81,6 +81,7 @@ required:
allOf:
- $ref: usb-switch.yaml#
- $ref: usb-switch-ports.yaml#
additionalProperties: false

View File

@@ -68,6 +68,7 @@ properties:
- qcom,sm8550-dwc3
- qcom,sm8650-dwc3
- qcom,x1e80100-dwc3
- qcom,x1e80100-dwc3-mp
- const: qcom,snps-dwc3
reg:
@@ -460,8 +461,10 @@ allOf:
then:
properties:
interrupts:
minItems: 4
maxItems: 5
interrupt-names:
minItems: 4
items:
- const: dwc_usb3
- const: pwr_event

View File

@@ -60,6 +60,7 @@ required:
allOf:
- $ref: usb-switch.yaml#
- $ref: usb-switch-ports.yaml#
additionalProperties: false

View File

@@ -11,6 +11,7 @@ maintainers:
allOf:
- $ref: usb-switch.yaml#
- $ref: usb-switch-ports.yaml#
properties:
compatible:

View File

@@ -0,0 +1,68 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/usb/usb-switch-ports.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: USB Orientation and Mode Switches Ports Graph Properties
maintainers:
- Greg Kroah-Hartman <gregkh@linuxfoundation.org>
description:
Ports Graph properties for devices handling USB mode and orientation switching.
properties:
port:
$ref: /schemas/graph.yaml#/$defs/port-base
description:
A port node to link the device to a TypeC controller for the purpose of
handling altmode muxing and orientation switching.
properties:
endpoint:
$ref: /schemas/graph.yaml#/$defs/endpoint-base
unevaluatedProperties: false
properties:
data-lanes:
$ref: /schemas/types.yaml#/definitions/uint32-array
minItems: 1
maxItems: 8
uniqueItems: true
items:
maximum: 8
ports:
$ref: /schemas/graph.yaml#/properties/ports
properties:
port@0:
$ref: /schemas/graph.yaml#/properties/port
description:
Super Speed (SS) Output endpoint to the Type-C connector
port@1:
$ref: /schemas/graph.yaml#/$defs/port-base
description:
Super Speed (SS) Input endpoint from the Super-Speed PHY
unevaluatedProperties: false
properties:
endpoint:
$ref: /schemas/graph.yaml#/$defs/endpoint-base
unevaluatedProperties: false
properties:
data-lanes:
$ref: /schemas/types.yaml#/definitions/uint32-array
minItems: 1
maxItems: 8
uniqueItems: true
items:
maximum: 8
oneOf:
- required:
- port
- required:
- ports
additionalProperties: true

View File

@@ -25,56 +25,4 @@ properties:
description: Possible handler of SuperSpeed signals retiming
type: boolean
port:
$ref: /schemas/graph.yaml#/$defs/port-base
description:
A port node to link the device to a TypeC controller for the purpose of
handling altmode muxing and orientation switching.
properties:
endpoint:
$ref: /schemas/graph.yaml#/$defs/endpoint-base
unevaluatedProperties: false
properties:
data-lanes:
$ref: /schemas/types.yaml#/definitions/uint32-array
minItems: 1
maxItems: 8
uniqueItems: true
items:
maximum: 8
ports:
$ref: /schemas/graph.yaml#/properties/ports
properties:
port@0:
$ref: /schemas/graph.yaml#/properties/port
description:
Super Speed (SS) Output endpoint to the Type-C connector
port@1:
$ref: /schemas/graph.yaml#/$defs/port-base
description:
Super Speed (SS) Input endpoint from the Super-Speed PHY
unevaluatedProperties: false
properties:
endpoint:
$ref: /schemas/graph.yaml#/$defs/endpoint-base
unevaluatedProperties: false
properties:
data-lanes:
$ref: /schemas/types.yaml#/definitions/uint32-array
minItems: 1
maxItems: 8
uniqueItems: true
items:
maximum: 8
oneOf:
- required:
- port
- required:
- ports
additionalProperties: true

View File

@@ -605,6 +605,8 @@ operations:
reply: &pin-attrs
attributes:
- id
- module-name
- clock-id
- board-label
- panel-label
- package-label

View File

@@ -11,6 +11,7 @@ found on https://linux-ax25.in-berlin.de.
There is a mailing list for discussing Linux amateur radio matters
called linux-hams@vger.kernel.org. To subscribe to it, send a message to
majordomo@vger.kernel.org with the words "subscribe linux-hams" in the body
of the message, the subject field is ignored. You don't need to be
subscribed to post but of course that means you might miss an answer.
linux-hams+subscribe@vger.kernel.org or use the web interface at
https://vger.kernel.org. The subject and body of the message are
ignored. You don't need to be subscribed to post but of course that
means you might miss an answer.

View File

@@ -137,16 +137,20 @@ d. Checksum offload header v5
Checksum offload header fields are in big endian format.
Packet format::
Bit        0 - 6          7             8 - 15           16 - 31
Function   Header Type    Next Header   Checksum Valid   Reserved
Header Type is to indicate the type of header, this usually is set to CHECKSUM
Header types
= ==========================================
= ===============
0 Reserved
1 Reserved
2 checksum header
= ===============
Checksum Valid is to indicate whether the header checksum is valid. Value of 1
implies that checksum is calculated on this packet and is valid, value of 0
@@ -183,9 +187,11 @@ rmnet in a single linear skb. rmnet will process the individual
packets and either ACK the MAP command or deliver the IP packet to the
network stack as needed
MAP header|IP Packet|Optional padding|MAP header|IP Packet|Optional padding....
Packet format::
MAP header|IP Packet|Optional padding|MAP header|Command Packet|Optional pad...
MAP header|IP Packet|Optional padding|MAP header|IP Packet|Optional padding....
MAP header|IP Packet|Optional padding|MAP header|Command Packet|Optional pad...
3. Userspace configuration
==========================

View File

@@ -96,9 +96,8 @@ needed to these network configuration daemons to make sure that an IP is
received only on the 'failover' device.
Below is the patch snippet used with 'cloud-ifupdown-helper' script found on
Debian cloud images:
Debian cloud images::
::
@@ -27,6 +27,8 @@ do_setup() {
local working="$cfgdir/.$INTERFACE"
local final="$cfgdir/$INTERFACE"
@@ -172,9 +171,8 @@ appropriate FDB entry is added.
The following script is executed on the destination hypervisor once migration
completes, and it reattaches the VF to the VM and brings down the virtio-net
interface.
interface::
::
# reattach-vf.sh
#!/bin/bash

View File

@@ -19,9 +19,6 @@ Userdata append support by Matthew Wood <thepacketgeek@gmail.com>, Jan 22 2024
Sysdata append support by Breno Leitao <leitao@debian.org>, Jan 15 2025
Please send bug reports to Matt Mackall <mpm@selenic.com>
Satyam Sharma <satyam.sharma@gmail.com>, and Cong Wang <xiyou.wangcong@gmail.com>
Introduction:
=============

View File

@@ -1997,6 +1997,10 @@ F: include/uapi/linux/if_arcnet.h
ARM AND ARM64 SoC SUB-ARCHITECTURES (COMMON PARTS)
M: Arnd Bergmann <arnd@arndb.de>
M: Krzysztof Kozlowski <krzk@kernel.org>
M: Alexandre Belloni <alexandre.belloni@bootlin.com>
M: Linus Walleij <linus.walleij@linaro.org>
R: Drew Fustini <fustini@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: soc@lists.linux.dev
S: Maintained
@@ -3841,6 +3845,7 @@ F: drivers/hwmon/asus-ec-sensors.c
ASUS NOTEBOOKS AND EEEPC ACPI/WMI EXTRAS DRIVERS
M: Corentin Chary <corentin.chary@gmail.com>
M: Luke D. Jones <luke@ljones.dev>
M: Denis Benato <benato.denis96@gmail.com>
L: platform-driver-x86@vger.kernel.org
S: Maintained
W: https://asus-linux.org/
@@ -13112,6 +13117,15 @@ F: include/uapi/linux/io_uring.h
F: include/uapi/linux/io_uring/
F: io_uring/
IO_URING ZCRX
M: Pavel Begunkov <asml.silence@gmail.com>
L: io-uring@vger.kernel.org
L: netdev@vger.kernel.org
T: git https://github.com/isilence/linux.git zcrx/for-next
T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux.git
S: Maintained
F: io_uring/zcrx.*
IPMI SUBSYSTEM
M: Corey Minyard <corey@minyard.net>
L: openipmi-developer@lists.sourceforge.net (moderated for non-subscribers)
@@ -13247,10 +13261,8 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git mast
F: drivers/infiniband/ulp/isert
ISDN/CMTP OVER BLUETOOTH
M: Karsten Keil <isdn@linux-pingi.de>
L: isdn4linux@listserv.isdn4linux.de (subscribers-only)
L: netdev@vger.kernel.org
S: Odd Fixes
S: Orphan
W: http://www.isdn4linux.de
F: Documentation/isdn/
F: drivers/isdn/capi/
@@ -13259,10 +13271,8 @@ F: include/uapi/linux/isdn/
F: net/bluetooth/cmtp/
ISDN/mISDN SUBSYSTEM
M: Karsten Keil <isdn@linux-pingi.de>
L: isdn4linux@listserv.isdn4linux.de (subscribers-only)
L: netdev@vger.kernel.org
S: Maintained
S: Orphan
W: http://www.isdn4linux.de
F: drivers/isdn/Kconfig
F: drivers/isdn/Makefile
@@ -13416,9 +13426,12 @@ F: mm/kasan/
F: scripts/Makefile.kasan
KCONFIG
M: Nathan Chancellor <nathan@kernel.org>
M: Nicolas Schier <nsc@kernel.org>
L: linux-kbuild@vger.kernel.org
S: Orphan
S: Odd Fixes
Q: https://patchwork.kernel.org/project/linux-kbuild/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kbuild/linux.git
F: Documentation/kbuild/kconfig*
F: scripts/Kconfig.include
F: scripts/kconfig/
@@ -13603,7 +13616,7 @@ F: fs/smb/server/
KERNEL UNIT TESTING FRAMEWORK (KUnit)
M: Brendan Higgins <brendan.higgins@linux.dev>
M: David Gow <davidgow@google.com>
R: Rae Moar <rmoar@google.com>
R: Rae Moar <raemoar63@gmail.com>
L: linux-kselftest@vger.kernel.org
L: kunit-dev@googlegroups.com
S: Maintained
@@ -14395,6 +14408,7 @@ F: tools/memory-model/
LINUX-NEXT TREE
M: Stephen Rothwell <sfr@canb.auug.org.au>
M: Mark Brown <broonie@kernel.org>
L: linux-next@vger.kernel.org
S: Supported
B: mailto:linux-next@vger.kernel.org and the appropriate development tree
@@ -21318,6 +21332,7 @@ F: drivers/media/platform/qcom/venus/
QUALCOMM WCN36XX WIRELESS DRIVER
M: Loic Poulain <loic.poulain@oss.qualcomm.com>
L: wcn36xx@lists.infradead.org
L: linux-wireless@vger.kernel.org
S: Supported
W: https://wireless.wiki.kernel.org/en/users/Drivers/wcn36xx
F: drivers/net/wireless/ath/wcn36xx/
@@ -26886,7 +26901,7 @@ S: Maintained
F: drivers/vfio/cdx/*
VFIO DRIVER
M: Alex Williamson <alex.williamson@redhat.com>
M: Alex Williamson <alex@shazbot.org>
L: kvm@vger.kernel.org
S: Maintained
T: git https://github.com/awilliam/linux-vfio.git
@@ -27049,7 +27064,7 @@ T: git git://linuxtv.org/media.git
F: drivers/media/test-drivers/vimc/*
VIRT LIB
M: Alex Williamson <alex.williamson@redhat.com>
M: Alex Williamson <alex@shazbot.org>
M: Paolo Bonzini <pbonzini@redhat.com>
L: kvm@vger.kernel.org
S: Supported

View File

@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 18
SUBLEVEL = 0
EXTRAVERSION = -rc1
EXTRAVERSION = -rc4
NAME = Baby Opossum Posse
# *DOCUMENTATION*

View File

@@ -77,6 +77,14 @@ &i2c0 {
/delete-property/ pinctrl-0;
};
&pm {
clocks = <&firmware_clocks 5>,
<&clocks BCM2835_CLOCK_PERI_IMAGE>,
<&clocks BCM2835_CLOCK_H264>,
<&clocks BCM2835_CLOCK_ISP>;
clock-names = "v3d", "peri_image", "h264", "isp";
};
&rmem {
/*
* RPi4's co-processor will copy the board's bootloader configuration

View File

@@ -13,7 +13,16 @@ &hdmi {
clock-names = "pixel", "hdmi";
};
&pm {
clocks = <&firmware_clocks 5>,
<&clocks BCM2835_CLOCK_PERI_IMAGE>,
<&clocks BCM2835_CLOCK_H264>,
<&clocks BCM2835_CLOCK_ISP>;
clock-names = "v3d", "peri_image", "h264", "isp";
};
&v3d {
clocks = <&firmware_clocks 5>;
power-domains = <&power RPI_POWER_DOMAIN_V3D>;
};

View File

@@ -326,6 +326,8 @@ gicv2: interrupt-controller@7fff9000 {
<0x7fffe000 0x2000>;
interrupt-controller;
#address-cells = <0>;
interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) |
IRQ_TYPE_LEVEL_HIGH)>;
#interrupt-cells = <3>;
};

View File

@@ -293,7 +293,8 @@ static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
static inline pte_t pte_mkwrite_novma(pte_t pte)
{
pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
if (pte_sw_dirty(pte))
pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
return pte;
}
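
For context, a minimal user-space sketch of the rule this hunk implements, with mock bit definitions (PTE_WRITE, PTE_RDONLY and PTE_DIRTY below are illustrative, not the kernel's layout): making a PTE writable now clears the read-only bit only when the software dirty bit is already set, so clean PTEs stay hardware read-only for dirty tracking.

#include <stdint.h>

#define PTE_WRITE  (1u << 0)   /* mock bit layout */
#define PTE_RDONLY (1u << 1)
#define PTE_DIRTY  (1u << 2)   /* mock software-dirty bit */

static uint32_t mkwrite(uint32_t pte)
{
        pte |= PTE_WRITE;
        if (pte & PTE_DIRTY)          /* only sw-dirty PTEs ... */
                pte &= ~PTE_RDONLY;   /* ... become hw-writable now */
        return pte;
}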

View File

@@ -35,7 +35,7 @@ void copy_highpage(struct page *to, struct page *from)
from != folio_page(src, 0))
return;
WARN_ON_ONCE(!folio_try_hugetlb_mte_tagging(dst));
folio_try_hugetlb_mte_tagging(dst);
/*
* Populate tags for all subpages.
@@ -51,8 +51,13 @@ void copy_highpage(struct page *to, struct page *from)
}
folio_set_hugetlb_mte_tagged(dst);
} else if (page_mte_tagged(from)) {
/* It's a new page, shouldn't have been tagged yet */
WARN_ON_ONCE(!try_page_mte_tagging(to));
/*
* Most of the time it's a new page that shouldn't have been
* tagged yet. However, folio migration can end up reusing the
* same page without untagging it. Ignore the warning if the
* page is already tagged.
*/
try_page_mte_tagging(to);
mte_copy_page_tags(kto, kfrom);
set_page_mte_tagged(to);

View File

@@ -1213,6 +1213,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
u8 src = bpf2a64[insn->src_reg];
const u8 tmp = bpf2a64[TMP_REG_1];
const u8 tmp2 = bpf2a64[TMP_REG_2];
const u8 tmp3 = bpf2a64[TMP_REG_3];
const u8 fp = bpf2a64[BPF_REG_FP];
const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
const u8 priv_sp = bpf2a64[PRIVATE_SP];
@@ -1757,8 +1758,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:
if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) {
emit(A64_ADD(1, tmp2, dst, arena_vm_base), ctx);
dst = tmp2;
emit(A64_ADD(1, tmp3, dst, arena_vm_base), ctx);
dst = tmp3;
}
if (dst == fp) {
dst_adj = ctx->priv_sp_used ? priv_sp : A64_SP;

View File

@@ -21,7 +21,7 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
folio = page_folio(pfn_to_page(pfn));
if (test_and_set_bit(PG_dcache_clean, &folio->flags))
if (test_and_set_bit(PG_dcache_clean, &folio->flags.f))
return;
icache_inv_range(address, address + nr*PAGE_SIZE);

View File

@@ -20,8 +20,8 @@
static inline void flush_dcache_folio(struct folio *folio)
{
if (test_bit(PG_dcache_clean, &folio->flags))
clear_bit(PG_dcache_clean, &folio->flags);
if (test_bit(PG_dcache_clean, &folio->flags.f))
clear_bit(PG_dcache_clean, &folio->flags.f);
}
#define flush_dcache_folio flush_dcache_folio

View File

@@ -47,7 +47,7 @@ static struct resource standard_io_resources[] = {
.name = "keyboard",
.start = 0x60,
.end = 0x6f,
.flags = IORESOURCE_IO | IORESOURCE_BUSY
.flags = IORESOURCE_IO
},
{
.name = "dma page reg",
@@ -213,7 +213,7 @@ void __init plat_mem_setup(void)
/* Request I/O space for devices used on the Malta board. */
for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
request_resource(&ioport_resource, standard_io_resources+i);
insert_resource(&ioport_resource, standard_io_resources + i);
/*
* Enable DMA channel 4 (cascade channel) in the PIIX4 south bridge.

View File

@@ -230,8 +230,7 @@ void __init mips_pcibios_init(void)
}
/* PIIX4 ACPI starts at 0x1000 */
if (controller->io_resource->start < 0x00001000UL)
controller->io_resource->start = 0x00001000UL;
PCIBIOS_MIN_IO = 0x1000;
iomem_resource.end &= 0xfffffffffULL; /* 64 GB */
ioport_resource.end = controller->io_resource->end;

View File

@@ -84,15 +84,9 @@
.endm
#ifdef CONFIG_SMP
#ifdef CONFIG_32BIT
#define PER_CPU_OFFSET_SHIFT 2
#else
#define PER_CPU_OFFSET_SHIFT 3
#endif
.macro asm_per_cpu dst sym tmp
lw \tmp, TASK_TI_CPU_NUM(tp)
slli \tmp, \tmp, PER_CPU_OFFSET_SHIFT
slli \tmp, \tmp, RISCV_LGPTR
la \dst, __per_cpu_offset
add \dst, \dst, \tmp
REG_L \tmp, 0(\dst)
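
A hedged C analogue of the asm_per_cpu macro above: __per_cpu_offset is an array of unsigned long, so the index scale is log2(sizeof(long)), which is exactly what RISCV_LGPTR encodes (2 on rv32, 3 on rv64) and why the hand-rolled PER_CPU_OFFSET_SHIFT was redundant.

extern unsigned long __per_cpu_offset[];

static unsigned long per_cpu_base(unsigned int cpu)
{
        /* the compiler scales the index by sizeof(unsigned long),
         * i.e. cpu << 2 on rv32 and cpu << 3 on rv64 */
        return __per_cpu_offset[cpu];
}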

View File

@@ -31,6 +31,8 @@ struct riscv_isainfo {
DECLARE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo);
extern const struct seq_operations cpuinfo_op;
/* Per-cpu ISA extensions. */
extern struct riscv_isainfo hart_isa[NR_CPUS];

View File

@@ -42,4 +42,11 @@ static inline bool riscv_hwprobe_pair_cmp(struct riscv_hwprobe *pair,
return pair->value == other_pair->value;
}
#ifdef CONFIG_MMU
void riscv_hwprobe_register_async_probe(void);
void riscv_hwprobe_complete_async_probe(void);
#else
static inline void riscv_hwprobe_register_async_probe(void) {}
static inline void riscv_hwprobe_complete_async_probe(void) {}
#endif
#endif

View File

@@ -69,6 +69,8 @@ typedef struct {
#define PTRS_PER_PMD (PAGE_SIZE / sizeof(pmd_t))
#define MAX_POSSIBLE_PHYSMEM_BITS 56
/*
* rv64 PTE format:
* | 63 | 62 61 | 60 54 | 53 10 | 9 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0

View File

@@ -654,6 +654,8 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
return __pgprot(prot);
}
#define pgprot_dmacoherent pgprot_writecombine
/*
* Both Svade and Svadu control the hardware behavior when the PTE A/D bits need to be set. By
* default the M-mode firmware enables the hardware updating scheme when only Svadu is present in

View File

@@ -12,6 +12,12 @@ struct vdso_arch_data {
/* Boolean indicating all CPUs have the same static hwprobe values. */
__u8 homogeneous_cpus;
/*
* A gate to check and see if the hwprobe data is actually ready, as
* probing is deferred to avoid boot slowdowns.
*/
__u8 ready;
};
#endif /* __RISCV_ASM_VDSO_ARCH_DATA_H */

View File

@@ -62,10 +62,8 @@ int __init riscv_early_of_processor_hartid(struct device_node *node, unsigned lo
return -ENODEV;
}
if (!of_device_is_available(node)) {
pr_info("CPU with hartid=%lu is not available\n", *hart);
if (!of_device_is_available(node))
return -ENODEV;
}
if (of_property_read_string(node, "riscv,isa-base", &isa))
goto old_interface;

View File

@@ -932,9 +932,9 @@ static int has_thead_homogeneous_vlenb(void)
{
int cpu;
u32 prev_vlenb = 0;
u32 vlenb;
u32 vlenb = 0;
/* Ignore thead,vlenb property if xtheavector is not enabled in the kernel */
/* Ignore thead,vlenb property if xtheadvector is not enabled in the kernel */
if (!IS_ENABLED(CONFIG_RISCV_ISA_XTHEADVECTOR))
return 0;

View File

@@ -40,6 +40,17 @@ enum ipi_message_type {
IPI_MAX
};
static const char * const ipi_names[] = {
[IPI_RESCHEDULE] = "Rescheduling interrupts",
[IPI_CALL_FUNC] = "Function call interrupts",
[IPI_CPU_STOP] = "CPU stop interrupts",
[IPI_CPU_CRASH_STOP] = "CPU stop (for crash dump) interrupts",
[IPI_IRQ_WORK] = "IRQ work interrupts",
[IPI_TIMER] = "Timer broadcast interrupts",
[IPI_CPU_BACKTRACE] = "CPU backtrace interrupts",
[IPI_KGDB_ROUNDUP] = "KGDB roundup interrupts",
};
unsigned long __cpuid_to_hartid_map[NR_CPUS] __ro_after_init = {
[0 ... NR_CPUS-1] = INVALID_HARTID
};
@@ -199,7 +210,7 @@ void riscv_ipi_set_virq_range(int virq, int nr)
/* Request IPIs */
for (i = 0; i < nr_ipi; i++) {
err = request_percpu_irq(ipi_virq_base + i, handle_IPI,
"IPI", &ipi_dummy_dev);
ipi_names[i], &ipi_dummy_dev);
WARN_ON(err);
ipi_desc[i] = irq_to_desc(ipi_virq_base + i);
@@ -210,17 +221,6 @@ void riscv_ipi_set_virq_range(int virq, int nr)
riscv_ipi_enable();
}
static const char * const ipi_names[] = {
[IPI_RESCHEDULE] = "Rescheduling interrupts",
[IPI_CALL_FUNC] = "Function call interrupts",
[IPI_CPU_STOP] = "CPU stop interrupts",
[IPI_CPU_CRASH_STOP] = "CPU stop (for crash dump) interrupts",
[IPI_IRQ_WORK] = "IRQ work interrupts",
[IPI_TIMER] = "Timer broadcast interrupts",
[IPI_CPU_BACKTRACE] = "CPU backtrace interrupts",
[IPI_KGDB_ROUNDUP] = "KGDB roundup interrupts",
};
void show_ipi_stats(struct seq_file *p, int prec)
{
unsigned int cpu, i;

View File

@@ -5,6 +5,9 @@
* more details.
*/
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <linux/atomic.h>
#include <linux/once.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/hwprobe.h>
@@ -28,6 +31,11 @@ static void hwprobe_arch_id(struct riscv_hwprobe *pair,
bool first = true;
int cpu;
if (pair->key != RISCV_HWPROBE_KEY_MVENDORID &&
pair->key != RISCV_HWPROBE_KEY_MIMPID &&
pair->key != RISCV_HWPROBE_KEY_MARCHID)
goto out;
for_each_cpu(cpu, cpus) {
u64 cpu_id;
@@ -58,6 +66,7 @@ static void hwprobe_arch_id(struct riscv_hwprobe *pair,
}
}
out:
pair->value = id;
}
@@ -454,28 +463,32 @@ static int hwprobe_get_cpus(struct riscv_hwprobe __user *pairs,
return 0;
}
static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
size_t pair_count, size_t cpusetsize,
unsigned long __user *cpus_user,
unsigned int flags)
{
if (flags & RISCV_HWPROBE_WHICH_CPUS)
return hwprobe_get_cpus(pairs, pair_count, cpusetsize,
cpus_user, flags);
return hwprobe_get_values(pairs, pair_count, cpusetsize,
cpus_user, flags);
}
#ifdef CONFIG_MMU
static int __init init_hwprobe_vdso_data(void)
static DECLARE_COMPLETION(boot_probes_done);
static atomic_t pending_boot_probes = ATOMIC_INIT(1);
void riscv_hwprobe_register_async_probe(void)
{
atomic_inc(&pending_boot_probes);
}
void riscv_hwprobe_complete_async_probe(void)
{
if (atomic_dec_and_test(&pending_boot_probes))
complete(&boot_probes_done);
}
static int complete_hwprobe_vdso_data(void)
{
struct vdso_arch_data *avd = vdso_k_arch_data;
u64 id_bitsmash = 0;
struct riscv_hwprobe pair;
int key;
if (unlikely(!atomic_dec_and_test(&pending_boot_probes)))
wait_for_completion(&boot_probes_done);
/*
* Initialize vDSO data with the answers for the "all CPUs" case, to
* save a syscall in the common case.
@@ -503,13 +516,52 @@ static int __init init_hwprobe_vdso_data(void)
* vDSO should defer to the kernel for exotic cpu masks.
*/
avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1;
/*
* Make sure all the VDSO values are visible before we look at them.
* This pairs with the implicit "no speculatively visible accesses"
* barrier in the VDSO hwprobe code.
*/
smp_wmb();
avd->ready = true;
return 0;
}
static int __init init_hwprobe_vdso_data(void)
{
struct vdso_arch_data *avd = vdso_k_arch_data;
/*
* Prevent the vDSO cached values from being used, as they're not ready
* yet.
*/
avd->ready = false;
return 0;
}
arch_initcall_sync(init_hwprobe_vdso_data);
#else
static int complete_hwprobe_vdso_data(void) { return 0; }
#endif /* CONFIG_MMU */
static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
size_t pair_count, size_t cpusetsize,
unsigned long __user *cpus_user,
unsigned int flags)
{
DO_ONCE_SLEEPABLE(complete_hwprobe_vdso_data);
if (flags & RISCV_HWPROBE_WHICH_CPUS)
return hwprobe_get_cpus(pairs, pair_count, cpusetsize,
cpus_user, flags);
return hwprobe_get_values(pairs, pair_count, cpusetsize,
cpus_user, flags);
}
SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
size_t, pair_count, size_t, cpusetsize, unsigned long __user *,
cpus, unsigned int, flags)
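
A condensed sketch of the synchronization pattern this file introduces (names shortened; assumes <linux/atomic.h> and <linux/completion.h>, as included in the hunk): the counter is seeded at 1 so boot itself holds a reference, each asynchronous prober takes another, and the first hwprobe() caller drops the boot reference and sleeps until the count drains to zero.

static DECLARE_COMPLETION(done);
static atomic_t pending = ATOMIC_INIT(1);        /* boot holds one ref */

static void register_async_probe(void)
{
        atomic_inc(&pending);
}

static void complete_async_probe(void)
{
        if (atomic_dec_and_test(&pending))
                complete(&done);
}

static void wait_for_boot_probes(void)           /* first syscall path */
{
        if (!atomic_dec_and_test(&pending))
                wait_for_completion(&done);
}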

View File

@@ -379,6 +379,7 @@ static void check_vector_unaligned_access(struct work_struct *work __always_unus
static int __init vec_check_unaligned_access_speed_all_cpus(void *unused __always_unused)
{
schedule_on_each_cpu(check_vector_unaligned_access);
riscv_hwprobe_complete_async_probe();
return 0;
}
@@ -473,8 +474,12 @@ static int __init check_unaligned_access_all_cpus(void)
per_cpu(vector_misaligned_access, cpu) = unaligned_vector_speed_param;
} else if (!check_vector_unaligned_access_emulated_all_cpus() &&
IS_ENABLED(CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS)) {
kthread_run(vec_check_unaligned_access_speed_all_cpus,
NULL, "vec_check_unaligned_access_speed_all_cpus");
riscv_hwprobe_register_async_probe();
if (IS_ERR(kthread_run(vec_check_unaligned_access_speed_all_cpus,
NULL, "vec_check_unaligned_access_speed_all_cpus"))) {
pr_warn("Failed to create vec_unalign_check kthread\n");
riscv_hwprobe_complete_async_probe();
}
}
/*

View File

@@ -27,7 +27,7 @@ static int riscv_vdso_get_values(struct riscv_hwprobe *pairs, size_t pair_count,
* homogeneous, then this function can handle requests for arbitrary
* masks.
*/
if ((flags != 0) || (!all_cpus && !avd->homogeneous_cpus))
if (flags != 0 || (!all_cpus && !avd->homogeneous_cpus) || unlikely(!avd->ready))
return riscv_hwprobe(pairs, pair_count, cpusetsize, cpus, flags);
/* This is something we can handle, fill out the pairs. */

View File

@@ -158,7 +158,6 @@ config S390
select ARCH_WANT_IRQS_OFF_ACTIVATE_MM
select ARCH_WANT_KERNEL_PMD_MKWRITE
select ARCH_WANT_LD_ORPHAN_WARN
select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
select ARCH_WANTS_THP_SWAP
select BUILDTIME_TABLE_SORT
select CLONE_BACKWARDS2

View File

@@ -101,6 +101,7 @@ CONFIG_SLUB_STATS=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_PERSISTENT_HUGE_ZERO_FOLIO=y
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CMA_DEBUGFS=y
CONFIG_CMA_SYSFS=y
@@ -123,12 +124,12 @@ CONFIG_TLS_DEVICE=y
CONFIG_TLS_TOE=y
CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m
CONFIG_XDP_SOCKETS=y
CONFIG_XDP_SOCKETS_DIAG=m
CONFIG_DIBS=y
CONFIG_DIBS_LO=y
CONFIG_SMC=m
CONFIG_SMC_DIAG=m
CONFIG_DIBS=y
CONFIG_DIBS_LO=y
CONFIG_XDP_SOCKETS=y
CONFIG_XDP_SOCKETS_DIAG=m
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
@@ -472,6 +473,7 @@ CONFIG_SCSI_DH_EMC=m
CONFIG_SCSI_DH_ALUA=m
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
CONFIG_MD_LLBITMAP=y
# CONFIG_MD_BITMAP_FILE is not set
CONFIG_MD_LINEAR=m
CONFIG_MD_CLUSTER=m
@@ -654,9 +656,12 @@ CONFIG_JFS_POSIX_ACL=y
CONFIG_JFS_SECURITY=y
CONFIG_JFS_STATISTICS=y
CONFIG_XFS_FS=y
CONFIG_XFS_SUPPORT_V4=y
CONFIG_XFS_SUPPORT_ASCII_CI=y
CONFIG_XFS_QUOTA=y
CONFIG_XFS_POSIX_ACL=y
CONFIG_XFS_RT=y
# CONFIG_XFS_ONLINE_SCRUB is not set
CONFIG_XFS_DEBUG=y
CONFIG_GFS2_FS=m
CONFIG_GFS2_FS_LOCKING_DLM=y
@@ -666,7 +671,6 @@ CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_BTRFS_DEBUG=y
CONFIG_BTRFS_ASSERT=y
CONFIG_NILFS2_FS=m
CONFIG_FS_DAX=y
CONFIG_EXPORTFS_BLOCK_OPS=y
CONFIG_FS_ENCRYPTION=y
CONFIG_FS_VERITY=y

View File

@@ -94,6 +94,7 @@ CONFIG_SLAB_BUCKETS=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_PERSISTENT_HUGE_ZERO_FOLIO=y
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CMA_SYSFS=y
CONFIG_CMA_AREAS=7
@@ -114,12 +115,12 @@ CONFIG_TLS_DEVICE=y
CONFIG_TLS_TOE=y
CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m
CONFIG_XDP_SOCKETS=y
CONFIG_XDP_SOCKETS_DIAG=m
CONFIG_DIBS=y
CONFIG_DIBS_LO=y
CONFIG_SMC=m
CONFIG_SMC_DIAG=m
CONFIG_DIBS=y
CONFIG_DIBS_LO=y
CONFIG_XDP_SOCKETS=y
CONFIG_XDP_SOCKETS_DIAG=m
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
@@ -462,6 +463,7 @@ CONFIG_SCSI_DH_EMC=m
CONFIG_SCSI_DH_ALUA=m
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
CONFIG_MD_LLBITMAP=y
# CONFIG_MD_BITMAP_FILE is not set
CONFIG_MD_LINEAR=m
CONFIG_MD_CLUSTER=m
@@ -644,16 +646,18 @@ CONFIG_JFS_POSIX_ACL=y
CONFIG_JFS_SECURITY=y
CONFIG_JFS_STATISTICS=y
CONFIG_XFS_FS=y
CONFIG_XFS_SUPPORT_V4=y
CONFIG_XFS_SUPPORT_ASCII_CI=y
CONFIG_XFS_QUOTA=y
CONFIG_XFS_POSIX_ACL=y
CONFIG_XFS_RT=y
# CONFIG_XFS_ONLINE_SCRUB is not set
CONFIG_GFS2_FS=m
CONFIG_GFS2_FS_LOCKING_DLM=y
CONFIG_OCFS2_FS=m
CONFIG_BTRFS_FS=y
CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_NILFS2_FS=m
CONFIG_FS_DAX=y
CONFIG_EXPORTFS_BLOCK_OPS=y
CONFIG_FS_ENCRYPTION=y
CONFIG_FS_VERITY=y

View File

@@ -33,7 +33,6 @@ CONFIG_NET=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_SAFE=y
CONFIG_BLK_DEV_RAM=y
# CONFIG_DCSSBLK is not set
# CONFIG_DASD is not set
CONFIG_ENCLOSURE_SERVICES=y
CONFIG_SCSI=y

View File

@@ -169,11 +169,18 @@ struct kmac_sha2_ctx {
u64 buflen[2];
};
enum async_op {
OP_NOP = 0,
OP_UPDATE,
OP_FINAL,
OP_FINUP,
};
/* phmac request context */
struct phmac_req_ctx {
struct hash_walk_helper hwh;
struct kmac_sha2_ctx kmac_ctx;
bool final;
enum async_op async_op;
};
/*
@@ -610,6 +617,7 @@ static int phmac_update(struct ahash_request *req)
* using engine to serialize requests.
*/
if (rc == 0 || rc == -EKEYEXPIRED) {
req_ctx->async_op = OP_UPDATE;
atomic_inc(&tfm_ctx->via_engine_ctr);
rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req);
if (rc != -EINPROGRESS)
@@ -647,8 +655,7 @@ static int phmac_final(struct ahash_request *req)
* using engine to serialize requests.
*/
if (rc == 0 || rc == -EKEYEXPIRED) {
req->nbytes = 0;
req_ctx->final = true;
req_ctx->async_op = OP_FINAL;
atomic_inc(&tfm_ctx->via_engine_ctr);
rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req);
if (rc != -EINPROGRESS)
@@ -676,13 +683,16 @@ static int phmac_finup(struct ahash_request *req)
if (rc)
goto out;
req_ctx->async_op = OP_FINUP;
/* Try synchronous operations if no active engine usage */
if (!atomic_read(&tfm_ctx->via_engine_ctr)) {
rc = phmac_kmac_update(req, false);
if (rc == 0)
req->nbytes = 0;
req_ctx->async_op = OP_FINAL;
}
if (!rc && !req->nbytes && !atomic_read(&tfm_ctx->via_engine_ctr)) {
if (!rc && req_ctx->async_op == OP_FINAL &&
!atomic_read(&tfm_ctx->via_engine_ctr)) {
rc = phmac_kmac_final(req, false);
if (rc == 0)
goto out;
@@ -694,7 +704,7 @@ static int phmac_finup(struct ahash_request *req)
* using engine to serialize requests.
*/
if (rc == 0 || rc == -EKEYEXPIRED) {
req_ctx->final = true;
/* req->async_op has been set to either OP_FINUP or OP_FINAL */
atomic_inc(&tfm_ctx->via_engine_ctr);
rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req);
if (rc != -EINPROGRESS)
@@ -855,15 +865,16 @@ static int phmac_do_one_request(struct crypto_engine *engine, void *areq)
/*
* Three kinds of requests come in here:
* update when req->nbytes > 0 and req_ctx->final is false
* final when req->nbytes = 0 and req_ctx->final is true
* finup when req->nbytes > 0 and req_ctx->final is true
* For update and finup the hwh walk needs to be prepared and
* up to date but the actual nr of bytes in req->nbytes may be
* any non zero number. For final there is no hwh walk needed.
* 1. req->async_op == OP_UPDATE with req->nbytes > 0
* 2. req->async_op == OP_FINUP with req->nbytes > 0
* 3. req->async_op == OP_FINAL
* For update and finup the hwh walk has already been prepared
* by the caller. For final there is no hwh walk needed.
*/
if (req->nbytes) {
switch (req_ctx->async_op) {
case OP_UPDATE:
case OP_FINUP:
rc = phmac_kmac_update(req, true);
if (rc == -EKEYEXPIRED) {
/*
@@ -880,10 +891,11 @@ static int phmac_do_one_request(struct crypto_engine *engine, void *areq)
hwh_advance(hwh, rc);
goto out;
}
req->nbytes = 0;
}
if (req_ctx->final) {
if (req_ctx->async_op == OP_UPDATE)
break;
req_ctx->async_op = OP_FINAL;
fallthrough;
case OP_FINAL:
rc = phmac_kmac_final(req, true);
if (rc == -EKEYEXPIRED) {
/*
@@ -897,10 +909,14 @@ static int phmac_do_one_request(struct crypto_engine *engine, void *areq)
cond_resched();
return -ENOSPC;
}
break;
default:
/* unknown/unsupported/unimplemented asynch op */
return -EOPNOTSUPP;
}
out:
if (rc || req_ctx->final)
if (rc || req_ctx->async_op == OP_FINAL)
memzero_explicit(kmac_ctx, sizeof(*kmac_ctx));
pr_debug("request complete with rc=%d\n", rc);
local_bh_disable();

View File

@@ -145,7 +145,6 @@ struct zpci_dev {
u8 has_resources : 1;
u8 is_physfn : 1;
u8 util_str_avail : 1;
u8 irqs_registered : 1;
u8 tid_avail : 1;
u8 rtr_avail : 1; /* Relaxed translation allowed */
unsigned int devfn; /* DEVFN part of the RID*/

View File

@@ -291,16 +291,14 @@ static int ptdump_cmp(const void *a, const void *b)
static int add_marker(unsigned long start, unsigned long end, const char *name)
{
size_t oldsize, newsize;
struct addr_marker *new;
size_t newsize;
oldsize = markers_cnt * sizeof(*markers);
newsize = oldsize + 2 * sizeof(*markers);
if (!oldsize)
markers = kvmalloc(newsize, GFP_KERNEL);
else
markers = kvrealloc(markers, newsize, GFP_KERNEL);
if (!markers)
goto error;
newsize = (markers_cnt + 2) * sizeof(*markers);
new = kvrealloc(markers, newsize, GFP_KERNEL);
if (!new)
return -ENOMEM;
markers = new;
markers[markers_cnt].is_start = 1;
markers[markers_cnt].start_address = start;
markers[markers_cnt].size = end - start;
@@ -312,9 +310,6 @@ static int add_marker(unsigned long start, unsigned long end, const char *name)
markers[markers_cnt].name = name;
markers_cnt++;
return 0;
error:
markers_cnt = 0;
return -ENOMEM;
}
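
The hunk above also fixes a classic realloc hazard: the old code assigned kvrealloc()'s result straight back to markers, losing (and leaking) the original block on failure. A generic sketch of the safe idiom in plain C, with illustrative names:

#include <stdlib.h>

static int grow(int **buf, size_t *cap)
{
        int *tmp = realloc(*buf, (*cap + 2) * sizeof(**buf));

        if (!tmp)
                return -1;      /* *buf is still valid and owned */
        *buf = tmp;
        *cap += 2;
        return 0;
}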
static int pt_dump_init(void)

View File

@@ -188,7 +188,7 @@ static pci_ers_result_t zpci_event_attempt_error_recovery(struct pci_dev *pdev)
* is unbound or probed and that userspace can't access its
* configuration space while we perform recovery.
*/
pci_dev_lock(pdev);
device_lock(&pdev->dev);
if (pdev->error_state == pci_channel_io_perm_failure) {
ers_res = PCI_ERS_RESULT_DISCONNECT;
goto out_unlock;
@@ -257,7 +257,7 @@ static pci_ers_result_t zpci_event_attempt_error_recovery(struct pci_dev *pdev)
driver->err_handler->resume(pdev);
pci_uevent_ers(pdev, PCI_ERS_RESULT_RECOVERED);
out_unlock:
pci_dev_unlock(pdev);
device_unlock(&pdev->dev);
zpci_report_status(zdev, "recovery", status_str);
return ers_res;

View File

@@ -107,9 +107,6 @@ static int zpci_set_irq(struct zpci_dev *zdev)
else
rc = zpci_set_airq(zdev);
if (!rc)
zdev->irqs_registered = 1;
return rc;
}
@@ -123,9 +120,6 @@ static int zpci_clear_irq(struct zpci_dev *zdev)
else
rc = zpci_clear_airq(zdev);
if (!rc)
zdev->irqs_registered = 0;
return rc;
}
@@ -427,8 +421,7 @@ bool arch_restore_msi_irqs(struct pci_dev *pdev)
{
struct zpci_dev *zdev = to_zpci(pdev);
if (!zdev->irqs_registered)
zpci_set_irq(zdev);
zpci_set_irq(zdev);
return true;
}

View File

@@ -75,7 +75,7 @@ export BITS
#
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=53383
#
KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx
KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx -mno-sse4a
KBUILD_RUSTFLAGS += --target=$(objtree)/scripts/target.json
KBUILD_RUSTFLAGS += -Ctarget-feature=-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-avx,-avx2

View File

@@ -7596,6 +7596,7 @@ __init int intel_pmu_init(void)
break;
case INTEL_PANTHERLAKE_L:
case INTEL_WILDCATLAKE_L:
pr_cont("Pantherlake Hybrid events, ");
name = "pantherlake_hybrid";
goto lnl_common;

View File

@@ -317,7 +317,8 @@ static u64 __grt_latency_data(struct perf_event *event, u64 status,
{
u64 val;
WARN_ON_ONCE(hybrid_pmu(event->pmu)->pmu_type == hybrid_big);
WARN_ON_ONCE(is_hybrid() &&
hybrid_pmu(event->pmu)->pmu_type == hybrid_big);
dse &= PERF_PEBS_DATA_SOURCE_GRT_MASK;
val = hybrid_var(event->pmu, pebs_data_source)[dse];

View File

@@ -1895,6 +1895,7 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
X86_MATCH_VFM(INTEL_ARROWLAKE_H, &mtl_uncore_init),
X86_MATCH_VFM(INTEL_LUNARLAKE_M, &lnl_uncore_init),
X86_MATCH_VFM(INTEL_PANTHERLAKE_L, &ptl_uncore_init),
X86_MATCH_VFM(INTEL_WILDCATLAKE_L, &ptl_uncore_init),
X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &spr_uncore_init),
X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &spr_uncore_init),
X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, &gnr_uncore_init),

View File

@@ -150,12 +150,12 @@
#define INTEL_LUNARLAKE_M IFM(6, 0xBD) /* Lion Cove / Skymont */
#define INTEL_PANTHERLAKE_L IFM(6, 0xCC) /* Cougar Cove / Crestmont */
#define INTEL_PANTHERLAKE_L IFM(6, 0xCC) /* Cougar Cove / Darkmont */
#define INTEL_WILDCATLAKE_L IFM(6, 0xD5)
#define INTEL_NOVALAKE IFM(18, 0x01)
#define INTEL_NOVALAKE_L IFM(18, 0x03)
#define INTEL_NOVALAKE IFM(18, 0x01) /* Coyote Cove / Arctic Wolf */
#define INTEL_NOVALAKE_L IFM(18, 0x03) /* Coyote Cove / Arctic Wolf */
/* "Small Core" Processors (Atom/E-Core) */

View File

@@ -43,6 +43,9 @@ extern unsigned long __phys_addr_symbol(unsigned long);
void clear_page_orig(void *page);
void clear_page_rep(void *page);
void clear_page_erms(void *page);
KCFI_REFERENCE(clear_page_orig);
KCFI_REFERENCE(clear_page_rep);
KCFI_REFERENCE(clear_page_erms);
static inline void clear_page(void *page)
{

View File

@@ -516,7 +516,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
setup_force_cpu_cap(X86_FEATURE_ZEN5);
break;
case 0x50 ... 0x5f:
case 0x90 ... 0xaf:
case 0x80 ... 0xaf:
case 0xc0 ... 0xcf:
setup_force_cpu_cap(X86_FEATURE_ZEN6);
break;
@@ -1035,8 +1035,18 @@ static void init_amd_zen4(struct cpuinfo_x86 *c)
}
}
static const struct x86_cpu_id zen5_rdseed_microcode[] = {
ZEN_MODEL_STEP_UCODE(0x1a, 0x02, 0x1, 0x0b00215a),
ZEN_MODEL_STEP_UCODE(0x1a, 0x11, 0x0, 0x0b101054),
};
static void init_amd_zen5(struct cpuinfo_x86 *c)
{
if (!x86_match_min_microcode_rev(zen5_rdseed_microcode)) {
clear_cpu_cap(c, X86_FEATURE_RDSEED);
msr_clear_bit(MSR_AMD64_CPUID_FN_7, 18);
pr_emerg_once("RDSEED32 is broken. Disabling the corresponding CPUID bit.\n");
}
}
static void init_amd(struct cpuinfo_x86 *c)
@@ -1355,11 +1365,23 @@ static __init int print_s5_reset_status_mmio(void)
return 0;
value = ioread32(addr);
iounmap(addr);
/* Value with "all bits set" is an error response and should be ignored. */
if (value == U32_MAX)
if (value == U32_MAX) {
iounmap(addr);
return 0;
}
/*
* Clear all reason bits so they won't be retained if the next reset
* does not update the register. Besides, some bits are never cleared by
* hardware so it's software's responsibility to clear them.
*
* Writing the value back effectively clears all reason bits as they are
* write-1-to-clear.
*/
iowrite32(value, addr);
iounmap(addr);
for (i = 0; i < ARRAY_SIZE(s5_reset_reason_txt); i++) {
if (!(value & BIT(i)))

View File

@@ -1463,7 +1463,9 @@ static void __init retbleed_update_mitigation(void)
break;
default:
if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF) {
pr_err(RETBLEED_INTEL_MSG);
if (retbleed_mitigation != RETBLEED_MITIGATION_NONE)
pr_err(RETBLEED_INTEL_MSG);
retbleed_mitigation = RETBLEED_MITIGATION_NONE;
}
}
@@ -1825,13 +1827,6 @@ void unpriv_ebpf_notify(int new_state)
}
#endif
static inline bool match_option(const char *arg, int arglen, const char *opt)
{
int len = strlen(opt);
return len == arglen && !strncmp(arg, opt, len);
}
/* The kernel command line selection for spectre v2 */
enum spectre_v2_mitigation_cmd {
SPECTRE_V2_CMD_NONE,

View File

@@ -194,7 +194,7 @@ static bool need_sha_check(u32 cur_rev)
}
switch (cur_rev >> 8) {
case 0x80012: return cur_rev <= 0x800126f; break;
case 0x80012: return cur_rev <= 0x8001277; break;
case 0x80082: return cur_rev <= 0x800820f; break;
case 0x83010: return cur_rev <= 0x830107c; break;
case 0x86001: return cur_rev <= 0x860010e; break;
@@ -233,13 +233,31 @@ static bool need_sha_check(u32 cur_rev)
return true;
}
static bool cpu_has_entrysign(void)
{
unsigned int fam = x86_family(bsp_cpuid_1_eax);
unsigned int model = x86_model(bsp_cpuid_1_eax);
if (fam == 0x17 || fam == 0x19)
return true;
if (fam == 0x1a) {
if (model <= 0x2f ||
(0x40 <= model && model <= 0x4f) ||
(0x60 <= model && model <= 0x6f))
return true;
}
return false;
}
static bool verify_sha256_digest(u32 patch_id, u32 cur_rev, const u8 *data, unsigned int len)
{
struct patch_digest *pd = NULL;
u8 digest[SHA256_DIGEST_SIZE];
int i;
if (x86_family(bsp_cpuid_1_eax) < 0x17)
if (!cpu_has_entrysign())
return true;
if (!need_sha_check(cur_rev))

View File

@@ -242,7 +242,9 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_mon_domain *d,
u32 unused, u32 rmid, enum resctrl_event_id eventid,
u64 *val, void *ignored)
{
struct rdt_hw_mon_domain *hw_dom = resctrl_to_arch_mon_dom(d);
int cpu = cpumask_any(&d->hdr.cpu_mask);
struct arch_mbm_state *am;
u64 msr_val;
u32 prmid;
int ret;
@@ -251,12 +253,16 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_mon_domain *d,
prmid = logical_rmid_to_physical_rmid(cpu, rmid);
ret = __rmid_read_phys(prmid, eventid, &msr_val);
if (ret)
return ret;
*val = get_corrected_val(r, d, rmid, eventid, msr_val);
if (!ret) {
*val = get_corrected_val(r, d, rmid, eventid, msr_val);
} else if (ret == -EINVAL) {
am = get_arch_mbm_state(hw_dom, rmid, eventid);
if (am)
am->prev_msr = 0;
}
return 0;
return ret;
}
static int __cntr_id_read(u32 cntr_id, u64 *val)
@@ -452,7 +458,16 @@ int __init rdt_get_mon_l3_config(struct rdt_resource *r)
r->mon.mbm_cfg_mask = ecx & MAX_EVT_CONFIG_BITS;
}
if (rdt_cpu_has(X86_FEATURE_ABMC)) {
/*
* resctrl assumes a system that supports assignable counters can
* switch to "default" mode. Ensure that there is a "default" mode
* to switch to. This enforces a dependency between the independent
* X86_FEATURE_ABMC and X86_FEATURE_CQM_MBM_TOTAL/X86_FEATURE_CQM_MBM_LOCAL
* hardware features.
*/
if (rdt_cpu_has(X86_FEATURE_ABMC) &&
(rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL) ||
rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL))) {
r->mon.mbm_cntr_assignable = true;
cpuid_count(0x80000020, 5, &eax, &ebx, &ecx, &edx);
r->mon.num_mbm_cntrs = (ebx & GENMASK(15, 0)) + 1;

View File

@@ -825,6 +825,9 @@ void fpu__clear_user_states(struct fpu *fpu)
!fpregs_state_valid(fpu, smp_processor_id()))
os_xrstor_supervisor(fpu->fpstate);
/* Ensure XFD state is in sync before reloading XSTATE */
xfd_update_state(fpu->fpstate);
/* Reset user states in registers. */
restore_fpregs_from_init_fpstate(XFEATURE_MASK_USER_RESTORE);

View File

@@ -446,7 +446,7 @@ static void cpa_flush(struct cpa_data *cpa, int cache)
}
start = fix_addr(__cpa_addr(cpa, 0));
end = fix_addr(__cpa_addr(cpa, cpa->numpages));
end = start + cpa->numpages * PAGE_SIZE;
if (cpa->force_flush_all)
end = TLB_FLUSH_ALL;

View File

@@ -911,11 +911,31 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
* CR3 and cpu_tlbstate.loaded_mm are not all in sync.
*/
this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
barrier();
/* Start receiving IPIs and then read tlb_gen (and LAM below) */
/*
* Make sure this CPU is set in mm_cpumask() such that we'll
* receive invalidation IPIs.
*
* Rely on the smp_mb() implied by cpumask_set_cpu()'s atomic
* operation, or explicitly provide one. Such that:
*
* switch_mm_irqs_off() flush_tlb_mm_range()
* smp_store_release(loaded_mm, SWITCHING); atomic64_inc_return(tlb_gen)
* smp_mb(); // here // smp_mb() implied
* atomic64_read(tlb_gen); this_cpu_read(loaded_mm);
*
* we properly order against flush_tlb_mm_range(), where the
* loaded_mm load can happen in native_flush_tlb_multi() ->
* should_flush_tlb().
*
* This way switch_mm() must see the new tlb_gen or
* flush_tlb_mm_range() must see the new loaded_mm, or both.
*/
if (next != &init_mm && !cpumask_test_cpu(cpu, mm_cpumask(next)))
cpumask_set_cpu(cpu, mm_cpumask(next));
else
smp_mb();
next_tlb_gen = atomic64_read(&next->context.tlb_gen);
ns = choose_new_asid(next, next_tlb_gen);
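
The comment describes a store-buffering pattern; a hedged two-thread sketch of the argument (illustrative globals, not the kernel's actual variables): with a full barrier between each side's store and subsequent load, "both CPUs read the stale value" is a forbidden outcome, so a concurrent flush can never be missed.

static struct mm_struct *loaded_mm;     /* illustrative */
static atomic64_t tlb_gen;              /* illustrative */

static void cpu_a(struct mm_struct *next)        /* switch_mm side */
{
        WRITE_ONCE(loaded_mm, next);
        smp_mb();                        /* order the store before the load */
        (void)atomic64_read(&tlb_gen);   /* sees B's increment ... */
}

static void cpu_b(void)                          /* flush side */
{
        atomic64_inc_return(&tlb_gen);   /* implies a full barrier */
        (void)READ_ONCE(loaded_mm);      /* ... or this sees A's store */
}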

View File

@@ -2701,7 +2701,7 @@ st: if (is_imm8(insn->off))
/* Update cleanup_addr */
ctx->cleanup_addr = proglen;
if (bpf_prog_was_classic(bpf_prog) &&
!capable(CAP_SYS_ADMIN)) {
!ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN)) {
u8 *ip = image + addrs[i - 1];
if (emit_spectre_bhb_barrier(&prog, ip, bpf_prog))

View File

@@ -292,7 +292,7 @@ bool __blk_crypto_bio_prep(struct bio **bio_ptr)
}
if (!bio_crypt_check_alignment(bio)) {
bio->bi_status = BLK_STS_IOERR;
bio->bi_status = BLK_STS_INVAL;
goto fail;
}

View File

@@ -184,6 +184,16 @@ static int blk_validate_integrity_limits(struct queue_limits *lim)
if (!bi->interval_exp)
bi->interval_exp = ilog2(lim->logical_block_size);
/*
* The PI generation / validation helpers do not expect intervals to
* straddle multiple bio_vecs. Enforce alignment so that those are
* never generated, and that each buffer is aligned as expected.
*/
if (bi->csum_type) {
lim->dma_alignment = max(lim->dma_alignment,
(1U << bi->interval_exp) - 1);
}
return 0;
}
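
A worked instance with hypothetical numbers: for a 4096-byte protection interval, interval_exp is 12 and the resulting mask pins DMA alignment to 4 KiB, so a single PI interval can never straddle two bio_vecs.

static unsigned int pi_dma_alignment(unsigned int interval_exp,
                                     unsigned int cur_align)
{
        unsigned int mask = (1U << interval_exp) - 1;   /* 0xfff for 4 KiB */

        return cur_align > mask ? cur_align : mask;     /* max() of the two */
}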

View File

@@ -63,6 +63,9 @@ static __init int acpi_parse_mrrm(struct acpi_table_header *table)
if (!mrrm)
return -ENODEV;
if (mrrm->header.revision != 1)
return -EINVAL;
if (mrrm->flags & ACPI_MRRM_FLAGS_REGION_ASSIGNMENT_OS)
return -EOPNOTSUPP;

View File

@@ -1959,8 +1959,10 @@ static void acpi_video_bus_remove_notify_handler(struct acpi_video_bus *video)
struct acpi_video_device *dev;
mutex_lock(&video->device_list_lock);
list_for_each_entry(dev, &video->video_device_list, entry)
list_for_each_entry(dev, &video->video_device_list, entry) {
acpi_video_dev_remove_notify_handler(dev);
cancel_delayed_work_sync(&dev->switch_brightness_work);
}
mutex_unlock(&video->device_list_lock);
acpi_video_bus_stop_devices(video);

View File

@@ -95,6 +95,11 @@ acpi_tb_print_table_header(acpi_physical_address address,
{
struct acpi_table_header local_header;
#pragma GCC diagnostic push
#if defined(__GNUC__) && __GNUC__ >= 11
#pragma GCC diagnostic ignored "-Wstringop-overread"
#endif
if (ACPI_COMPARE_NAMESEG(header->signature, ACPI_SIG_FACS)) {
/* FACS only has signature and length fields */
@@ -143,4 +148,5 @@ acpi_tb_print_table_header(acpi_physical_address address,
local_header.asl_compiler_id,
local_header.asl_compiler_revision));
}
#pragma GCC diagnostic pop
}

View File

@@ -619,8 +619,10 @@ static int acpi_button_add(struct acpi_device *device)
input_set_drvdata(input, device);
error = input_register_device(input);
if (error)
if (error) {
input_free_device(input);
goto err_remove_fs;
}
switch (device->device_type) {
case ACPI_BUS_TYPE_POWER_BUTTON:

View File

@@ -49,6 +49,7 @@ struct acpi_fan_fst {
};
struct acpi_fan {
acpi_handle handle;
bool acpi4;
bool has_fst;
struct acpi_fan_fif fif;
@@ -59,14 +60,14 @@ struct acpi_fan {
struct device_attribute fine_grain_control;
};
int acpi_fan_get_fst(struct acpi_device *device, struct acpi_fan_fst *fst);
int acpi_fan_get_fst(acpi_handle handle, struct acpi_fan_fst *fst);
int acpi_fan_create_attributes(struct acpi_device *device);
void acpi_fan_delete_attributes(struct acpi_device *device);
#if IS_REACHABLE(CONFIG_HWMON)
int devm_acpi_fan_create_hwmon(struct acpi_device *device);
int devm_acpi_fan_create_hwmon(struct device *dev);
#else
static inline int devm_acpi_fan_create_hwmon(struct acpi_device *device) { return 0; };
static inline int devm_acpi_fan_create_hwmon(struct device *dev) { return 0; };
#endif
#endif

View File

@@ -55,7 +55,7 @@ static ssize_t show_fan_speed(struct device *dev, struct device_attribute *attr,
struct acpi_fan_fst fst;
int status;
status = acpi_fan_get_fst(acpi_dev, &fst);
status = acpi_fan_get_fst(acpi_dev->handle, &fst);
if (status)
return status;

View File

@@ -44,25 +44,30 @@ static int fan_get_max_state(struct thermal_cooling_device *cdev, unsigned long
return 0;
}
int acpi_fan_get_fst(struct acpi_device *device, struct acpi_fan_fst *fst)
int acpi_fan_get_fst(acpi_handle handle, struct acpi_fan_fst *fst)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
acpi_status status;
int ret = 0;
status = acpi_evaluate_object(device->handle, "_FST", NULL, &buffer);
if (ACPI_FAILURE(status)) {
dev_err(&device->dev, "Get fan state failed\n");
return -ENODEV;
}
status = acpi_evaluate_object(handle, "_FST", NULL, &buffer);
if (ACPI_FAILURE(status))
return -EIO;
obj = buffer.pointer;
if (!obj || obj->type != ACPI_TYPE_PACKAGE ||
obj->package.count != 3 ||
obj->package.elements[1].type != ACPI_TYPE_INTEGER) {
dev_err(&device->dev, "Invalid _FST data\n");
ret = -EINVAL;
if (!obj)
return -ENODATA;
if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count != 3) {
ret = -EPROTO;
goto err;
}
if (obj->package.elements[0].type != ACPI_TYPE_INTEGER ||
obj->package.elements[1].type != ACPI_TYPE_INTEGER ||
obj->package.elements[2].type != ACPI_TYPE_INTEGER) {
ret = -EPROTO;
goto err;
}
@@ -81,7 +86,7 @@ static int fan_get_state_acpi4(struct acpi_device *device, unsigned long *state)
struct acpi_fan_fst fst;
int status, i;
status = acpi_fan_get_fst(device, &fst);
status = acpi_fan_get_fst(device->handle, &fst);
if (status)
return status;
@@ -311,11 +316,16 @@ static int acpi_fan_probe(struct platform_device *pdev)
struct acpi_device *device = ACPI_COMPANION(&pdev->dev);
char *name;
if (!device)
return -ENODEV;
fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL);
if (!fan) {
dev_err(&device->dev, "No memory for fan\n");
return -ENOMEM;
}
fan->handle = device->handle;
device->driver_data = fan;
platform_set_drvdata(pdev, fan);
@@ -337,7 +347,7 @@ static int acpi_fan_probe(struct platform_device *pdev)
}
if (fan->has_fst) {
result = devm_acpi_fan_create_hwmon(device);
result = devm_acpi_fan_create_hwmon(&pdev->dev);
if (result)
return result;

View File

@@ -93,13 +93,12 @@ static umode_t acpi_fan_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_
static int acpi_fan_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
int channel, long *val)
{
struct acpi_device *adev = to_acpi_device(dev->parent);
struct acpi_fan *fan = dev_get_drvdata(dev);
struct acpi_fan_fps *fps;
struct acpi_fan_fst fst;
int ret;
ret = acpi_fan_get_fst(adev, &fst);
ret = acpi_fan_get_fst(fan->handle, &fst);
if (ret < 0)
return ret;
@@ -167,12 +166,12 @@ static const struct hwmon_chip_info acpi_fan_hwmon_chip_info = {
.info = acpi_fan_hwmon_info,
};
int devm_acpi_fan_create_hwmon(struct acpi_device *device)
int devm_acpi_fan_create_hwmon(struct device *dev)
{
struct acpi_fan *fan = acpi_driver_data(device);
struct acpi_fan *fan = dev_get_drvdata(dev);
struct device *hdev;
hdev = devm_hwmon_device_register_with_info(&device->dev, "acpi_fan", fan,
&acpi_fan_hwmon_chip_info, NULL);
hdev = devm_hwmon_device_register_with_info(dev, "acpi_fan", fan, &acpi_fan_hwmon_chip_info,
NULL);
return PTR_ERR_OR_ZERO(hdev);
}

View File

@@ -1107,7 +1107,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
size_t num_args,
struct fwnode_reference_args *args)
{
return acpi_fwnode_get_reference_args(fwnode, propname, NULL, index, num_args, args);
return acpi_fwnode_get_reference_args(fwnode, propname, NULL, num_args, index, args);
}
EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference);
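
The argument swap fixed here is an easy bug to reintroduce: index and num_args have the same integer type, so a transposed call compiles without any diagnostic. A hedged illustration with a mock signature (not the ACPI API):

#include <stddef.h>

static int get_ref_args(const char *prop, size_t num_args, size_t index)
{
        (void)prop; (void)num_args; (void)index;
        return 0;
}

static int caller(void)
{
        size_t index = 0, num_args = 3;

        /* transposed arguments: identical types, no compiler warning */
        return get_ref_args("prop", index, num_args);
}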

View File

@@ -61,30 +61,6 @@ static int rimt_set_fwnode(struct acpi_rimt_node *rimt_node,
return 0;
}
/**
* rimt_get_fwnode() - Retrieve fwnode associated with an RIMT node
*
* @node: RIMT table node to be looked-up
*
* Returns: fwnode_handle pointer on success, NULL on failure
*/
static struct fwnode_handle *rimt_get_fwnode(struct acpi_rimt_node *node)
{
struct fwnode_handle *fwnode = NULL;
struct rimt_fwnode *curr;
spin_lock(&rimt_fwnode_lock);
list_for_each_entry(curr, &rimt_fwnode_list, list) {
if (curr->rimt_node == node) {
fwnode = curr->fwnode;
break;
}
}
spin_unlock(&rimt_fwnode_lock);
return fwnode;
}
static acpi_status rimt_match_node_callback(struct acpi_rimt_node *node,
void *context)
{
@@ -202,6 +178,67 @@ static struct acpi_rimt_node *rimt_scan_node(enum acpi_rimt_node_type type,
return NULL;
}
/*
* RISC-V supports IOMMU as a PCI device or a platform device.
* When it is a platform device, there should be a namespace device as
* well along with RIMT. To create the link between RIMT information and
* the platform device, the IOMMU driver should register itself with the
* RIMT module. This is true for PCI based IOMMU as well.
*/
int rimt_iommu_register(struct device *dev)
{
struct fwnode_handle *rimt_fwnode;
struct acpi_rimt_node *node;
node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_IOMMU, dev);
if (!node) {
pr_err("Could not find IOMMU node in RIMT\n");
return -ENODEV;
}
if (dev_is_pci(dev)) {
rimt_fwnode = acpi_alloc_fwnode_static();
if (!rimt_fwnode)
return -ENOMEM;
rimt_fwnode->dev = dev;
if (!dev->fwnode)
dev->fwnode = rimt_fwnode;
rimt_set_fwnode(node, rimt_fwnode);
} else {
rimt_set_fwnode(node, dev->fwnode);
}
return 0;
}
#ifdef CONFIG_IOMMU_API
/**
* rimt_get_fwnode() - Retrieve fwnode associated with an RIMT node
*
* @node: RIMT table node to be looked-up
*
* Returns: fwnode_handle pointer on success, NULL on failure
*/
static struct fwnode_handle *rimt_get_fwnode(struct acpi_rimt_node *node)
{
struct fwnode_handle *fwnode = NULL;
struct rimt_fwnode *curr;
spin_lock(&rimt_fwnode_lock);
list_for_each_entry(curr, &rimt_fwnode_list, list) {
if (curr->rimt_node == node) {
fwnode = curr->fwnode;
break;
}
}
spin_unlock(&rimt_fwnode_lock);
return fwnode;
}
static bool rimt_pcie_rc_supports_ats(struct acpi_rimt_node *node)
{
struct acpi_rimt_pcie_rc *pci_rc;
@@ -290,43 +327,6 @@ static struct acpi_rimt_node *rimt_node_get_id(struct acpi_rimt_node *node,
return NULL;
}
/*
* RISC-V supports IOMMU as a PCI device or a platform device.
* When it is a platform device, there should be a namespace device as
* well along with RIMT. To create the link between RIMT information and
* the platform device, the IOMMU driver should register itself with the
* RIMT module. This is true for PCI based IOMMU as well.
*/
int rimt_iommu_register(struct device *dev)
{
struct fwnode_handle *rimt_fwnode;
struct acpi_rimt_node *node;
node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_IOMMU, dev);
if (!node) {
pr_err("Could not find IOMMU node in RIMT\n");
return -ENODEV;
}
if (dev_is_pci(dev)) {
rimt_fwnode = acpi_alloc_fwnode_static();
if (!rimt_fwnode)
return -ENOMEM;
rimt_fwnode->dev = dev;
if (!dev->fwnode)
dev->fwnode = rimt_fwnode;
rimt_set_fwnode(node, rimt_fwnode);
} else {
rimt_set_fwnode(node, dev->fwnode);
}
return 0;
}
#ifdef CONFIG_IOMMU_API
static struct acpi_rimt_node *rimt_node_map_id(struct acpi_rimt_node *node,
u32 id_in, u32 *id_out,
u8 type_mask)

View File

@@ -155,7 +155,7 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console)
* Baud Rate field. If this field is zero or not present, Configured
* Baud Rate is used.
*/
if (table->precise_baudrate)
if (table->header.revision >= 4 && table->precise_baudrate)
baud_rate = table->precise_baudrate;
else switch (table->baud_rate) {
case 0:

View File

@@ -851,17 +851,8 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong,
} else {
if (!internal)
node->local_weak_refs++;
if (!node->has_weak_ref && list_empty(&node->work.entry)) {
if (target_list == NULL) {
pr_err("invalid inc weak node for %d\n",
node->debug_id);
return -EINVAL;
}
/*
* See comment above
*/
if (!node->has_weak_ref && target_list && list_empty(&node->work.entry))
binder_enqueue_work_ilocked(&node->work, target_list);
}
}
return 0;
}
@@ -2418,10 +2409,10 @@ static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
/**
* struct binder_ptr_fixup - data to be fixed-up in target buffer
* @offset offset in target buffer to fixup
* @skip_size bytes to skip in copy (fixup will be written later)
* @fixup_data data to write at fixup offset
* @node list node
* @offset: offset in target buffer to fixup
* @skip_size: bytes to skip in copy (fixup will be written later)
* @fixup_data: data to write at fixup offset
* @node: list node
*
* This is used for the pointer fixup list (pf) which is created and consumed
* during binder_transaction() and is only accessed locally. No
@@ -2438,10 +2429,10 @@ struct binder_ptr_fixup {
/**
* struct binder_sg_copy - scatter-gather data to be copied
* @offset offset in target buffer
* @sender_uaddr user address in source buffer
* @length bytes to copy
* @node list node
* @offset: offset in target buffer
* @sender_uaddr: user address in source buffer
* @length: bytes to copy
* @node: list node
*
* This is used for the sg copy list (sgc) which is created and consumed
* during binder_transaction() and is only accessed locally. No
@@ -4063,14 +4054,15 @@ binder_freeze_notification_done(struct binder_proc *proc,
/**
* binder_free_buf() - free the specified buffer
* @proc: binder proc that owns buffer
* @buffer: buffer to be freed
* @is_failure: failed to send transaction
* @proc: binder proc that owns buffer
* @thread: binder thread performing the buffer release
* @buffer: buffer to be freed
* @is_failure: failed to send transaction
*
* If buffer for an async transaction, enqueue the next async
* If the buffer is for an async transaction, enqueue the next async
* transaction from the node.
*
* Cleanup buffer and free it.
* Cleanup the buffer and free it.
*/
static void
binder_free_buf(struct binder_proc *proc,

View File

@@ -106,13 +106,22 @@ fn do_work(
return Ok(true);
}
if freeze.is_clearing {
_removed_listener = freeze_entry.remove_node();
kernel::warn_on!(freeze.num_cleared_duplicates != 0);
if freeze.num_pending_duplicates > 0 {
// The primary freeze listener was deleted, so convert a pending duplicate back
// into the primary one.
freeze.num_pending_duplicates -= 1;
freeze.is_pending = true;
freeze.is_clearing = true;
} else {
_removed_listener = freeze_entry.remove_node();
}
drop(node_refs);
writer.write_code(BR_CLEAR_FREEZE_NOTIFICATION_DONE)?;
writer.write_payload(&self.cookie.0)?;
Ok(true)
} else {
let is_frozen = freeze.node.owner.inner.lock().is_frozen;
let is_frozen = freeze.node.owner.inner.lock().is_frozen.is_fully_frozen();
if freeze.last_is_frozen == Some(is_frozen) {
return Ok(true);
}
@@ -245,8 +254,9 @@ pub(crate) fn freeze_notif_done(self: &Arc<Self>, reader: &mut UserSliceReader)
);
return Err(EINVAL);
}
if freeze.is_clearing {
// Immediately send another FreezeMessage for BR_CLEAR_FREEZE_NOTIFICATION_DONE.
let is_frozen = freeze.node.owner.inner.lock().is_frozen.is_fully_frozen();
if freeze.is_clearing || freeze.last_is_frozen != Some(is_frozen) {
// Immediately send another FreezeMessage.
clear_msg = Some(FreezeMessage::init(alloc, cookie));
}
freeze.is_pending = false;

View File

@@ -687,7 +687,7 @@ pub(crate) fn remove_freeze_listener(&self, p: &Arc<Process>) {
);
}
if inner.freeze_list.is_empty() {
_unused_capacity = mem::replace(&mut inner.freeze_list, KVVec::new());
_unused_capacity = mem::take(&mut inner.freeze_list);
}
}

View File

@@ -72,6 +72,33 @@ fn new(address: usize, size: usize) -> Self {
const PROC_DEFER_FLUSH: u8 = 1;
const PROC_DEFER_RELEASE: u8 = 2;
#[derive(Copy, Clone)]
pub(crate) enum IsFrozen {
Yes,
No,
InProgress,
}
impl IsFrozen {
/// Whether incoming transactions should be rejected due to freeze.
pub(crate) fn is_frozen(self) -> bool {
match self {
IsFrozen::Yes => true,
IsFrozen::No => false,
IsFrozen::InProgress => true,
}
}
/// Whether freeze notifications consider this process frozen.
pub(crate) fn is_fully_frozen(self) -> bool {
match self {
IsFrozen::Yes => true,
IsFrozen::No => false,
IsFrozen::InProgress => false,
}
}
}
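
For readers more at home in C, the same tri-state and its two predicates could be sketched as below (illustration only; the driver code is the Rust above). The point of the split is that a process that is still freezing already rejects transactions, but is not yet reported as frozen to freeze-notification listeners.

/* C illustration of the IsFrozen tri-state above. */
enum is_frozen_state { IS_FROZEN_NO, IS_FROZEN_IN_PROGRESS, IS_FROZEN_YES };

/* Should incoming transactions be rejected due to freeze? */
static bool is_frozen(enum is_frozen_state s)
{
	return s != IS_FROZEN_NO;
}

/* Do freeze notifications consider this process frozen? */
static bool is_fully_frozen(enum is_frozen_state s)
{
	return s == IS_FROZEN_YES;
}
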
/// The fields of `Process` protected by the spinlock.
pub(crate) struct ProcessInner {
is_manager: bool,
@@ -98,7 +125,7 @@ pub(crate) struct ProcessInner {
/// are woken up.
outstanding_txns: u32,
/// Process is frozen and unable to service binder transactions.
pub(crate) is_frozen: bool,
pub(crate) is_frozen: IsFrozen,
/// Process received sync transactions since last frozen.
pub(crate) sync_recv: bool,
/// Process received async transactions since last frozen.
@@ -124,7 +151,7 @@ fn new() -> Self {
started_thread_count: 0,
defer_work: 0,
outstanding_txns: 0,
is_frozen: false,
is_frozen: IsFrozen::No,
sync_recv: false,
async_recv: false,
binderfs_file: None,
@@ -1260,7 +1287,7 @@ fn deferred_release(self: Arc<Self>) {
let is_manager = {
let mut inner = self.inner.lock();
inner.is_dead = true;
inner.is_frozen = false;
inner.is_frozen = IsFrozen::No;
inner.sync_recv = false;
inner.async_recv = false;
inner.is_manager
@@ -1346,10 +1373,6 @@ fn deferred_release(self: Arc<Self>) {
.alloc
.take_for_each(|offset, size, debug_id, odata| {
let ptr = offset + address;
pr_warn!(
"{}: removing orphan mapping {offset}:{size}\n",
self.pid_in_current_ns()
);
let mut alloc =
Allocation::new(self.clone(), debug_id, offset, size, ptr, false);
if let Some(data) = odata {
@@ -1371,7 +1394,7 @@ pub(crate) fn drop_outstanding_txn(&self) {
return;
}
inner.outstanding_txns -= 1;
inner.is_frozen && inner.outstanding_txns == 0
inner.is_frozen.is_frozen() && inner.outstanding_txns == 0
};
if wake {
@@ -1385,7 +1408,7 @@ pub(crate) fn ioctl_freeze(&self, info: &BinderFreezeInfo) -> Result {
let mut inner = self.inner.lock();
inner.sync_recv = false;
inner.async_recv = false;
inner.is_frozen = false;
inner.is_frozen = IsFrozen::No;
drop(inner);
msgs.send_messages();
return Ok(());
@@ -1394,7 +1417,7 @@ pub(crate) fn ioctl_freeze(&self, info: &BinderFreezeInfo) -> Result {
let mut inner = self.inner.lock();
inner.sync_recv = false;
inner.async_recv = false;
inner.is_frozen = true;
inner.is_frozen = IsFrozen::InProgress;
if info.timeout_ms > 0 {
let mut jiffies = kernel::time::msecs_to_jiffies(info.timeout_ms);
@@ -1408,7 +1431,7 @@ pub(crate) fn ioctl_freeze(&self, info: &BinderFreezeInfo) -> Result {
.wait_interruptible_timeout(&mut inner, jiffies)
{
CondVarTimeoutResult::Signal { .. } => {
inner.is_frozen = false;
inner.is_frozen = IsFrozen::No;
return Err(ERESTARTSYS);
}
CondVarTimeoutResult::Woken { jiffies: remaining } => {
@@ -1422,17 +1445,18 @@ pub(crate) fn ioctl_freeze(&self, info: &BinderFreezeInfo) -> Result {
}
if inner.txns_pending_locked() {
inner.is_frozen = false;
inner.is_frozen = IsFrozen::No;
Err(EAGAIN)
} else {
drop(inner);
match self.prepare_freeze_messages() {
Ok(batch) => {
self.inner.lock().is_frozen = IsFrozen::Yes;
batch.send_messages();
Ok(())
}
Err(kernel::alloc::AllocError) => {
self.inner.lock().is_frozen = false;
self.inner.lock().is_frozen = IsFrozen::No;
Err(ENOMEM)
}
}

View File

@@ -249,7 +249,7 @@ pub(crate) fn submit(self: DLArc<Self>) -> BinderResult {
if oneway {
if let Some(target_node) = self.target_node.clone() {
if process_inner.is_frozen {
if process_inner.is_frozen.is_frozen() {
process_inner.async_recv = true;
if self.flags & TF_UPDATE_TXN != 0 {
if let Some(t_outdated) =
@@ -270,7 +270,7 @@ pub(crate) fn submit(self: DLArc<Self>) -> BinderResult {
}
}
if process_inner.is_frozen {
if process_inner.is_frozen.is_frozen() {
return Err(BinderError::new_frozen_oneway());
} else {
return Ok(());
@@ -280,7 +280,7 @@ pub(crate) fn submit(self: DLArc<Self>) -> BinderResult {
}
}
if process_inner.is_frozen {
if process_inner.is_frozen.is_frozen() {
process_inner.sync_recv = true;
return Err(BinderError::new_frozen());
}

View File

@@ -292,7 +292,7 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
* frequency (by keeping the initial capacity_freq_ref value).
*/
cpu_clk = of_clk_get(cpu_node, 0);
if (!PTR_ERR_OR_ZERO(cpu_clk)) {
if (!IS_ERR_OR_NULL(cpu_clk)) {
per_cpu(capacity_freq_ref, cpu) =
clk_get_rate(cpu_clk) / HZ_PER_KHZ;
clk_put(cpu_clk);
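
The small-looking change above is a real semantic fix. A sketch of why, assuming a configuration where the clock API can hand back NULL for an absent clock as well as an ERR_PTR():

/* Illustration: how the two predicates classify the possible values.
 *
 *   cpu_clk value      !PTR_ERR_OR_ZERO(cpu_clk)   !IS_ERR_OR_NULL(cpu_clk)
 *   valid pointer      true                        true
 *   NULL               true  <- treated as valid   false
 *   ERR_PTR(-ENOENT)   false                       false
 *
 * With the old check, a NULL clock fell through to clk_get_rate(NULL),
 * which returns 0 and would overwrite the initial capacity_freq_ref
 * value with a bogus 0 instead of keeping it, as the comment above
 * intends.
 */
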

View File

@@ -1784,7 +1784,7 @@ static int fw_devlink_dev_sync_state(struct device *dev, void *data)
return 0;
if (fw_devlink_sync_state == FW_DEVLINK_SYNC_STATE_STRICT) {
dev_warn(sup, "sync_state() pending due to %s\n",
dev_info(sup, "sync_state() pending due to %s\n",
dev_name(link->consumer));
return 0;
}

View File

@@ -23,50 +23,46 @@ struct devcd_entry {
void *data;
size_t datalen;
/*
 * Here, mutex is required to serialize the calls to del_wk work between
 * user/kernel space which happens when devcd is added with device_add()
 * and that sends uevent to user space. User space reads the uevents,
 * and calls to devcd_data_write() which try to modify the work which is
 * not even initialized/queued from devcoredump.
 *
 *        cpu0(X)                               cpu1(Y)
 *
 *        dev_coredump() uevent sent to user space
 *        device_add()  ======================> user space process Y reads the
 *                                              uevents writes to devcd fd
 *                                              which results into writes to
 *
 *                                              devcd_data_write()
 *                                                mod_delayed_work()
 *                                                  try_to_grab_pending()
 *                                                    timer_delete()
 *                                                      debug_assert_init()
 *        INIT_DELAYED_WORK()
 *        schedule_delayed_work()
 *
 * Also, mutex alone would not be enough to avoid scheduling of
 * del_wk work after it get flush from a call to devcd_free()
 * mentioned as below.
 *
 *        disabled_store()
 *          devcd_free()
 *            mutex_lock()             devcd_data_write()
 *              flush_delayed_work()
 *            mutex_unlock()
 *                                     mutex_lock()
 *                                     mod_delayed_work()
 *                                     mutex_unlock()
 *
 * So, delete_work flag is required.
 */
/*
 * There are 2 races for which mutex is required.
 *
 * The first race is between device creation and userspace writing to
 * schedule immediately destruction.
 *
 * This race is handled by arming the timer before device creation, but
 * when device creation fails the timer still exists.
 *
 * To solve this, hold the mutex during device_add(), and set
 * init_completed on success before releasing the mutex.
 *
 * That way the timer will never fire until device_add() is called,
 * it will do nothing if init_completed is not set. The timer is also
 * cancelled in that case.
 *
 * The second race involves multiple parallel invocations of devcd_free(),
 * add a deleted flag so only 1 can call the destructor.
 */
struct mutex mutex;
bool delete_work;
bool init_completed, deleted;
struct module *owner;
ssize_t (*read)(char *buffer, loff_t offset, size_t count,
void *data, size_t datalen);
void (*free)(void *data);
/*
* If nothing interferes and device_add() returns success,
* del_wk will destroy the device after the timer fires.
*
* Multiple userspace processes can interfere in the working of the timer:
* - Writing to the coredump will reschedule the timer to run immediately,
* if still armed.
*
* This is handled by using "if (cancel_delayed_work()) {
* schedule_delayed_work() }", to prevent re-arming after having
* been previously fired.
* - Writing to /sys/class/devcoredump/disabled will destroy the
* coredump synchronously.
* This is handled by using disable_delayed_work_sync(), and then
* checking if deleted flag is set with &devcd->mutex held.
*/
struct delayed_work del_wk;
struct device *failing_dev;
};
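
One detail worth spelling out from the comments above: re-arming is only allowed while the timer is still pending, which is exactly what the cancel-then-schedule pair encodes (sketch only; the real logic lives in devcd_data_write() further down). cancel_delayed_work() returns true only if it removed still-pending work, so a timer that has already fired is never re-armed; the init_completed flag checked in devcd_del() separately protects against destruction before device_add() finishes.

/* Sketch of the rearm rule described above; illustration only. */
static void devcd_request_immediate_del(struct devcd_entry *devcd)
{
	if (cancel_delayed_work(&devcd->del_wk))
		schedule_delayed_work(&devcd->del_wk, 0);
}
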
@@ -95,14 +91,27 @@ static void devcd_dev_release(struct device *dev)
kfree(devcd);
}
static void __devcd_del(struct devcd_entry *devcd)
{
devcd->deleted = true;
device_del(&devcd->devcd_dev);
put_device(&devcd->devcd_dev);
}
static void devcd_del(struct work_struct *wk)
{
struct devcd_entry *devcd;
bool init_completed;
devcd = container_of(wk, struct devcd_entry, del_wk.work);
device_del(&devcd->devcd_dev);
put_device(&devcd->devcd_dev);
/* devcd->mutex serializes against dev_coredumpm_timeout */
mutex_lock(&devcd->mutex);
init_completed = devcd->init_completed;
mutex_unlock(&devcd->mutex);
if (init_completed)
__devcd_del(devcd);
}
static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj,
@@ -122,12 +131,12 @@ static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct devcd_entry *devcd = dev_to_devcd(dev);
mutex_lock(&devcd->mutex);
if (!devcd->delete_work) {
devcd->delete_work = true;
mod_delayed_work(system_wq, &devcd->del_wk, 0);
}
mutex_unlock(&devcd->mutex);
/*
* Although it's tempting to use mod_delayed work here,
* that will cause a reschedule if the timer already fired.
*/
if (cancel_delayed_work(&devcd->del_wk))
schedule_delayed_work(&devcd->del_wk, 0);
return count;
}
@@ -151,11 +160,21 @@ static int devcd_free(struct device *dev, void *data)
{
struct devcd_entry *devcd = dev_to_devcd(dev);
/*
* To prevent a race with devcd_data_write(), disable work and
* complete manually instead.
*
* We cannot rely on the return value of
* disable_delayed_work_sync() here, because it might be in the
* middle of a cancel_delayed_work + schedule_delayed_work pair.
*
* devcd->mutex here guards against multiple parallel invocations
* of devcd_free().
*/
disable_delayed_work_sync(&devcd->del_wk);
mutex_lock(&devcd->mutex);
if (!devcd->delete_work)
devcd->delete_work = true;
flush_delayed_work(&devcd->del_wk);
if (!devcd->deleted)
__devcd_del(devcd);
mutex_unlock(&devcd->mutex);
return 0;
}
@@ -179,12 +198,10 @@ static ssize_t disabled_show(const struct class *class, const struct class_attri
* put_device() <- last reference
* error = fn(dev, data) devcd_dev_release()
* devcd_free(dev, data) kfree(devcd)
* mutex_lock(&devcd->mutex);
*
*
* In the above diagram, it looks like disabled_store() would be racing with parallelly
* running devcd_del() and result in memory abort while acquiring devcd->mutex which
* is called after kfree of devcd memory after dropping its last reference with
* running devcd_del() and result in memory abort after dropping its last reference with
* put_device(). However, this will not happen, as fn(dev, data) runs
* with its own reference to the device via klist_node, so it is not its
* last reference. So, the above situation would not occur.
@@ -374,7 +391,7 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner,
devcd->read = read;
devcd->free = free;
devcd->failing_dev = get_device(dev);
devcd->delete_work = false;
devcd->deleted = false;
mutex_init(&devcd->mutex);
device_initialize(&devcd->devcd_dev);
@@ -383,8 +400,14 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner,
atomic_inc_return(&devcd_count));
devcd->devcd_dev.class = &devcd_class;
mutex_lock(&devcd->mutex);
dev_set_uevent_suppress(&devcd->devcd_dev, true);
/* devcd->mutex prevents devcd_del() completing until init finishes */
mutex_lock(&devcd->mutex);
devcd->init_completed = false;
INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
schedule_delayed_work(&devcd->del_wk, timeout);
if (device_add(&devcd->devcd_dev))
goto put_device;
@@ -401,13 +424,20 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner,
dev_set_uevent_suppress(&devcd->devcd_dev, false);
kobject_uevent(&devcd->devcd_dev.kobj, KOBJ_ADD);
INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
schedule_delayed_work(&devcd->del_wk, timeout);
/*
* Safe to run devcd_del() now that we are done with devcd_dev.
* Alternatively we could have taken a ref on devcd_dev before
* dropping the lock.
*/
devcd->init_completed = true;
mutex_unlock(&devcd->mutex);
return;
put_device:
put_device(&devcd->devcd_dev);
mutex_unlock(&devcd->mutex);
cancel_delayed_work_sync(&devcd->del_wk);
put_device(&devcd->devcd_dev);
put_module:
module_put(owner);
free:

View File

@@ -48,8 +48,7 @@ struct regmap *__regmap_init_slimbus(struct slim_device *slimbus,
if (IS_ERR(bus))
return ERR_CAST(bus);
return __regmap_init(&slimbus->dev, bus, &slimbus->dev, config,
lock_key, lock_name);
return __regmap_init(&slimbus->dev, bus, slimbus, config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__regmap_init_slimbus);
@@ -63,8 +62,7 @@ struct regmap *__devm_regmap_init_slimbus(struct slim_device *slimbus,
if (IS_ERR(bus))
return ERR_CAST(bus);
return __devm_regmap_init(&slimbus->dev, bus, &slimbus, config,
lock_key, lock_name);
return __devm_regmap_init(&slimbus->dev, bus, slimbus, config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_slimbus);
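
The substance of both hunks above is the third argument: regmap hands that pointer back to the slimbus bus callbacks as their context, so it must be the struct slim_device itself, not its embedded struct device, and certainly not the address of the local slimbus pointer (note the old &slimbus in the devm variant). A hedged sketch of why the context type matters; the callback below is illustrative, not this file's actual implementation, and the slim_read() usage is an assumption about the slimbus API.

/* Illustration: a regmap_bus callback dereferences its context. */
static int slimbus_ctx_read(void *context, const void *reg, size_t reg_size,
			    void *val, size_t val_size)
{
	/* A context of &slimbus->dev or &slimbus would make this cast bogus. */
	struct slim_device *sdev = context;

	return slim_read(sdev, *(const u16 *)reg, val_size, val);
}
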

View File

@@ -294,6 +294,8 @@ static int bcma_register_devices(struct bcma_bus *bus)
int err;
list_for_each_entry(core, &bus->cores, list) {
struct device_node *np;
/* We support that core ourselves */
switch (core->id.id) {
case BCMA_CORE_4706_CHIPCOMMON:
@@ -311,6 +313,10 @@ static int bcma_register_devices(struct bcma_bus *bus)
if (bcma_is_core_needed_early(core->id.id))
continue;
np = core->dev.of_node;
if (np && !of_device_is_available(np))
continue;
/* Only first GMAC core on BCM4706 is connected and working */
if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
core->core_unit > 0)

View File

@@ -52,6 +52,7 @@
static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);
static struct workqueue_struct *nbd_del_wq;
static struct cred *nbd_cred;
static int nbd_total_devices = 0;
struct nbd_sock {
@@ -554,6 +555,7 @@ static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
int result;
struct msghdr msg = {};
unsigned int noreclaim_flag;
const struct cred *old_cred;
if (unlikely(!sock)) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
@@ -562,6 +564,8 @@ static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
return -EINVAL;
}
old_cred = override_creds(nbd_cred);
msg.msg_iter = *iter;
noreclaim_flag = memalloc_noreclaim_save();
@@ -586,6 +590,8 @@ static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
memalloc_noreclaim_restore(noreclaim_flag);
revert_creds(old_cred);
return result;
}
@@ -2677,7 +2683,15 @@ static int __init nbd_init(void)
return -ENOMEM;
}
nbd_cred = prepare_kernel_cred(&init_task);
if (!nbd_cred) {
destroy_workqueue(nbd_del_wq);
unregister_blkdev(NBD_MAJOR, "nbd");
return -ENOMEM;
}
if (genl_register_family(&nbd_genl_family)) {
put_cred(nbd_cred);
destroy_workqueue(nbd_del_wq);
unregister_blkdev(NBD_MAJOR, "nbd");
return -EINVAL;
@@ -2732,6 +2746,7 @@ static void __exit nbd_cleanup(void)
/* Also wait until nbd_dev_remove_work() completes */
destroy_workqueue(nbd_del_wq);
put_cred(nbd_cred);
idr_destroy(&nbd_index_idr);
unregister_blkdev(NBD_MAJOR, "nbd");
}
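
The pattern above pins a kernel credential at module init and temporarily assumes it around socket I/O, so nbd transmissions do not run with the (possibly less privileged) credentials of whichever task happens to trigger them. A minimal sketch of the same override pattern; do_socket_io() is a hypothetical placeholder, only the override_creds()/revert_creds() pairing mirrors the hunk above.

/* Minimal sketch of the credential-override pattern used above. */
static struct cred *io_cred;	/* from prepare_kernel_cred(&init_task) at init */

static int do_socket_io(struct socket *sock);	/* hypothetical placeholder */

static int xmit_with_kernel_creds(struct socket *sock)
{
	const struct cred *old_cred = override_creds(io_cred);
	int ret = do_socket_io(sock);	/* runs with full kernel creds */

	revert_creds(old_cred);
	return ret;
}
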

View File

@@ -1949,6 +1949,7 @@ static int null_add_dev(struct nullb_device *dev)
.logical_block_size = dev->blocksize,
.physical_block_size = dev->blocksize,
.max_hw_sectors = dev->max_sectors,
.dma_alignment = dev->blocksize - 1,
};
struct nullb *nullb;
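
For context, dma_alignment in queue_limits is a mask: the block layer checks user buffer addresses and lengths against it (bouncing or rejecting misaligned I/O), so setting it to blocksize - 1 requires block-aligned buffers rather than the smaller default alignment. A sketch of the constraint, assuming the usual mask semantics; the helper is illustrative, the block layer performs this check internally.

/* Sketch of what a dma_alignment of (blocksize - 1) enforces. */
static bool buffer_ok(const void *buf, size_t len, unsigned int blocksize)
{
	unsigned long mask = blocksize - 1;	/* e.g. 4095 for 4K blocks */

	return (((unsigned long)buf | len) & mask) == 0;
}
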

View File

@@ -41,6 +41,7 @@ struct bpa10x_data {
struct usb_anchor rx_anchor;
struct sk_buff *rx_skb[2];
struct hci_uart hu;
};
static void bpa10x_tx_complete(struct urb *urb)
@@ -96,7 +97,7 @@ static void bpa10x_rx_complete(struct urb *urb)
if (urb->status == 0) {
bool idx = usb_pipebulk(urb->pipe);
data->rx_skb[idx] = h4_recv_buf(hdev, data->rx_skb[idx],
data->rx_skb[idx] = h4_recv_buf(&data->hu, data->rx_skb[idx],
urb->transfer_buffer,
urb->actual_length,
bpa10x_recv_pkts,
@@ -388,6 +389,7 @@ static int bpa10x_probe(struct usb_interface *intf,
hci_set_drvdata(hdev, data);
data->hdev = hdev;
data->hu.hdev = hdev;
SET_HCIDEV_DEV(hdev, &intf->dev);

Some files were not shown because too many files have changed in this diff.