ASoC: codecs: add support for ES8389

Merge series from Zhang Yi <zhangyi@everest-semi.com>:

The driver is for the Everest Semiconductor ES8389 CODEC.
Mark Brown
2025-05-15 11:43:48 +02:00
309 changed files with 4263 additions and 1550 deletions


@@ -7,5 +7,5 @@ check-private-items = true
disallowed-macros = [
# The `clippy::dbg_macro` lint only works with `std::dbg!`, thus we simulate
# it here, see: https://github.com/rust-lang/rust-clippy/issues/11303.
{ path = "kernel::dbg", reason = "the `dbg!` macro is intended as a debugging tool" },
{ path = "kernel::dbg", reason = "the `dbg!` macro is intended as a debugging tool", allow-invalid = true },
]


@@ -447,6 +447,8 @@ Luca Ceresoli <luca.ceresoli@bootlin.com> <luca@lucaceresoli.net>
Luca Weiss <luca@lucaweiss.eu> <luca@z3ntu.xyz>
Lukasz Luba <lukasz.luba@arm.com> <l.luba@partner.samsung.com>
Luo Jie <quic_luoj@quicinc.com> <luoj@codeaurora.org>
Lance Yang <lance.yang@linux.dev> <ioworker0@gmail.com>
Lance Yang <lance.yang@linux.dev> <mingzhe.yang@ly.com>
Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com>
Maciej W. Rozycki <macro@orcam.me.uk> <macro@linux-mips.org>
Maharaja Kennadyrajan <quic_mkenna@quicinc.com> <mkenna@codeaurora.org>
@@ -483,6 +485,7 @@ Matthias Fuchs <socketcan@esd.eu> <matthias.fuchs@esd.eu>
Matthieu Baerts <matttbe@kernel.org> <matthieu.baerts@tessares.net>
Matthieu CASTET <castet.matthieu@free.fr>
Matti Vaittinen <mazziesaccount@gmail.com> <matti.vaittinen@fi.rohmeurope.com>
Mattijs Korpershoek <mkorpershoek@kernel.org> <mkorpershoek@baylibre.com>
Matt Ranostay <matt@ranostay.sg> <matt.ranostay@konsulko.com>
Matt Ranostay <matt@ranostay.sg> <matt@ranostay.consulting>
Matt Ranostay <matt@ranostay.sg> Matthew Ranostay <mranostay@embeddedalley.com>
@@ -749,6 +752,7 @@ Tvrtko Ursulin <tursulin@ursulin.net> <tvrtko@ursulin.net>
Tycho Andersen <tycho@tycho.pizza> <tycho@tycho.ws>
Tzung-Bi Shih <tzungbi@kernel.org> <tzungbi@google.com>
Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de>
Uwe Kleine-König <u.kleine-koenig@baylibre.com> <ukleinek@baylibre.com>
Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
Uwe Kleine-König <ukleinek@strlen.de>
Uwe Kleine-König <ukl@pengutronix.de>


@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Mediatek's Keypad Controller
maintainers:
- Mattijs Korpershoek <mkorpershoek@baylibre.com>
- Mattijs Korpershoek <mkorpershoek@kernel.org>
allOf:
- $ref: /schemas/input/matrix-keymap.yaml#


@@ -74,19 +74,17 @@ properties:
- rev-rmii
- moca
# RX and TX delays are added by the MAC when required
# RX and TX delays are provided by the PCB. See below
- rgmii
# RGMII with internal RX and TX delays provided by the PHY,
# the MAC should not add the RX or TX delays in this case
# RX and TX delays are not provided by the PCB. This is the most
# frequent case. See below
- rgmii-id
# RGMII with internal RX delay provided by the PHY, the MAC
# should not add an RX delay in this case
# TX delay is provided by the PCB. See below
- rgmii-rxid
# RGMII with internal TX delay provided by the PHY, the MAC
# should not add a TX delay in this case
# RX delay is provided by the PCB. See below
- rgmii-txid
- rtbi
- smii
@@ -286,4 +284,89 @@ allOf:
additionalProperties: true
# Informative
# ===========
#
# 'phy-modes' & 'phy-connection-type' properties 'rgmii', 'rgmii-id',
# 'rgmii-rxid', and 'rgmii-txid' are frequently used wrongly by
# developers. This informative section clarifies their usage.
#
# The RGMII specification requires a 2ns delay between the data and
# clock signals on the RGMII bus. How this delay is implemented is not
# specified.
#
# One option is to make the clock traces on the PCB longer than the
# data traces. A sufficient difference in length can provide the 2ns
# delay. If both the RX and TX delays are implemented in this manner,
# 'rgmii' should be used, indicating that the PCB adds the delays.
#
# If the PCB does not add these delays via extra long traces,
# 'rgmii-id' should be used. Here, 'id' refers to 'internal delay',
# where either the MAC or PHY adds the delay.
#
# If only one of the two delays is implemented via extra long clock
# lines, either 'rgmii-rxid' or 'rgmii-txid' should be used,
# indicating the MAC or PHY should implement one of the delays
# internally, while the PCB implements the other delay.
#
# Device Tree describes hardware, and in this case it describes the
# PCB between the MAC and the PHY: whether or not the PCB implements
# the delays.
#
# In practice, very few PCBs make use of extra long clock lines. Hence
# any RGMII phy mode other than 'rgmii-id' is probably wrong, and is
# unlikely to be accepted during review without details provided in
# the commit description and comments in the .dts file.
#
# When the PCB does not implement the delays, the MAC or PHY must. As
# such, this is software configuration, and so not described in Device
# Tree.
#
# The following describes how Linux implements the configuration of
# the MAC and PHY to add these delays when the PCB does not. As stated
# above, developers often get this wrong, and the aim of this section
# is to reduce the frequency of these errors by Linux developers. Other
# users of the Device Tree may implement it differently, and still be
# consistent with both the normative and informative description
# above.
#
# By default in Linux, when using phylib/phylink, the MAC is expected
# to read the 'phy-mode' from Device Tree, not implement any delays,
# and pass the value to the PHY. The PHY will then implement delays as
# specified by the 'phy-mode'. The PHY should always be reconfigured
# to implement the needed delays, replacing any setting performed by
# strapping or the bootloader, etc.
#
# Experience to date is that all PHYs which implement RGMII also
# implement the ability to add or not add the needed delays. Hence
# this default is expected to work in all cases. Ignoring this default
# is likely to be questioned by reviewers, and to require a strong
# argument to be accepted.
#
# There are a small number of cases where the MAC has hard coded
# delays which cannot be disabled. The 'phy-mode' only describes the
# PCB. The inability to disable the delays in the MAC does not change
# the meaning of 'phy-mode'. It does however mean that a 'phy-mode' of
# 'rgmii' is invalid: it cannot be supported, since the PCB and the
# MAC or PHY both adding delays cannot result in a functional
# link. Thus the MAC should report a fatal error for any modes which
# cannot be supported. When the MAC implements the delay, it must
# ensure that the PHY does not also implement the same delay. So it
# must modify the phy-mode it passes to the PHY, removing the delay it
# has added. Failure to remove the delay will result in a
# non-functioning link.
#
# Sometimes there is a need to fine tune the delays. Often the MAC or
# PHY can perform this fine tuning. In the MAC node, the Device Tree
# properties 'rx-internal-delay-ps' and 'tx-internal-delay-ps' should
# be used to indicate fine tuning performed by the MAC. The values
# expected here are small. A value of 2000ps, i.e. 2ns, with a phy-mode
# of 'rgmii' will not be accepted by reviewers.
#
# If the PHY is to perform fine tuning, the properties
# 'rx-internal-delay-ps' and 'tx-internal-delay-ps' in the PHY node
# should be used. When the PHY is implementing delays, e.g. 'rgmii-id'
# these properties should have a value near to 2000ps. If the PCB is
# implementing delays, e.g. 'rgmii', a small value can be used to fine
# tune the delay added by the PCB.
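#
# As an informative sketch only - not part of the binding itself - the
# most frequent case described above could look as follows. The MAC
# compatible string, node names and unit addresses are hypothetical
# placeholders:
#
#     ethernet@f0000000 {
#             compatible = "vendor,example-mac";     /* hypothetical */
#             /* The PCB adds no delays; the PHY adds both internally */
#             phy-mode = "rgmii-id";
#             phy-handle = <&phy0>;
#
#             mdio {
#                     #address-cells = <1>;
#                     #size-cells = <0>;
#
#                     phy0: ethernet-phy@0 {
#                             reg = <0>;
#                             /* Optional fine tuning of the delays the
#                              * PHY implements; values near 2000ps
#                              * when using 'rgmii-id'. */
#                             rx-internal-delay-ps = <2000>;
#                             tx-internal-delay-ps = <2000>;
#                     };
#             };
#     };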
...


@@ -0,0 +1,50 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/sound/everest,es8389.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Everest ES8389 audio CODEC
maintainers:
- Michael Zhang <zhangyi@everest-semi.com>
allOf:
- $ref: dai-common.yaml#
properties:
compatible:
const: everest,es8389
reg:
maxItems: 1
clocks:
items:
- description: clock for master clock (MCLK)
clock-names:
items:
- const: mclk
"#sound-dai-cells":
const: 0
required:
- compatible
- reg
- "#sound-dai-cells"
additionalProperties: false
examples:
- |
i2c {
#address-cells = <1>;
#size-cells = <0>;
es8389: codec@10 {
compatible = "everest,es8389";
reg = <0x10>;
#sound-dai-cells = <0>;
};
};
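
# Informative only - not part of this binding: the codec is typically
# consumed through a generic card driver, for example simple-audio-card.
# The I2S controller phandle 'sai1' below is a hypothetical placeholder:
#
#     sound {
#             compatible = "simple-audio-card";
#             simple-audio-card,format = "i2s";
#
#             simple-audio-card,cpu {
#                     sound-dai = <&sai1>;
#             };
#
#             simple-audio-card,codec {
#                     sound-dai = <&es8389>;
#             };
#     };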


@@ -2519,6 +2519,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
F: arch/arm/boot/dts/nxp/imx/
F: arch/arm/boot/dts/nxp/mxs/
F: arch/arm64/boot/dts/freescale/
X: Documentation/devicetree/bindings/media/i2c/
X: arch/arm64/boot/dts/freescale/fsl-*
X: arch/arm64/boot/dts/freescale/qoriq-*
X: drivers/media/i2c/
@@ -8726,6 +8727,7 @@ M: Chao Yu <chao@kernel.org>
R: Yue Hu <zbestahu@gmail.com>
R: Jeffle Xu <jefflexu@linux.alibaba.com>
R: Sandeep Dhavale <dhavale@google.com>
R: Hongbo Li <lihongbo22@huawei.com>
L: linux-erofs@lists.ozlabs.org
S: Maintained
W: https://erofs.docs.kernel.org
@@ -11235,7 +11237,6 @@ S: Maintained
F: drivers/i2c/busses/i2c-cht-wc.c
I2C/SMBUS ISMT DRIVER
M: Seth Heasley <seth.heasley@intel.com>
M: Neil Horman <nhorman@tuxdriver.com>
L: linux-i2c@vger.kernel.org
F: Documentation/i2c/busses/i2c-ismt.rst
@@ -15071,7 +15072,7 @@ F: Documentation/devicetree/bindings/media/mediatek-jpeg-*.yaml
F: drivers/media/platform/mediatek/jpeg/
MEDIATEK KEYPAD DRIVER
M: Mattijs Korpershoek <mkorpershoek@baylibre.com>
M: Mattijs Korpershoek <mkorpershoek@kernel.org>
S: Supported
F: Documentation/devicetree/bindings/input/mediatek,mt6779-keypad.yaml
F: drivers/input/keyboard/mt6779-keypad.c
@@ -15494,24 +15495,45 @@ F: Documentation/mm/
F: include/linux/gfp.h
F: include/linux/gfp_types.h
F: include/linux/memfd.h
F: include/linux/memory.h
F: include/linux/memory_hotplug.h
F: include/linux/memory-tiers.h
F: include/linux/mempolicy.h
F: include/linux/mempool.h
F: include/linux/memremap.h
F: include/linux/mm.h
F: include/linux/mm_*.h
F: include/linux/mmzone.h
F: include/linux/mmu_notifier.h
F: include/linux/pagewalk.h
F: include/linux/rmap.h
F: include/trace/events/ksm.h
F: mm/
F: tools/mm/
F: tools/testing/selftests/mm/
N: include/linux/page[-_]*
MEMORY MANAGEMENT - CORE
M: Andrew Morton <akpm@linux-foundation.org>
M: David Hildenbrand <david@redhat.com>
R: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
R: Liam R. Howlett <Liam.Howlett@oracle.com>
R: Vlastimil Babka <vbabka@suse.cz>
R: Mike Rapoport <rppt@kernel.org>
R: Suren Baghdasaryan <surenb@google.com>
R: Michal Hocko <mhocko@suse.com>
L: linux-mm@kvack.org
S: Maintained
W: http://www.linux-mm.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
F: include/linux/memory.h
F: include/linux/mm.h
F: include/linux/mm_*.h
F: include/linux/mmdebug.h
F: include/linux/pagewalk.h
F: mm/Kconfig
F: mm/debug.c
F: mm/init-mm.c
F: mm/memory.c
F: mm/pagewalk.c
F: mm/util.c
MEMORY MANAGEMENT - EXECMEM
M: Andrew Morton <akpm@linux-foundation.org>
M: Mike Rapoport <rppt@kernel.org>
@@ -15545,6 +15567,19 @@ F: mm/page_alloc.c
F: include/linux/gfp.h
F: include/linux/compaction.h
MEMORY MANAGEMENT - RMAP (REVERSE MAPPING)
M: Andrew Morton <akpm@linux-foundation.org>
M: David Hildenbrand <david@redhat.com>
M: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
R: Rik van Riel <riel@surriel.com>
R: Liam R. Howlett <Liam.Howlett@oracle.com>
R: Vlastimil Babka <vbabka@suse.cz>
R: Harry Yoo <harry.yoo@oracle.com>
L: linux-mm@kvack.org
S: Maintained
F: include/linux/rmap.h
F: mm/rmap.c
MEMORY MANAGEMENT - SECRETMEM
M: Andrew Morton <akpm@linux-foundation.org>
M: Mike Rapoport <rppt@kernel.org>
@@ -15553,6 +15588,30 @@ S: Maintained
F: include/linux/secretmem.h
F: mm/secretmem.c
MEMORY MANAGEMENT - THP (TRANSPARENT HUGE PAGE)
M: Andrew Morton <akpm@linux-foundation.org>
M: David Hildenbrand <david@redhat.com>
R: Zi Yan <ziy@nvidia.com>
R: Baolin Wang <baolin.wang@linux.alibaba.com>
R: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
R: Liam R. Howlett <Liam.Howlett@oracle.com>
R: Nico Pache <npache@redhat.com>
R: Ryan Roberts <ryan.roberts@arm.com>
R: Dev Jain <dev.jain@arm.com>
L: linux-mm@kvack.org
S: Maintained
W: http://www.linux-mm.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
F: Documentation/admin-guide/mm/transhuge.rst
F: include/linux/huge_mm.h
F: include/linux/khugepaged.h
F: include/trace/events/huge_memory.h
F: mm/huge_memory.c
F: mm/khugepaged.c
F: tools/testing/selftests/mm/khugepaged.c
F: tools/testing/selftests/mm/split_huge_page_test.c
F: tools/testing/selftests/mm/transhuge-stress.c
MEMORY MANAGEMENT - USERFAULTFD
M: Andrew Morton <akpm@linux-foundation.org>
R: Peter Xu <peterx@redhat.com>


@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 15
SUBLEVEL = 0
EXTRAVERSION = -rc5
EXTRAVERSION = -rc6
NAME = Baby Opossum Posse
# *DOCUMENTATION*


@@ -40,6 +40,9 @@ ethphy1: ethernet-phy@1 {
reg = <1>;
interrupt-parent = <&gpio4>;
interrupts = <16 IRQ_TYPE_LEVEL_LOW>;
micrel,led-mode = <1>;
clocks = <&clks IMX6UL_CLK_ENET_REF>;
clock-names = "rmii-ref";
status = "okay";
};
};


@@ -44,7 +44,7 @@ cpu0: cpu@0 {
next-level-cache = <&l2_0>;
clocks = <&scmi_dvfs 0>;
l2_0: l2-cache-0 {
l2_0: l2-cache {
compatible = "cache";
cache-level = <2>;
/* 8 ways set associative */
@@ -53,13 +53,6 @@ l2_0: l2-cache-0 {
cache-sets = <2048>;
cache-unified;
next-level-cache = <&l3_0>;
l3_0: l3-cache {
compatible = "cache";
cache-level = <3>;
cache-size = <0x100000>;
cache-unified;
};
};
};
@@ -78,7 +71,7 @@ cpu1: cpu@100 {
next-level-cache = <&l2_1>;
clocks = <&scmi_dvfs 0>;
l2_1: l2-cache-1 {
l2_1: l2-cache {
compatible = "cache";
cache-level = <2>;
/* 8 ways set associative */
@@ -105,7 +98,7 @@ cpu2: cpu@10000 {
next-level-cache = <&l2_2>;
clocks = <&scmi_dvfs 1>;
l2_2: l2-cache-2 {
l2_2: l2-cache {
compatible = "cache";
cache-level = <2>;
/* 8 ways set associative */
@@ -132,7 +125,7 @@ cpu3: cpu@10100 {
next-level-cache = <&l2_3>;
clocks = <&scmi_dvfs 1>;
l2_3: l2-cache-3 {
l2_3: l2-cache {
compatible = "cache";
cache-level = <2>;
/* 8 ways set associative */
@@ -143,6 +136,13 @@ l2_3: l2-cache-3 {
next-level-cache = <&l3_0>;
};
};
l3_0: l3-cache {
compatible = "cache";
cache-level = <3>;
cache-size = <0x100000>;
cache-unified;
};
};
firmware {


@@ -144,6 +144,19 @@ reg_usdhc2_vmmc: regulator-usdhc2 {
startup-delay-us = <20000>;
};
reg_usdhc2_vqmmc: regulator-usdhc2-vqmmc {
compatible = "regulator-gpio";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc2_vsel>;
gpios = <&gpio1 4 GPIO_ACTIVE_HIGH>;
regulator-max-microvolt = <3300000>;
regulator-min-microvolt = <1800000>;
states = <1800000 0x1>,
<3300000 0x0>;
regulator-name = "PMIC_USDHC_VSELECT";
vin-supply = <&reg_nvcc_sd>;
};
reserved-memory {
#address-cells = <2>;
#size-cells = <2>;
@@ -269,7 +282,7 @@ &gpio1 {
"SODIMM_19",
"",
"",
"",
"PMIC_USDHC_VSELECT",
"",
"",
"",
@@ -785,6 +798,7 @@ &usdhc2 {
pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_cd>;
pinctrl-3 = <&pinctrl_usdhc2_sleep>, <&pinctrl_usdhc2_cd_sleep>;
vmmc-supply = <&reg_usdhc2_vmmc>;
vqmmc-supply = <&reg_usdhc2_vqmmc>;
};
&wdog1 {
@@ -1206,13 +1220,17 @@ pinctrl_usdhc2_pwr_en: usdhc2pwrengrp {
<MX8MM_IOMUXC_NAND_CLE_GPIO3_IO5 0x6>; /* SODIMM 76 */
};
pinctrl_usdhc2_vsel: usdhc2vselgrp {
fsl,pins =
<MX8MM_IOMUXC_GPIO1_IO04_GPIO1_IO4 0x10>; /* PMIC_USDHC_VSELECT */
};
/*
* Note: Due to ERR050080 we use discrete external on-module resistors pulling up to the
* on-module +V3.3_1.8_SD (LDO5) rail and explicitly disable the internal pull-ups here.
*/
pinctrl_usdhc2: usdhc2grp {
fsl,pins =
<MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x10>,
<MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x90>, /* SODIMM 78 */
<MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x90>, /* SODIMM 74 */
<MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x90>, /* SODIMM 80 */
@@ -1223,7 +1241,6 @@ pinctrl_usdhc2: usdhc2grp {
pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
fsl,pins =
<MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x10>,
<MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x94>,
<MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x94>,
<MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x94>,
@@ -1234,7 +1251,6 @@ pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
fsl,pins =
<MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x10>,
<MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x96>,
<MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x96>,
<MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x96>,
@@ -1246,7 +1262,6 @@ pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
/* Avoid backfeeding with removed card power */
pinctrl_usdhc2_sleep: usdhc2slpgrp {
fsl,pins =
<MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x0>,
<MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x0>,
<MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x0>,
<MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x0>,


@@ -24,6 +24,20 @@ &clk {
fsl,operating-mode = "nominal";
};
&gpu2d {
assigned-clocks = <&clk IMX8MP_CLK_GPU2D_CORE>;
assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_800M>;
assigned-clock-rates = <800000000>;
};
&gpu3d {
assigned-clocks = <&clk IMX8MP_CLK_GPU3D_CORE>,
<&clk IMX8MP_CLK_GPU3D_SHADER_CORE>;
assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_800M>,
<&clk IMX8MP_SYS_PLL1_800M>;
assigned-clock-rates = <800000000>, <800000000>;
};
&pgc_hdmimix {
assigned-clocks = <&clk IMX8MP_CLK_HDMI_AXI>,
<&clk IMX8MP_CLK_HDMI_APB>;
@@ -46,6 +60,18 @@ &pgc_gpumix {
assigned-clock-rates = <600000000>, <300000000>;
};
&pgc_mlmix {
assigned-clocks = <&clk IMX8MP_CLK_ML_CORE>,
<&clk IMX8MP_CLK_ML_AXI>,
<&clk IMX8MP_CLK_ML_AHB>;
assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_800M>,
<&clk IMX8MP_SYS_PLL1_800M>,
<&clk IMX8MP_SYS_PLL1_800M>;
assigned-clock-rates = <800000000>,
<800000000>,
<300000000>;
};
&media_blk_ctrl {
assigned-clocks = <&clk IMX8MP_CLK_MEDIA_AXI>,
<&clk IMX8MP_CLK_MEDIA_APB>,


@@ -1626,7 +1626,7 @@ pcie0: pcie@4c300000 {
reg = <0 0x4c300000 0 0x10000>,
<0 0x60100000 0 0xfe00000>,
<0 0x4c360000 0 0x10000>,
<0 0x4c340000 0 0x2000>;
<0 0x4c340000 0 0x4000>;
reg-names = "dbi", "config", "atu", "app";
ranges = <0x81000000 0x0 0x00000000 0x0 0x6ff00000 0 0x00100000>,
<0x82000000 0x0 0x10000000 0x9 0x10000000 0 0x10000000>;
@@ -1673,7 +1673,7 @@ pcie0_ep: pcie-ep@4c300000 {
reg = <0 0x4c300000 0 0x10000>,
<0 0x4c360000 0 0x1000>,
<0 0x4c320000 0 0x1000>,
<0 0x4c340000 0 0x2000>,
<0 0x4c340000 0 0x4000>,
<0 0x4c370000 0 0x10000>,
<0x9 0 1 0>;
reg-names = "dbi","atu", "dbi2", "app", "dma", "addr_space";
@@ -1700,7 +1700,7 @@ pcie1: pcie@4c380000 {
reg = <0 0x4c380000 0 0x10000>,
<8 0x80100000 0 0xfe00000>,
<0 0x4c3e0000 0 0x10000>,
<0 0x4c3c0000 0 0x2000>;
<0 0x4c3c0000 0 0x4000>;
reg-names = "dbi", "config", "atu", "app";
ranges = <0x81000000 0 0x00000000 0x8 0x8ff00000 0 0x00100000>,
<0x82000000 0 0x10000000 0xa 0x10000000 0 0x10000000>;
@@ -1749,7 +1749,7 @@ pcie1_ep: pcie-ep@4c380000 {
reg = <0 0x4c380000 0 0x10000>,
<0 0x4c3e0000 0 0x1000>,
<0 0x4c3a0000 0 0x1000>,
<0 0x4c3c0000 0 0x2000>,
<0 0x4c3c0000 0 0x4000>,
<0 0x4c3f0000 0 0x10000>,
<0xa 0 1 0>;
reg-names = "dbi", "atu", "dbi2", "app", "dma", "addr_space";


@@ -116,11 +116,11 @@ syscfg: syscon@44230000 {
};
intc: interrupt-controller@4ac10000 {
compatible = "arm,cortex-a7-gic";
compatible = "arm,gic-400";
reg = <0x4ac10000 0x0 0x1000>,
<0x4ac20000 0x0 0x2000>,
<0x4ac40000 0x0 0x2000>,
<0x4ac60000 0x0 0x2000>;
<0x4ac20000 0x0 0x20000>,
<0x4ac40000 0x0 0x20000>,
<0x4ac60000 0x0 0x20000>;
#interrupt-cells = <3>;
interrupt-controller;
};


@@ -1201,13 +1201,12 @@ exti2: interrupt-controller@46230000 {
};
intc: interrupt-controller@4ac10000 {
compatible = "arm,cortex-a7-gic";
compatible = "arm,gic-400";
reg = <0x4ac10000 0x1000>,
<0x4ac20000 0x2000>,
<0x4ac40000 0x2000>,
<0x4ac60000 0x2000>;
<0x4ac20000 0x20000>,
<0x4ac40000 0x20000>,
<0x4ac60000 0x20000>;
#interrupt-cells = <3>;
#address-cells = <1>;
interrupt-controller;
};
};


@@ -115,14 +115,13 @@ scmi_vdda18adc: regulator@7 {
};
intc: interrupt-controller@4ac00000 {
compatible = "arm,cortex-a7-gic";
compatible = "arm,gic-400";
#interrupt-cells = <3>;
#address-cells = <1>;
interrupt-controller;
reg = <0x0 0x4ac10000 0x0 0x1000>,
<0x0 0x4ac20000 0x0 0x2000>,
<0x0 0x4ac40000 0x0 0x2000>,
<0x0 0x4ac60000 0x0 0x2000>;
<0x0 0x4ac20000 0x0 0x20000>,
<0x0 0x4ac40000 0x0 0x20000>,
<0x0 0x4ac60000 0x0 0x20000>;
};
psci {


@@ -52,7 +52,7 @@
mrs x0, id_aa64mmfr1_el1
ubfx x0, x0, #ID_AA64MMFR1_EL1_HCX_SHIFT, #4
cbz x0, .Lskip_hcrx_\@
mov_q x0, HCRX_HOST_FLAGS
mov_q x0, (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En | HCRX_EL2_EnFPM)
/* Enable GCS if supported */
mrs_s x1, SYS_ID_AA64PFR1_EL1


@@ -100,9 +100,8 @@
HCR_FMO | HCR_IMO | HCR_PTW | HCR_TID3 | HCR_TID1)
#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK | HCR_ATA)
#define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC)
#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H | HCR_AMO | HCR_IMO | HCR_FMO)
#define HCRX_HOST_FLAGS (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En | HCRX_EL2_EnFPM)
#define MPAMHCR_HOST_FLAGS 0
/* TCR_EL2 Registers bits */


@@ -99,6 +99,19 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
return res;
}
#if IS_ENABLED(CONFIG_CC_IS_GCC) && IS_ENABLED(CONFIG_PAGE_SIZE_64KB)
static __always_inline const struct vdso_time_data *__arch_get_vdso_u_time_data(void)
{
const struct vdso_time_data *ret = &vdso_u_time_data;
/* Work around invalid absolute relocations */
OPTIMIZER_HIDE_VAR(ret);
return ret;
}
#define __arch_get_vdso_u_time_data __arch_get_vdso_u_time_data
#endif /* IS_ENABLED(CONFIG_CC_IS_GCC) && IS_ENABLED(CONFIG_PAGE_SIZE_64KB) */
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_VDSO_GETTIMEOFDAY_H */


@@ -114,7 +114,14 @@ static struct arm64_cpu_capabilities const __ro_after_init *cpucap_ptrs[ARM64_NC
DECLARE_BITMAP(boot_cpucaps, ARM64_NCAPS);
bool arm64_use_ng_mappings = false;
/*
* arm64_use_ng_mappings must be placed in the .data section, otherwise it
* ends up in the .bss section, where it is initialized in early_map_kernel()
* after the MMU (with the idmap) has been enabled. create_init_idmap() - which
* runs before early_map_kernel() and reads the variable via PTE_MAYBE_NG -
* may end up generating incorrect idmap page table attributes.
*/
bool arm64_use_ng_mappings __read_mostly = false;
EXPORT_SYMBOL(arm64_use_ng_mappings);
DEFINE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector) = vectors;


@@ -235,6 +235,8 @@ static inline void __deactivate_traps_mpam(void)
static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
write_sysreg(1 << 15, hstr_el2);
@@ -245,11 +247,8 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
* EL1 instead of being trapped to EL2.
*/
if (system_supports_pmuv3()) {
struct kvm_cpu_context *hctxt;
write_sysreg(0, pmselr_el0);
hctxt = host_data_ptr(host_ctxt);
ctxt_sys_reg(hctxt, PMUSERENR_EL0) = read_sysreg(pmuserenr_el0);
write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
vcpu_set_flag(vcpu, PMUSERENR_ON_CPU);
@@ -269,6 +268,7 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
hcrx &= ~clr;
}
ctxt_sys_reg(hctxt, HCRX_EL2) = read_sysreg_s(SYS_HCRX_EL2);
write_sysreg_s(hcrx, SYS_HCRX_EL2);
}
@@ -278,19 +278,18 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
write_sysreg(*host_data_ptr(host_debug_state.mdcr_el2), mdcr_el2);
write_sysreg(0, hstr_el2);
if (system_supports_pmuv3()) {
struct kvm_cpu_context *hctxt;
hctxt = host_data_ptr(host_ctxt);
write_sysreg(ctxt_sys_reg(hctxt, PMUSERENR_EL0), pmuserenr_el0);
vcpu_clear_flag(vcpu, PMUSERENR_ON_CPU);
}
if (cpus_have_final_cap(ARM64_HAS_HCX))
write_sysreg_s(HCRX_HOST_FLAGS, SYS_HCRX_EL2);
write_sysreg_s(ctxt_sys_reg(hctxt, HCRX_EL2), SYS_HCRX_EL2);
__deactivate_traps_hfgxtr(vcpu);
__deactivate_traps_mpam();


@@ -503,7 +503,7 @@ int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id)
{
int ret;
if (!addr_is_memory(addr))
if (!range_is_memory(addr, addr + size))
return -EPERM;
ret = host_stage2_try(kvm_pgtable_stage2_set_owner, &host_mmu.pgt,


@@ -429,23 +429,27 @@ u64 __vgic_v3_get_gic_config(void)
/*
* To check whether we have a MMIO-based (GICv2 compatible)
* CPU interface, we need to disable the system register
* view. To do that safely, we have to prevent any interrupt
* from firing (which would be deadly).
* view.
*
* Note that this only makes sense on VHE, as interrupts are
* already masked for nVHE as part of the exception entry to
* EL2.
*/
if (has_vhe())
flags = local_daif_save();
/*
* Table 11-2 "Permitted ICC_SRE_ELx.SRE settings" indicates
* that to be able to set ICC_SRE_EL1.SRE to 0, all the
* interrupt overrides must be set. You've got to love this.
*
* As we always run VHE with HCR_xMO set, no extra xMO
* manipulation is required in that case.
*
* To safely disable SRE, we have to prevent any interrupt
* from firing (which would be deadly). This only makes sense
* on VHE, as interrupts are already masked for nVHE as part
* of the exception entry to EL2.
*/
sysreg_clear_set(hcr_el2, 0, HCR_AMO | HCR_FMO | HCR_IMO);
isb();
if (has_vhe()) {
flags = local_daif_save();
} else {
sysreg_clear_set(hcr_el2, 0, HCR_AMO | HCR_FMO | HCR_IMO);
isb();
}
write_gicreg(0, ICC_SRE_EL1);
isb();
@@ -453,11 +457,13 @@ u64 __vgic_v3_get_gic_config(void)
write_gicreg(sre, ICC_SRE_EL1);
isb();
sysreg_clear_set(hcr_el2, HCR_AMO | HCR_FMO | HCR_IMO, 0);
isb();
if (has_vhe())
if (has_vhe()) {
local_daif_restore(flags);
} else {
sysreg_clear_set(hcr_el2, HCR_AMO | HCR_FMO | HCR_IMO, 0);
isb();
}
val = (val & ICC_SRE_EL1_SRE) ? 0 : (1ULL << 63);
val |= read_gicreg(ICH_VTR_EL2);


@@ -1501,6 +1501,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
return -EFAULT;
}
if (!is_protected_kvm_enabled())
memcache = &vcpu->arch.mmu_page_cache;
else
memcache = &vcpu->arch.pkvm_memcache;
/*
* Permission faults just need to update the existing leaf entry,
* and so normally don't require allocations from the memcache. The
@@ -1510,13 +1515,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (!fault_is_perm || (logging_active && write_fault)) {
int min_pages = kvm_mmu_cache_min_pages(vcpu->arch.hw_mmu);
if (!is_protected_kvm_enabled()) {
memcache = &vcpu->arch.mmu_page_cache;
if (!is_protected_kvm_enabled())
ret = kvm_mmu_topup_memory_cache(memcache, min_pages);
} else {
memcache = &vcpu->arch.pkvm_memcache;
else
ret = topup_hyp_memcache(memcache, min_pages);
}
if (ret)
return ret;
}


@@ -1945,6 +1945,12 @@ static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
if ((hw_val & mpam_mask) == (user_val & mpam_mask))
user_val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
/* Fail the guest's request to disable the AA64 ISA at EL{0,1,2} */
if (!FIELD_GET(ID_AA64PFR0_EL1_EL0, user_val) ||
!FIELD_GET(ID_AA64PFR0_EL1_EL1, user_val) ||
(vcpu_has_nv(vcpu) && !FIELD_GET(ID_AA64PFR0_EL1_EL2, user_val)))
return -EINVAL;
return set_id_reg(vcpu, rd, user_val);
}


@@ -6,11 +6,10 @@
#include <linux/linkage.h>
extern void (*cpu_wait)(void);
extern void r4k_wait(void);
extern asmlinkage void __r4k_wait(void);
extern asmlinkage void r4k_wait(void);
extern void r4k_wait_irqoff(void);
static inline int using_rollback_handler(void)
static inline int using_skipover_handler(void)
{
return cpu_wait == r4k_wait;
}


@@ -65,7 +65,8 @@ static inline void instruction_pointer_set(struct pt_regs *regs,
/* Query offset/name of register from its name/offset */
extern int regs_query_register_offset(const char *name);
#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last))
#define MAX_REG_OFFSET \
(offsetof(struct pt_regs, __last) - sizeof(unsigned long))
/**
* regs_get_register() - get register value from its offset


@@ -104,48 +104,59 @@ handle_vcei:
__FINIT
.align 5 /* 32 byte rollback region */
LEAF(__r4k_wait)
.set push
.set noreorder
/* start of rollback region */
LONG_L t0, TI_FLAGS($28)
nop
andi t0, _TIF_NEED_RESCHED
bnez t0, 1f
nop
nop
nop
#ifdef CONFIG_CPU_MICROMIPS
nop
nop
nop
nop
#endif
.section .cpuidle.text,"ax"
/* Align to 32 bytes for the maximum idle interrupt region size. */
.align 5
LEAF(r4k_wait)
/* Keep the ISA bit clear for calculations on local labels here. */
0: .fill 0
/* Start of idle interrupt region. */
local_irq_enable
/*
* If an interrupt lands here, before going idle on the next
* instruction, we must *NOT* go idle since the interrupt could
* have set TIF_NEED_RESCHED or caused a timer to need resched.
* Fall through -- see skipover_handler below -- and have the
* idle loop take care of things.
*/
1: .fill 0
/* The R2 EI/EHB sequence takes 8 bytes, otherwise pad up. */
.if 1b - 0b > 32
.error "overlong idle interrupt region"
.elseif 1b - 0b > 8
.align 4
.endif
2: .fill 0
.equ r4k_wait_idle_size, 2b - 0b
/* End of idle interrupt region; size has to be a power of 2. */
.set MIPS_ISA_ARCH_LEVEL_RAW
r4k_wait_insn:
wait
/* end of rollback region (the region size must be power of two) */
1:
r4k_wait_exit:
.set mips0
local_irq_disable
jr ra
nop
.set pop
END(__r4k_wait)
END(r4k_wait)
.previous
.macro BUILD_ROLLBACK_PROLOGUE handler
FEXPORT(rollback_\handler)
.macro BUILD_SKIPOVER_PROLOGUE handler
FEXPORT(skipover_\handler)
.set push
.set noat
MFC0 k0, CP0_EPC
PTR_LA k1, __r4k_wait
ori k0, 0x1f /* 32 byte rollback region */
xori k0, 0x1f
/* Subtract/add 2 to let the ISA bit propagate through the mask. */
PTR_LA k1, r4k_wait_insn - 2
ori k0, r4k_wait_idle_size - 2
.set noreorder
bne k0, k1, \handler
PTR_ADDIU k0, r4k_wait_exit - r4k_wait_insn + 2
.set reorder
MTC0 k0, CP0_EPC
.set pop
.endm
.align 5
BUILD_ROLLBACK_PROLOGUE handle_int
BUILD_SKIPOVER_PROLOGUE handle_int
NESTED(handle_int, PT_SIZE, sp)
.cfi_signal_frame
#ifdef CONFIG_TRACE_IRQFLAGS
@@ -265,7 +276,7 @@ NESTED(except_vec_ejtag_debug, 0, sp)
* This prototype is copied to ebase + n*IntCtl.VS and patched
* to invoke the handler
*/
BUILD_ROLLBACK_PROLOGUE except_vec_vi
BUILD_SKIPOVER_PROLOGUE except_vec_vi
NESTED(except_vec_vi, 0, sp)
SAVE_SOME docfi=1
SAVE_AT docfi=1


@@ -35,13 +35,6 @@ static void __cpuidle r3081_wait(void)
write_c0_conf(cfg | R30XX_CONF_HALT);
}
void __cpuidle r4k_wait(void)
{
raw_local_irq_enable();
__r4k_wait();
raw_local_irq_disable();
}
/*
* This variant is preferable as it allows testing need_resched and going to
* sleep depending on the outcome atomically. Unfortunately the "It is


@@ -332,6 +332,8 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
mips_cps_cluster_bootcfg = kcalloc(nclusters,
sizeof(*mips_cps_cluster_bootcfg),
GFP_KERNEL);
if (!mips_cps_cluster_bootcfg)
goto err_out;
if (nclusters > 1)
mips_cm_update_property();
@@ -348,6 +350,8 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
mips_cps_cluster_bootcfg[cl].core_power =
kcalloc(BITS_TO_LONGS(ncores), sizeof(unsigned long),
GFP_KERNEL);
if (!mips_cps_cluster_bootcfg[cl].core_power)
goto err_out;
/* Allocate VPE boot configuration structs */
for (c = 0; c < ncores; c++) {


@@ -77,7 +77,7 @@
#include "access-helper.h"
extern void check_wait(void);
extern asmlinkage void rollback_handle_int(void);
extern asmlinkage void skipover_handle_int(void);
extern asmlinkage void handle_int(void);
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
@@ -2066,7 +2066,7 @@ void *set_vi_handler(int n, vi_handler_t addr)
{
extern const u8 except_vec_vi[];
extern const u8 except_vec_vi_ori[], except_vec_vi_end[];
extern const u8 rollback_except_vec_vi[];
extern const u8 skipover_except_vec_vi[];
unsigned long handler;
unsigned long old_handler = vi_handlers[n];
int srssets = current_cpu_data.srsets;
@@ -2095,7 +2095,7 @@ void *set_vi_handler(int n, vi_handler_t addr)
change_c0_srsmap(0xf << n*4, 0 << n*4);
}
vec_start = using_rollback_handler() ? rollback_except_vec_vi :
vec_start = using_skipover_handler() ? skipover_except_vec_vi :
except_vec_vi;
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
ori_offset = except_vec_vi_ori - vec_start + 2;
@@ -2426,8 +2426,8 @@ void __init trap_init(void)
if (board_be_init)
board_be_init();
set_except_vector(EXCCODE_INT, using_rollback_handler() ?
rollback_handle_int : handle_int);
set_except_vector(EXCCODE_INT, using_skipover_handler() ?
skipover_handle_int : handle_int);
set_except_vector(EXCCODE_MOD, handle_tlbm);
set_except_vector(EXCCODE_TLBL, handle_tlbl);
set_except_vector(EXCCODE_TLBS, handle_tlbs);


@@ -275,6 +275,9 @@ long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg)
unsigned long pmm;
u8 pmlen;
if (!riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM))
return -EINVAL;
if (is_compat_thread(ti))
return -EINVAL;
@@ -330,6 +333,9 @@ long get_tagged_addr_ctrl(struct task_struct *task)
struct thread_info *ti = task_thread_info(task);
long ret = 0;
if (!riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM))
return -EINVAL;
if (is_compat_thread(ti))
return -EINVAL;


@@ -198,47 +198,57 @@ asmlinkage __visible __trap_section void do_trap_insn_illegal(struct pt_regs *re
DO_ERROR_INFO(do_trap_load_fault,
SIGSEGV, SEGV_ACCERR, "load access fault");
asmlinkage __visible __trap_section void do_trap_load_misaligned(struct pt_regs *regs)
enum misaligned_access_type {
MISALIGNED_STORE,
MISALIGNED_LOAD,
};
static const struct {
const char *type_str;
int (*handler)(struct pt_regs *regs);
} misaligned_handler[] = {
[MISALIGNED_STORE] = {
.type_str = "Oops - store (or AMO) address misaligned",
.handler = handle_misaligned_store,
},
[MISALIGNED_LOAD] = {
.type_str = "Oops - load address misaligned",
.handler = handle_misaligned_load,
},
};
static void do_trap_misaligned(struct pt_regs *regs, enum misaligned_access_type type)
{
irqentry_state_t state;
if (user_mode(regs)) {
irqentry_enter_from_user_mode(regs);
local_irq_enable();
} else {
state = irqentry_nmi_enter(regs);
}
if (handle_misaligned_load(regs))
do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
"Oops - load address misaligned");
if (misaligned_handler[type].handler(regs))
do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
misaligned_handler[type].type_str);
if (user_mode(regs)) {
local_irq_disable();
irqentry_exit_to_user_mode(regs);
} else {
irqentry_state_t state = irqentry_nmi_enter(regs);
if (handle_misaligned_load(regs))
do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
"Oops - load address misaligned");
irqentry_nmi_exit(regs, state);
}
}
asmlinkage __visible __trap_section void do_trap_load_misaligned(struct pt_regs *regs)
{
do_trap_misaligned(regs, MISALIGNED_LOAD);
}
asmlinkage __visible __trap_section void do_trap_store_misaligned(struct pt_regs *regs)
{
if (user_mode(regs)) {
irqentry_enter_from_user_mode(regs);
if (handle_misaligned_store(regs))
do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
"Oops - store (or AMO) address misaligned");
irqentry_exit_to_user_mode(regs);
} else {
irqentry_state_t state = irqentry_nmi_enter(regs);
if (handle_misaligned_store(regs))
do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
"Oops - store (or AMO) address misaligned");
irqentry_nmi_exit(regs, state);
}
do_trap_misaligned(regs, MISALIGNED_STORE);
}
DO_ERROR_INFO(do_trap_store_fault,
SIGSEGV, SEGV_ACCERR, "store (or AMO) access fault");
DO_ERROR_INFO(do_trap_ecall_s,


@@ -88,6 +88,13 @@
#define INSN_MATCH_C_FSWSP 0xe002
#define INSN_MASK_C_FSWSP 0xe003
#define INSN_MATCH_C_LHU 0x8400
#define INSN_MASK_C_LHU 0xfc43
#define INSN_MATCH_C_LH 0x8440
#define INSN_MASK_C_LH 0xfc43
#define INSN_MATCH_C_SH 0x8c00
#define INSN_MASK_C_SH 0xfc43
#define INSN_LEN(insn) ((((insn) & 0x3) < 0x3) ? 2 : 4)
#if defined(CONFIG_64BIT)
@@ -268,7 +275,7 @@ static unsigned long get_f32_rs(unsigned long insn, u8 fp_reg_offset,
int __ret; \
\
if (user_mode(regs)) { \
__ret = __get_user(insn, (type __user *) insn_addr); \
__ret = get_user(insn, (type __user *) insn_addr); \
} else { \
insn = *(type *)insn_addr; \
__ret = 0; \
@@ -431,6 +438,13 @@ static int handle_scalar_misaligned_load(struct pt_regs *regs)
fp = 1;
len = 4;
#endif
} else if ((insn & INSN_MASK_C_LHU) == INSN_MATCH_C_LHU) {
len = 2;
insn = RVC_RS2S(insn) << SH_RD;
} else if ((insn & INSN_MASK_C_LH) == INSN_MATCH_C_LH) {
len = 2;
shift = 8 * (sizeof(ulong) - len);
insn = RVC_RS2S(insn) << SH_RD;
} else {
regs->epc = epc;
return -1;
@@ -530,6 +544,9 @@ static int handle_scalar_misaligned_store(struct pt_regs *regs)
len = 4;
val.data_ulong = GET_F32_RS2C(insn, regs);
#endif
} else if ((insn & INSN_MASK_C_SH) == INSN_MATCH_C_SH) {
len = 2;
val.data_ulong = GET_RS2S(insn, regs);
} else {
regs->epc = epc;
return -1;


@@ -77,6 +77,8 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
memcpy(cntx, reset_cntx, sizeof(*cntx));
spin_unlock(&vcpu->arch.reset_cntx_lock);
memset(&vcpu->arch.smstateen_csr, 0, sizeof(vcpu->arch.smstateen_csr));
kvm_riscv_vcpu_fp_reset(vcpu);
kvm_riscv_vcpu_vector_reset(vcpu);


@@ -38,7 +38,6 @@ CONFIG_USER_NS=y
CONFIG_CHECKPOINT_RESTORE=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
CONFIG_PROFILING=y
CONFIG_KEXEC=y
CONFIG_KEXEC_FILE=y
@@ -92,7 +91,6 @@ CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_IOSCHED_BFQ=y
CONFIG_BINFMT_MISC=m
CONFIG_ZSWAP=y
CONFIG_ZSMALLOC=y
CONFIG_ZSMALLOC_STAT=y
CONFIG_SLAB_BUCKETS=y
CONFIG_SLUB_STATS=y
@@ -395,6 +393,9 @@ CONFIG_CLS_U32_MARK=y
CONFIG_NET_CLS_FLOW=m
CONFIG_NET_CLS_CGROUP=y
CONFIG_NET_CLS_BPF=m
CONFIG_NET_CLS_FLOWER=m
CONFIG_NET_CLS_MATCHALL=m
CONFIG_NET_EMATCH=y
CONFIG_NET_CLS_ACT=y
CONFIG_NET_ACT_POLICE=m
CONFIG_NET_ACT_GACT=m
@@ -405,6 +406,9 @@ CONFIG_NET_ACT_PEDIT=m
CONFIG_NET_ACT_SIMP=m
CONFIG_NET_ACT_SKBEDIT=m
CONFIG_NET_ACT_CSUM=m
CONFIG_NET_ACT_VLAN=m
CONFIG_NET_ACT_TUNNEL_KEY=m
CONFIG_NET_ACT_CT=m
CONFIG_NET_ACT_GATE=m
CONFIG_NET_TC_SKB_EXT=y
CONFIG_DNS_RESOLVER=y
@@ -628,8 +632,16 @@ CONFIG_VIRTIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_MEM=m
CONFIG_VIRTIO_INPUT=y
CONFIG_VDPA=m
CONFIG_VDPA_SIM=m
CONFIG_VDPA_SIM_NET=m
CONFIG_VDPA_SIM_BLOCK=m
CONFIG_VDPA_USER=m
CONFIG_MLX5_VDPA_NET=m
CONFIG_VP_VDPA=m
CONFIG_VHOST_NET=m
CONFIG_VHOST_VSOCK=m
CONFIG_VHOST_VDPA=m
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
@@ -654,7 +666,6 @@ CONFIG_NILFS2_FS=m
CONFIG_BCACHEFS_FS=y
CONFIG_BCACHEFS_QUOTA=y
CONFIG_BCACHEFS_POSIX_ACL=y
CONFIG_FS_DAX=y
CONFIG_EXPORTFS_BLOCK_OPS=y
CONFIG_FS_ENCRYPTION=y
CONFIG_FS_VERITY=y
@@ -724,11 +735,10 @@ CONFIG_NLS_UTF8=m
CONFIG_DLM=m
CONFIG_UNICODE=y
CONFIG_PERSISTENT_KEYRINGS=y
CONFIG_BIG_KEYS=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_KEY_NOTIFICATIONS=y
CONFIG_SECURITY=y
CONFIG_HARDENED_USERCOPY=y
CONFIG_FORTIFY_SOURCE=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_LOCKDOWN_LSM=y
@@ -741,6 +751,8 @@ CONFIG_IMA=y
CONFIG_IMA_DEFAULT_HASH_SHA256=y
CONFIG_IMA_WRITE_POLICY=y
CONFIG_IMA_APPRAISE=y
CONFIG_FORTIFY_SOURCE=y
CONFIG_HARDENED_USERCOPY=y
CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_CRYPTO_USER=m
# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
@@ -756,7 +768,6 @@ CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_ARIA=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_DES=m
@@ -801,7 +812,6 @@ CONFIG_CRYPTO_SHA3_512_S390=m
CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_CHACHA_S390=m
CONFIG_CRYPTO_HMAC_S390=m
CONFIG_ZCRYPT=m
CONFIG_PKEY=m
@@ -812,9 +822,9 @@ CONFIG_PKEY_UV=m
CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_DEV_VIRTIO=m
CONFIG_SYSTEM_BLACKLIST_KEYRING=y
CONFIG_CRYPTO_KRB5=m
CONFIG_CRYPTO_KRB5_SELFTESTS=y
CONFIG_CORDIC=m
CONFIG_CRYPTO_LIB_CURVE25519=m
CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
CONFIG_RANDOM32_SELFTEST=y
CONFIG_XZ_DEC_MICROLZMA=y
CONFIG_DMA_CMA=y


@@ -36,7 +36,6 @@ CONFIG_USER_NS=y
CONFIG_CHECKPOINT_RESTORE=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
CONFIG_PROFILING=y
CONFIG_KEXEC=y
CONFIG_KEXEC_FILE=y
@@ -86,7 +85,6 @@ CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_IOSCHED_BFQ=y
CONFIG_BINFMT_MISC=m
CONFIG_ZSWAP=y
CONFIG_ZSMALLOC=y
CONFIG_ZSMALLOC_STAT=y
CONFIG_SLAB_BUCKETS=y
# CONFIG_COMPAT_BRK is not set
@@ -385,6 +383,9 @@ CONFIG_CLS_U32_MARK=y
CONFIG_NET_CLS_FLOW=m
CONFIG_NET_CLS_CGROUP=y
CONFIG_NET_CLS_BPF=m
CONFIG_NET_CLS_FLOWER=m
CONFIG_NET_CLS_MATCHALL=m
CONFIG_NET_EMATCH=y
CONFIG_NET_CLS_ACT=y
CONFIG_NET_ACT_POLICE=m
CONFIG_NET_ACT_GACT=m
@@ -395,6 +396,9 @@ CONFIG_NET_ACT_PEDIT=m
CONFIG_NET_ACT_SIMP=m
CONFIG_NET_ACT_SKBEDIT=m
CONFIG_NET_ACT_CSUM=m
CONFIG_NET_ACT_VLAN=m
CONFIG_NET_ACT_TUNNEL_KEY=m
CONFIG_NET_ACT_CT=m
CONFIG_NET_ACT_GATE=m
CONFIG_NET_TC_SKB_EXT=y
CONFIG_DNS_RESOLVER=y
@@ -618,8 +622,16 @@ CONFIG_VIRTIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_MEM=m
CONFIG_VIRTIO_INPUT=y
CONFIG_VDPA=m
CONFIG_VDPA_SIM=m
CONFIG_VDPA_SIM_NET=m
CONFIG_VDPA_SIM_BLOCK=m
CONFIG_VDPA_USER=m
CONFIG_MLX5_VDPA_NET=m
CONFIG_VP_VDPA=m
CONFIG_VHOST_NET=m
CONFIG_VHOST_VSOCK=m
CONFIG_VHOST_VDPA=m
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
@@ -641,7 +653,6 @@ CONFIG_NILFS2_FS=m
CONFIG_BCACHEFS_FS=m
CONFIG_BCACHEFS_QUOTA=y
CONFIG_BCACHEFS_POSIX_ACL=y
CONFIG_FS_DAX=y
CONFIG_EXPORTFS_BLOCK_OPS=y
CONFIG_FS_ENCRYPTION=y
CONFIG_FS_VERITY=y
@@ -711,6 +722,7 @@ CONFIG_NLS_UTF8=m
CONFIG_DLM=m
CONFIG_UNICODE=y
CONFIG_PERSISTENT_KEYRINGS=y
CONFIG_BIG_KEYS=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_KEY_NOTIFICATIONS=y
CONFIG_SECURITY=y
@@ -742,7 +754,6 @@ CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_ARIA=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_DES=m
@@ -788,7 +799,6 @@ CONFIG_CRYPTO_SHA3_512_S390=m
CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_CHACHA_S390=m
CONFIG_CRYPTO_HMAC_S390=m
CONFIG_ZCRYPT=m
CONFIG_PKEY=m
@@ -799,10 +809,10 @@ CONFIG_PKEY_UV=m
CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_DEV_VIRTIO=m
CONFIG_SYSTEM_BLACKLIST_KEYRING=y
CONFIG_CRYPTO_KRB5=m
CONFIG_CRYPTO_KRB5_SELFTESTS=y
CONFIG_CORDIC=m
CONFIG_PRIME_NUMBERS=m
CONFIG_CRYPTO_LIB_CURVE25519=m
CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
CONFIG_XZ_DEC_MICROLZMA=y
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=0


@@ -70,7 +70,6 @@ CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_INFO_DWARF4=y
CONFIG_DEBUG_FS=y
CONFIG_PANIC_ON_OOPS=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_RCU_CPU_STALL_TIMEOUT=60
# CONFIG_RCU_TRACE is not set
# CONFIG_FTRACE is not set


@@ -602,7 +602,8 @@ SYM_CODE_START(stack_invalid)
stmg %r0,%r7,__PT_R0(%r11)
stmg %r8,%r9,__PT_PSW(%r11)
mvc __PT_R8(64,%r11),0(%r14)
stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
GET_LC %r2
mvc __PT_ORIG_GPR2(8,%r11),__LC_PGM_LAST_BREAK(%r2)
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
lgr %r2,%r11 # pass pointer to pt_regs
jg kernel_stack_invalid


@@ -428,6 +428,8 @@ static void __clp_add(struct clp_fh_list_entry *entry, void *data)
return;
}
zdev = zpci_create_device(entry->fid, entry->fh, entry->config_state);
if (IS_ERR(zdev))
return;
list_add_tail(&zdev->entry, scan_list);
}


@@ -55,6 +55,7 @@ do { \
goto err_label; \
} \
*((type *)dst) = get_unaligned((type *)(src)); \
barrier(); \
current->thread.segv_continue = NULL; \
} while (0)
@@ -66,6 +67,7 @@ do { \
if (__faulted) \
goto err_label; \
put_unaligned(*((type *)src), (type *)(dst)); \
barrier(); \
current->thread.segv_continue = NULL; \
} while (0)


@@ -225,20 +225,20 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
panic("Failed to sync kernel TLBs: %d", err);
goto out;
}
else if (current->mm == NULL) {
if (current->pagefault_disabled) {
if (!mc) {
show_regs(container_of(regs, struct pt_regs, regs));
panic("Segfault with pagefaults disabled but no mcontext");
}
if (!current->thread.segv_continue) {
show_regs(container_of(regs, struct pt_regs, regs));
panic("Segfault without recovery target");
}
mc_set_rip(mc, current->thread.segv_continue);
current->thread.segv_continue = NULL;
goto out;
else if (current->pagefault_disabled) {
if (!mc) {
show_regs(container_of(regs, struct pt_regs, regs));
panic("Segfault with pagefaults disabled but no mcontext");
}
if (!current->thread.segv_continue) {
show_regs(container_of(regs, struct pt_regs, regs));
panic("Segfault without recovery target");
}
mc_set_rip(mc, current->thread.segv_continue);
current->thread.segv_continue = NULL;
goto out;
}
else if (current->mm == NULL) {
show_regs(container_of(regs, struct pt_regs, regs));
panic("Segfault with no mm");
}


@@ -2368,6 +2368,7 @@ config STRICT_SIGALTSTACK_SIZE
config CFI_AUTO_DEFAULT
bool "Attempt to use FineIBT by default at boot time"
depends on FINEIBT
depends on !RUST || RUSTC_VERSION >= 108800
default y
help
Attempt to use FineIBT by default at boot time. If enabled,


@@ -17,10 +17,12 @@ struct ucode_cpu_info {
void load_ucode_bsp(void);
void load_ucode_ap(void);
void microcode_bsp_resume(void);
bool __init microcode_loader_disabled(void);
#else
static inline void load_ucode_bsp(void) { }
static inline void load_ucode_ap(void) { }
static inline void microcode_bsp_resume(void) { }
static inline bool __init microcode_loader_disabled(void) { return false; }
#endif
extern unsigned long initrd_start_early;


@@ -1098,15 +1098,17 @@ static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t siz
static int __init save_microcode_in_initrd(void)
{
unsigned int cpuid_1_eax = native_cpuid_eax(1);
struct cpuinfo_x86 *c = &boot_cpu_data;
struct cont_desc desc = { 0 };
unsigned int cpuid_1_eax;
enum ucode_state ret;
struct cpio_data cp;
if (dis_ucode_ldr || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
if (microcode_loader_disabled() || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
return 0;
cpuid_1_eax = native_cpuid_eax(1);
if (!find_blobs_in_containers(&cp))
return -EINVAL;


@@ -41,8 +41,8 @@
#include "internal.h"
static struct microcode_ops *microcode_ops;
bool dis_ucode_ldr = true;
static struct microcode_ops *microcode_ops;
static bool dis_ucode_ldr = false;
bool force_minrev = IS_ENABLED(CONFIG_MICROCODE_LATE_FORCE_MINREV);
module_param(force_minrev, bool, S_IRUSR | S_IWUSR);
@@ -84,6 +84,9 @@ static bool amd_check_current_patch_level(void)
u32 lvl, dummy, i;
u32 *levels;
if (x86_cpuid_vendor() != X86_VENDOR_AMD)
return false;
native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);
levels = final_levels;
@@ -95,27 +98,29 @@ static bool amd_check_current_patch_level(void)
return false;
}
static bool __init check_loader_disabled_bsp(void)
bool __init microcode_loader_disabled(void)
{
static const char *__dis_opt_str = "dis_ucode_ldr";
const char *cmdline = boot_command_line;
const char *option = __dis_opt_str;
/*
* CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
* completely accurate as xen pv guests don't see that CPUID bit set but
* that's good enough as they don't land on the BSP path anyway.
*/
if (native_cpuid_ecx(1) & BIT(31))
if (dis_ucode_ldr)
return true;
if (x86_cpuid_vendor() == X86_VENDOR_AMD) {
if (amd_check_current_patch_level())
return true;
}
if (cmdline_find_option_bool(cmdline, option) <= 0)
dis_ucode_ldr = false;
/*
* Disable when:
*
* 1) The CPU does not support CPUID.
*
* 2) Bit 31 in CPUID[1]:ECX is clear
* The bit is reserved for hypervisor use. This is still not
* completely accurate as XEN PV guests don't see that CPUID bit
* set, but that's good enough as they don't land on the BSP
* path anyway.
*
* 3) Certain AMD patch levels are not allowed to be
* overwritten.
*/
if (!have_cpuid_p() ||
native_cpuid_ecx(1) & BIT(31) ||
amd_check_current_patch_level())
dis_ucode_ldr = true;
return dis_ucode_ldr;
}
@@ -125,7 +130,10 @@ void __init load_ucode_bsp(void)
unsigned int cpuid_1_eax;
bool intel = true;
if (!have_cpuid_p())
if (cmdline_find_option_bool(boot_command_line, "dis_ucode_ldr") > 0)
dis_ucode_ldr = true;
if (microcode_loader_disabled())
return;
cpuid_1_eax = native_cpuid_eax(1);
@@ -146,9 +154,6 @@ void __init load_ucode_bsp(void)
return;
}
if (check_loader_disabled_bsp())
return;
if (intel)
load_ucode_intel_bsp(&early_data);
else
@@ -159,6 +164,11 @@ void load_ucode_ap(void)
{
unsigned int cpuid_1_eax;
/*
* Can't use microcode_loader_disabled() here - .init section
* hell. It doesn't have to either - the BSP variant must've
* parsed cmdline already anyway.
*/
if (dis_ucode_ldr)
return;
@@ -810,7 +820,7 @@ static int __init microcode_init(void)
struct cpuinfo_x86 *c = &boot_cpu_data;
int error;
if (dis_ucode_ldr)
if (microcode_loader_disabled())
return -EINVAL;
if (c->x86_vendor == X86_VENDOR_INTEL)


@@ -389,7 +389,7 @@ static int __init save_builtin_microcode(void)
if (xchg(&ucode_patch_va, NULL) != UCODE_BSP_LOADED)
return 0;
if (dis_ucode_ldr || boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
if (microcode_loader_disabled() || boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return 0;
uci.mc = get_microcode_blob(&uci, true);


@@ -94,7 +94,6 @@ static inline unsigned int x86_cpuid_family(void)
return x86_family(eax);
}
extern bool dis_ucode_ldr;
extern bool force_minrev;
#ifdef CONFIG_CPU_SUP_AMD


@@ -145,10 +145,6 @@ void __init __no_stack_protector mk_early_pgtbl_32(void)
*ptr = (unsigned long)ptep + PAGE_OFFSET;
#ifdef CONFIG_MICROCODE_INITRD32
/* Running on a hypervisor? */
if (native_cpuid_ecx(1) & BIT(31))
return;
params = (struct boot_params *)__pa_nodebug(&boot_params);
if (!params->hdr.ramdisk_size || !params->hdr.ramdisk_image)
return;


@@ -466,10 +466,18 @@ SECTIONS
}
/*
* The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
* COMPILE_TEST kernels can be large - CONFIG_KASAN, for example, can cause
* this. Let's assume that nobody will be running a COMPILE_TEST kernel and
* let's assert that fuller build coverage is more valuable than being able to
* run a COMPILE_TEST kernel.
*/
#ifndef CONFIG_COMPILE_TEST
/*
* The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
*/
. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
"kernel image bigger than KERNEL_IMAGE_SIZE");
#endif
/* needed for Clang - see arch/x86/entry/entry.S */
PROVIDE(__ref_stack_chk_guard = __stack_chk_guard);

View File

@@ -104,6 +104,9 @@ void kvm_mmu_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
if (kvm_check_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu))
kvm_mmu_free_obsolete_roots(vcpu);
/*
* Checking root.hpa is sufficient even when KVM has mirror root.
* We can have either:


@@ -5974,6 +5974,7 @@ void kvm_mmu_free_obsolete_roots(struct kvm_vcpu *vcpu)
__kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.root_mmu);
__kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.guest_mmu);
}
EXPORT_SYMBOL_GPL(kvm_mmu_free_obsolete_roots);
static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
int *bytes)
@@ -7669,32 +7670,6 @@ void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
}
#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
struct kvm_gfn_range *range)
{
/*
* Zap SPTEs even if the slot can't be mapped PRIVATE. KVM x86 only
* supports KVM_MEMORY_ATTRIBUTE_PRIVATE, and so it *seems* like KVM
* can simply ignore such slots. But if userspace is making memory
* PRIVATE, then KVM must prevent the guest from accessing the memory
* as shared. And if userspace is making memory SHARED and this point
* is reached, then at least one page within the range was previously
* PRIVATE, i.e. the slot's possible hugepage ranges are changing.
* Zapping SPTEs in this case ensures KVM will reassess whether or not
* a hugepage can be used for affected ranges.
*/
if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm)))
return false;
/* Unmap the old attribute page. */
if (range->arg.attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE)
range->attr_filter = KVM_FILTER_SHARED;
else
range->attr_filter = KVM_FILTER_PRIVATE;
return kvm_unmap_gfn_range(kvm, range);
}
static bool hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
int level)
{
@@ -7713,6 +7688,69 @@ static void hugepage_set_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
lpage_info_slot(gfn, slot, level)->disallow_lpage |= KVM_LPAGE_MIXED_FLAG;
}
bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
struct kvm_gfn_range *range)
{
struct kvm_memory_slot *slot = range->slot;
int level;
/*
* Zap SPTEs even if the slot can't be mapped PRIVATE. KVM x86 only
* supports KVM_MEMORY_ATTRIBUTE_PRIVATE, and so it *seems* like KVM
* can simply ignore such slots. But if userspace is making memory
* PRIVATE, then KVM must prevent the guest from accessing the memory
* as shared. And if userspace is making memory SHARED and this point
* is reached, then at least one page within the range was previously
* PRIVATE, i.e. the slot's possible hugepage ranges are changing.
* Zapping SPTEs in this case ensures KVM will reassess whether or not
* a hugepage can be used for affected ranges.
*/
if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm)))
return false;
if (WARN_ON_ONCE(range->end <= range->start))
return false;
/*
* If the head and tail pages of the range currently allow a hugepage,
* i.e. reside fully in the slot and don't have mixed attributes, then
* add each corresponding hugepage range to the ongoing invalidation,
* e.g. to prevent KVM from creating a hugepage in response to a fault
* for a gfn whose attributes aren't changing. Note, only the range
* of gfns whose attributes are being modified needs to be explicitly
* unmapped, as that will unmap any existing hugepages.
*/
for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
gfn_t start = gfn_round_for_level(range->start, level);
gfn_t end = gfn_round_for_level(range->end - 1, level);
gfn_t nr_pages = KVM_PAGES_PER_HPAGE(level);
if ((start != range->start || start + nr_pages > range->end) &&
start >= slot->base_gfn &&
start + nr_pages <= slot->base_gfn + slot->npages &&
!hugepage_test_mixed(slot, start, level))
kvm_mmu_invalidate_range_add(kvm, start, start + nr_pages);
if (end == start)
continue;
if ((end + nr_pages) > range->end &&
(end + nr_pages) <= (slot->base_gfn + slot->npages) &&
!hugepage_test_mixed(slot, end, level))
kvm_mmu_invalidate_range_add(kvm, end, end + nr_pages);
}
/* Unmap the old attribute page. */
if (range->arg.attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE)
range->attr_filter = KVM_FILTER_SHARED;
else
range->attr_filter = KVM_FILTER_PRIVATE;
return kvm_unmap_gfn_range(kvm, range);
}
static bool hugepage_has_attrs(struct kvm *kvm, struct kvm_memory_slot *slot,
gfn_t gfn, int level, unsigned long attrs)
{


@@ -131,6 +131,7 @@ void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm)
kvm_mmu_reset_context(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_smm_changed);
void process_smi(struct kvm_vcpu *vcpu)
{


@@ -3173,9 +3173,14 @@ void sev_free_vcpu(struct kvm_vcpu *vcpu)
kvfree(svm->sev_es.ghcb_sa);
}
static u64 kvm_ghcb_get_sw_exit_code(struct vmcb_control_area *control)
{
return (((u64)control->exit_code_hi) << 32) | control->exit_code;
}
static void dump_ghcb(struct vcpu_svm *svm)
{
struct ghcb *ghcb = svm->sev_es.ghcb;
struct vmcb_control_area *control = &svm->vmcb->control;
unsigned int nbits;
/* Re-use the dump_invalid_vmcb module parameter */
@@ -3184,18 +3189,24 @@ static void dump_ghcb(struct vcpu_svm *svm)
return;
}
nbits = sizeof(ghcb->save.valid_bitmap) * 8;
nbits = sizeof(svm->sev_es.valid_bitmap) * 8;
pr_err("GHCB (GPA=%016llx):\n", svm->vmcb->control.ghcb_gpa);
/*
* Print KVM's snapshot of the GHCB values that were (unsuccessfully)
* used to handle the exit. If the guest has since modified the GHCB
* itself, dumping the raw GHCB won't help debug why KVM was unable to
* handle the VMGEXIT that KVM observed.
*/
pr_err("GHCB (GPA=%016llx) snapshot:\n", svm->vmcb->control.ghcb_gpa);
pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_code",
ghcb->save.sw_exit_code, ghcb_sw_exit_code_is_valid(ghcb));
kvm_ghcb_get_sw_exit_code(control), kvm_ghcb_sw_exit_code_is_valid(svm));
pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_1",
ghcb->save.sw_exit_info_1, ghcb_sw_exit_info_1_is_valid(ghcb));
control->exit_info_1, kvm_ghcb_sw_exit_info_1_is_valid(svm));
pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_2",
ghcb->save.sw_exit_info_2, ghcb_sw_exit_info_2_is_valid(ghcb));
control->exit_info_2, kvm_ghcb_sw_exit_info_2_is_valid(svm));
pr_err("%-20s%016llx is_valid: %u\n", "sw_scratch",
ghcb->save.sw_scratch, ghcb_sw_scratch_is_valid(ghcb));
pr_err("%-20s%*pb\n", "valid_bitmap", nbits, ghcb->save.valid_bitmap);
svm->sev_es.sw_scratch, kvm_ghcb_sw_scratch_is_valid(svm));
pr_err("%-20s%*pb\n", "valid_bitmap", nbits, svm->sev_es.valid_bitmap);
}
static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
@@ -3266,11 +3277,6 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
}
static u64 kvm_ghcb_get_sw_exit_code(struct vmcb_control_area *control)
{
return (((u64)control->exit_code_hi) << 32) | control->exit_code;
}
static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
{
struct vmcb_control_area *control = &svm->vmcb->control;


@@ -607,9 +607,6 @@ static void svm_disable_virtualization_cpu(void)
kvm_cpu_svm_disable();
amd_pmu_disable_virt();
if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
msr_clear_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
}
static int svm_enable_virtualization_cpu(void)
@@ -687,9 +684,6 @@ static int svm_enable_virtualization_cpu(void)
rdmsr(MSR_TSC_AUX, sev_es_host_save_area(sd)->tsc_aux, msr_hi);
}
if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
return 0;
}
@@ -1518,6 +1512,63 @@ static void svm_vcpu_free(struct kvm_vcpu *vcpu)
__free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE));
}
#ifdef CONFIG_CPU_MITIGATIONS
static DEFINE_SPINLOCK(srso_lock);
static atomic_t srso_nr_vms;
static void svm_srso_clear_bp_spec_reduce(void *ign)
{
struct svm_cpu_data *sd = this_cpu_ptr(&svm_data);
if (!sd->bp_spec_reduce_set)
return;
msr_clear_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
sd->bp_spec_reduce_set = false;
}
static void svm_srso_vm_destroy(void)
{
if (!cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
return;
if (atomic_dec_return(&srso_nr_vms))
return;
guard(spinlock)(&srso_lock);
/*
* Verify a new VM didn't come along, acquire the lock, and increment
* the count before this task acquired the lock.
*/
if (atomic_read(&srso_nr_vms))
return;
on_each_cpu(svm_srso_clear_bp_spec_reduce, NULL, 1);
}
static void svm_srso_vm_init(void)
{
if (!cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
return;
/*
* Acquire the lock on 0 => 1 transitions to ensure a potential 1 => 0
* transition, i.e. destroying the last VM, is fully complete, e.g. so
* that a delayed IPI doesn't clear BP_SPEC_REDUCE after a vCPU runs.
*/
if (atomic_inc_not_zero(&srso_nr_vms))
return;
guard(spinlock)(&srso_lock);
atomic_inc(&srso_nr_vms);
}
#else
static void svm_srso_vm_init(void) { }
static void svm_srso_vm_destroy(void) { }
#endif
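The init/destroy pair above implements a lock-protected 0 <=> 1 refcount transition. A userspace sketch of the same pattern, assuming C11 atomics and a pthread mutex in place of atomic_t and the spinlock (names and simplifications are mine, not the kernel's):

#include <pthread.h>
#include <stdatomic.h>

static pthread_mutex_t srso_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int nr_vms;

static void vm_init(void)
{
        int old = atomic_load(&nr_vms);

        /* Fast path, mimicking atomic_inc_not_zero(): 1 => 2, 2 => 3,
         * ... transitions need no lock. */
        while (old != 0)
                if (atomic_compare_exchange_weak(&nr_vms, &old, old + 1))
                        return;

        /* Slow path: the 0 => 1 transition takes the lock so a racing
         * final destroy fully finishes tearing down before we re-arm. */
        pthread_mutex_lock(&srso_lock);
        atomic_fetch_add(&nr_vms, 1);
        pthread_mutex_unlock(&srso_lock);
}

static void vm_destroy(void)
{
        if (atomic_fetch_sub(&nr_vms, 1) != 1)
                return;

        pthread_mutex_lock(&srso_lock);
        /* Re-check: a new VM may have arrived before we got the lock. */
        if (atomic_load(&nr_vms) == 0) {
                /* ... clear the MSR bit on every CPU ... */
        }
        pthread_mutex_unlock(&srso_lock);
}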
static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -1550,6 +1601,11 @@ static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
(!boot_cpu_has(X86_FEATURE_V_TSC_AUX) || !sev_es_guest(vcpu->kvm)))
kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull);
if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE) &&
!sd->bp_spec_reduce_set) {
sd->bp_spec_reduce_set = true;
msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
}
svm->guest_state_loaded = true;
}
@@ -2231,6 +2287,10 @@ static int shutdown_interception(struct kvm_vcpu *vcpu)
*/
if (!sev_es_guest(vcpu->kvm)) {
clear_page(svm->vmcb);
#ifdef CONFIG_KVM_SMM
if (is_smm(vcpu))
kvm_smm_changed(vcpu, false);
#endif
kvm_vcpu_reset(vcpu, true);
}
@@ -5036,6 +5096,8 @@ static void svm_vm_destroy(struct kvm *kvm)
{
avic_vm_destroy(kvm);
sev_vm_destroy(kvm);
svm_srso_vm_destroy();
}
static int svm_vm_init(struct kvm *kvm)
@@ -5061,6 +5123,7 @@ static int svm_vm_init(struct kvm *kvm)
return ret;
}
svm_srso_vm_init();
return 0;
}


@@ -335,6 +335,8 @@ struct svm_cpu_data {
u32 next_asid;
u32 min_asid;
bool bp_spec_reduce_set;
struct vmcb *save_area;
unsigned long save_area_pa;


@@ -4597,7 +4597,7 @@ static bool kvm_is_vm_type_supported(unsigned long type)
return type < 32 && (kvm_caps.supported_vm_types & BIT(type));
}
static inline u32 kvm_sync_valid_fields(struct kvm *kvm)
static inline u64 kvm_sync_valid_fields(struct kvm *kvm)
{
return kvm && kvm->arch.has_protected_state ? 0 : KVM_SYNC_X86_VALID_FIELDS;
}
@@ -11493,7 +11493,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
struct kvm_queued_exception *ex = &vcpu->arch.exception;
struct kvm_run *kvm_run = vcpu->run;
u32 sync_valid_fields;
u64 sync_valid_fields;
int r;
r = kvm_mmu_post_init_vm(vcpu->kvm);


@@ -899,8 +899,9 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
cond_mitigation(tsk);
/*
* Let nmi_uaccess_okay() and finish_asid_transition()
* know that CR3 is changing.
* Indicate that CR3 is about to change. nmi_uaccess_okay()
* and others are sensitive to the window where mm_cpumask(),
* CR3 and cpu_tlbstate.loaded_mm are not all in sync.
*/
this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
barrier();
@@ -1204,8 +1205,16 @@ static void flush_tlb_func(void *info)
static bool should_flush_tlb(int cpu, void *data)
{
struct mm_struct *loaded_mm = per_cpu(cpu_tlbstate.loaded_mm, cpu);
struct flush_tlb_info *info = data;
/*
* Order the 'loaded_mm' and 'is_lazy' against their
* write ordering in switch_mm_irqs_off(). Ensure
* 'is_lazy' is at least as new as 'loaded_mm'.
*/
smp_rmb();
/* Lazy TLB will get flushed at the next context switch. */
if (per_cpu(cpu_tlbstate_shared.is_lazy, cpu))
return false;
@@ -1214,8 +1223,15 @@ static bool should_flush_tlb(int cpu, void *data)
if (!info->mm)
return true;
/*
* While switching, the remote CPU could have state from
* either the prev or next mm. Assume the worst and flush.
*/
if (loaded_mm == LOADED_MM_SWITCHING)
return true;
/* The target mm is loaded, and the CPU is not lazy. */
if (per_cpu(cpu_tlbstate.loaded_mm, cpu) == info->mm)
if (loaded_mm == info->mm)
return true;
/* In cpumask, but not the loaded mm? Periodically remove by flushing. */
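The smp_rmb() added above pairs with the write ordering in switch_mm_irqs_off(). In C11 terms this is a fence pairing; a minimal sketch of the general pattern, assuming simplified stand-ins for the kernel's per-CPU state:

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic(void *) loaded_mm;  /* stand-in for cpu_tlbstate.loaded_mm */
static atomic_bool is_lazy;        /* stand-in for ...shared.is_lazy */

/* Writer (context switch): update laziness before publishing the mm. */
static void writer(void *next_mm)
{
        atomic_store_explicit(&is_lazy, false, memory_order_relaxed);
        atomic_thread_fence(memory_order_release);  /* like smp_wmb() */
        atomic_store_explicit(&loaded_mm, next_mm, memory_order_relaxed);
}

/* Reader (flush decision): after the fence, is_lazy is at least as new
 * as the loaded_mm value read before it. */
static bool should_flush(void *mm)
{
        void *seen = atomic_load_explicit(&loaded_mm, memory_order_relaxed);
        atomic_thread_fence(memory_order_acquire);  /* the smp_rmb() above */
        if (atomic_load_explicit(&is_lazy, memory_order_relaxed))
                return false;
        return seen == mm;
}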


@@ -31,8 +31,8 @@ struct faultinfo {
#define ___backtrack_faulted(_faulted) \
asm volatile ( \
"mov $0, %0\n" \
"movl $__get_kernel_nofault_faulted_%=,%1\n" \
"mov $0, %0\n" \
"jmp _end_%=\n" \
"__get_kernel_nofault_faulted_%=:\n" \
"mov $1, %0;" \


@@ -31,8 +31,8 @@ struct faultinfo {
#define ___backtrack_faulted(_faulted) \
asm volatile ( \
"mov $0, %0\n" \
"movq $__get_kernel_nofault_faulted_%=,%1\n" \
"mov $0, %0\n" \
"jmp _end_%=\n" \
"__get_kernel_nofault_faulted_%=:\n" \
"mov $1, %0;" \


@@ -480,7 +480,8 @@ static inline void blk_zone_update_request_bio(struct request *rq,
* the original BIO sector so that blk_zone_write_plug_bio_endio() can
* lookup the zone write plug.
*/
if (req_op(rq) == REQ_OP_ZONE_APPEND || bio_zone_write_plugging(bio))
if (req_op(rq) == REQ_OP_ZONE_APPEND ||
bio_flagged(bio, BIO_EMULATES_ZONE_APPEND))
bio->bi_iter.bi_sector = rq->__sector;
}
void blk_zone_write_plug_bio_endio(struct bio *bio);


@@ -46,12 +46,8 @@ int ioprio_check_cap(int ioprio)
*/
if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_NICE))
return -EPERM;
fallthrough;
/* rt has prio field too */
case IOPRIO_CLASS_BE:
if (level >= IOPRIO_NR_LEVELS)
return -EINVAL;
break;
case IOPRIO_CLASS_BE:
case IOPRIO_CLASS_IDLE:
break;
case IOPRIO_CLASS_NONE:


@@ -119,7 +119,7 @@ static void timeouts_init(struct ivpu_device *vdev)
else
vdev->timeout.autosuspend = 100;
vdev->timeout.d0i3_entry_msg = 5;
vdev->timeout.state_dump_msg = 10;
vdev->timeout.state_dump_msg = 100;
}
}


@@ -681,8 +681,8 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority, u32 cmdq_id)
err_erase_xa:
xa_erase(&vdev->submitted_jobs_xa, job->job_id);
err_unlock:
mutex_unlock(&vdev->submitted_jobs_lock);
mutex_unlock(&file_priv->lock);
mutex_unlock(&vdev->submitted_jobs_lock);
ivpu_rpm_put(vdev);
return ret;
}
@@ -874,15 +874,21 @@ int ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *
int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct ivpu_file_priv *file_priv = file->driver_priv;
struct ivpu_device *vdev = file_priv->vdev;
struct drm_ivpu_cmdq_create *args = data;
struct ivpu_cmdq *cmdq;
int ret;
if (!ivpu_is_capable(file_priv->vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
return -ENODEV;
if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
return -EINVAL;
ret = ivpu_rpm_get(vdev);
if (ret < 0)
return ret;
mutex_lock(&file_priv->lock);
cmdq = ivpu_cmdq_create(file_priv, ivpu_job_to_jsm_priority(args->priority), false);
@@ -891,6 +897,8 @@ int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *
mutex_unlock(&file_priv->lock);
ivpu_rpm_put(vdev);
return cmdq ? 0 : -ENOMEM;
}
@@ -900,28 +908,35 @@ int ivpu_cmdq_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file
struct ivpu_device *vdev = file_priv->vdev;
struct drm_ivpu_cmdq_destroy *args = data;
struct ivpu_cmdq *cmdq;
u32 cmdq_id;
u32 cmdq_id = 0;
int ret;
if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
return -ENODEV;
ret = ivpu_rpm_get(vdev);
if (ret < 0)
return ret;
mutex_lock(&file_priv->lock);
cmdq = xa_load(&file_priv->cmdq_xa, args->cmdq_id);
if (!cmdq || cmdq->is_legacy) {
ret = -ENOENT;
goto err_unlock;
} else {
cmdq_id = cmdq->id;
ivpu_cmdq_destroy(file_priv, cmdq);
ret = 0;
}
cmdq_id = cmdq->id;
ivpu_cmdq_destroy(file_priv, cmdq);
mutex_unlock(&file_priv->lock);
ivpu_cmdq_abort_all_jobs(vdev, file_priv->ctx.id, cmdq_id);
return 0;
err_unlock:
mutex_unlock(&file_priv->lock);
/* Abort any pending jobs only if cmdq was destroyed */
if (!ret)
ivpu_cmdq_abort_all_jobs(vdev, file_priv->ctx.id, cmdq_id);
ivpu_rpm_put(vdev);
return ret;
}


@@ -1440,7 +1440,7 @@ static void platform_shutdown(struct device *_dev)
static int platform_dma_configure(struct device *dev)
{
struct platform_driver *drv = to_platform_driver(dev->driver);
struct device_driver *drv = READ_ONCE(dev->driver);
struct fwnode_handle *fwnode = dev_fwnode(dev);
enum dev_dma_attr attr;
int ret = 0;
@@ -1451,8 +1451,8 @@ static int platform_dma_configure(struct device *dev)
attr = acpi_get_dma_attr(to_acpi_device_node(fwnode));
ret = acpi_dma_configure(dev, attr);
}
/* @drv may not be valid when we're called from the IOMMU layer */
if (ret || !dev->driver || drv->driver_managed_dma)
/* @dev->driver may not be valid when we're called from the IOMMU layer */
if (ret || !drv || to_platform_driver(drv)->driver_managed_dma)
return ret;
ret = iommu_device_use_default_domain(dev);


@@ -505,6 +505,17 @@ static void loop_assign_backing_file(struct loop_device *lo, struct file *file)
lo->lo_min_dio_size = loop_query_min_dio_size(lo);
}
static int loop_check_backing_file(struct file *file)
{
if (!file->f_op->read_iter)
return -EINVAL;
if ((file->f_mode & FMODE_WRITE) && !file->f_op->write_iter)
return -EINVAL;
return 0;
}
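A compact userspace-flavored sketch of what the new helper enforces: a backing file must expose the iterator ops required by the I/O mode it will serve. The struct layout below is illustrative, not the kernel's:

#include <errno.h>
#include <stdbool.h>

struct file_ops {
        int (*read_iter)(void);
        int (*write_iter)(void);
};

struct file {
        const struct file_ops *f_op;
        bool opened_for_write;
};

static int check_backing_file(const struct file *f)
{
        if (!f->f_op->read_iter)
                return -EINVAL;   /* reads are always required */
        if (f->opened_for_write && !f->f_op->write_iter)
                return -EINVAL;   /* writes only if the mode needs them */
        return 0;
}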
/*
* loop_change_fd switched the backing store of a loopback device to
* a new file. This is useful for operating system installers to free up
@@ -526,6 +537,10 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
if (!file)
return -EBADF;
error = loop_check_backing_file(file);
if (error)
return error;
/* suppress uevents while reconfiguring the device */
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
@@ -963,6 +978,14 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
if (!file)
return -EBADF;
if ((mode & BLK_OPEN_WRITE) && !file->f_op->write_iter)
return -EINVAL;
error = loop_check_backing_file(file);
if (error)
return error;
is_loop = is_loop_device(file);
/* This is safe, since we have a reference from open(). */


@@ -103,7 +103,7 @@ int __init clocksource_i8253_init(void)
#ifdef CONFIG_CLKEVT_I8253
void clockevent_i8253_disable(void)
{
raw_spin_lock(&i8253_lock);
guard(raw_spinlock_irqsave)(&i8253_lock);
/*
* Writing the MODE register should stop the counter, according to
@@ -132,8 +132,6 @@ void clockevent_i8253_disable(void)
outb_p(0, PIT_CH0);
outb_p(0x30, PIT_MODE);
raw_spin_unlock(&i8253_lock);
}
static int pit_shutdown(struct clock_event_device *evt)
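The conversion above swaps an explicit lock/unlock pair for a scope-based guard() that also gains irqsave semantics. A userspace sketch of the same idiom, assuming GCC/Clang's cleanup attribute and a pthread mutex standing in for the raw spinlock:

#include <pthread.h>

static pthread_mutex_t i8253_lock = PTHREAD_MUTEX_INITIALIZER;

static void unlock_cleanup(pthread_mutex_t **lockp)
{
        pthread_mutex_unlock(*lockp);
}

/* Lock now, unlock automatically when the enclosing scope exits. One
 * guard per scope; the kernel's macro generates unique names. */
#define scoped_guard_lock(lock) \
        pthread_mutex_t *__guard __attribute__((cleanup(unlock_cleanup))) = \
                (pthread_mutex_lock(lock), (lock))

static void clockevent_disable(void)
{
        scoped_guard_lock(&i8253_lock);
        /* ... program the PIT; every return path drops the lock ... */
}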


@@ -299,7 +299,8 @@ __ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
import_uuid(&buf->uuid, (u8 *)&rx_buf->uuid);
}
ffa_rx_release();
if (!(flags & PARTITION_INFO_GET_RETURN_COUNT_ONLY))
ffa_rx_release();
mutex_unlock(&drv_info->rx_lock);


@@ -255,6 +255,9 @@ static struct scmi_device *scmi_child_dev_find(struct device *parent,
if (!dev)
return NULL;
/* Drop the refcnt bumped implicitly by device_find_child */
put_device(dev);
return to_scmi_dev(dev);
}


@@ -1248,7 +1248,8 @@ static void xfer_put(const struct scmi_protocol_handle *ph,
}
static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer, ktime_t stop)
struct scmi_xfer *xfer, ktime_t stop,
bool *ooo)
{
struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
@@ -1257,7 +1258,7 @@ static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
* in case of out-of-order receptions of delayed responses
*/
return info->desc->ops->poll_done(cinfo, xfer) ||
try_wait_for_completion(&xfer->done) ||
(*ooo = try_wait_for_completion(&xfer->done)) ||
ktime_after(ktime_get(), stop);
}
@@ -1274,15 +1275,17 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
* itself to support synchronous commands replies.
*/
if (!desc->sync_cmds_completed_on_ret) {
bool ooo = false;
/*
* Poll on xfer using transport provided .poll_done();
* assumes no completion interrupt was available.
*/
ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms);
spin_until_cond(scmi_xfer_done_no_timeout(cinfo,
xfer, stop));
if (ktime_after(ktime_get(), stop)) {
spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer,
stop, &ooo));
if (!ooo && !info->desc->ops->poll_done(cinfo, xfer)) {
dev_err(dev,
"timed out in resp(caller: %pS) - polling\n",
(void *)_RET_IP_);
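The ooo flag closes a classic polling race: the completion can arrive in the same window in which the deadline expires, so a bare "did we time out?" check after the loop can report a spurious failure. A generic sketch of the fixed shape, in plain C rather than the SCMI types:

#include <stdbool.h>
#include <time.h>

static bool past(const struct timespec *deadline)
{
        struct timespec now;

        clock_gettime(CLOCK_MONOTONIC, &now);
        return now.tv_sec > deadline->tv_sec ||
               (now.tv_sec == deadline->tv_sec &&
                now.tv_nsec >= deadline->tv_nsec);
}

static int poll_until(bool (*done)(void), const struct timespec *deadline)
{
        for (;;) {
                if (done())
                        return 0;
                if (past(deadline))
                        /* Final re-check: done() may have become true in
                         * the same instant the deadline passed. */
                        return done() ? 0 : -1;
        }
}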


@@ -1614,11 +1614,9 @@ static inline void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_cap
#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
void amdgpu_choose_low_power_state(struct amdgpu_device *adev);
#else
static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
static inline void amdgpu_choose_low_power_state(struct amdgpu_device *adev) { }
#endif
void amdgpu_register_gpu_instance(struct amdgpu_device *adev);


@@ -1533,22 +1533,4 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
#endif /* CONFIG_AMD_PMC */
}
/**
* amdgpu_choose_low_power_state
*
* @adev: amdgpu_device_pointer
*
* Choose the target low power state for the GPU
*/
void amdgpu_choose_low_power_state(struct amdgpu_device *adev)
{
if (adev->in_runpm)
return;
if (amdgpu_acpi_is_s0ix_active(adev))
adev->in_s0ix = true;
else if (amdgpu_acpi_is_s3_active(adev))
adev->in_s3 = true;
}
#endif /* CONFIG_SUSPEND */


@@ -4907,28 +4907,20 @@ static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
* @data: data
*
* This function is called when the system is about to suspend or hibernate.
* It is used to evict resources from the device before the system goes to
* sleep while there is still access to swap.
* It is used to set the appropriate flags so that eviction can be optimized
* in the pm prepare callback.
*/
static int amdgpu_device_pm_notifier(struct notifier_block *nb, unsigned long mode,
void *data)
{
struct amdgpu_device *adev = container_of(nb, struct amdgpu_device, pm_nb);
int r;
switch (mode) {
case PM_HIBERNATION_PREPARE:
adev->in_s4 = true;
fallthrough;
case PM_SUSPEND_PREPARE:
r = amdgpu_device_evict_resources(adev);
/*
* This is considered non-fatal at this time because
* amdgpu_device_prepare() will also fatally evict resources.
* See https://gitlab.freedesktop.org/drm/amd/-/issues/3781
*/
if (r)
drm_warn(adev_to_drm(adev), "Failed to evict resources, freeze active processes if problems occur: %d\n", r);
break;
case PM_POST_HIBERNATION:
adev->in_s4 = false;
break;
}
@@ -4949,15 +4941,13 @@ int amdgpu_device_prepare(struct drm_device *dev)
struct amdgpu_device *adev = drm_to_adev(dev);
int i, r;
amdgpu_choose_low_power_state(adev);
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
/* Evict the majority of BOs before starting suspend sequence */
r = amdgpu_device_evict_resources(adev);
if (r)
goto unprepare;
return r;
flush_delayed_work(&adev->gfx.gfx_off_delay_work);
@@ -4968,15 +4958,10 @@ int amdgpu_device_prepare(struct drm_device *dev)
continue;
r = adev->ip_blocks[i].version->funcs->prepare_suspend(&adev->ip_blocks[i]);
if (r)
goto unprepare;
return r;
}
return 0;
unprepare:
adev->in_s0ix = adev->in_s3 = adev->in_s4 = false;
return r;
}
/**


@@ -2615,13 +2615,8 @@ static int amdgpu_pmops_freeze(struct device *dev)
static int amdgpu_pmops_thaw(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
int r;
r = amdgpu_device_resume(drm_dev, true);
adev->in_s4 = false;
return r;
return amdgpu_device_resume(drm_dev, true);
}
static int amdgpu_pmops_poweroff(struct device *dev)
@@ -2634,9 +2629,6 @@ static int amdgpu_pmops_poweroff(struct device *dev)
static int amdgpu_pmops_restore(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
adev->in_s4 = false;
return amdgpu_device_resume(drm_dev, true);
}


@@ -66,7 +66,6 @@
#define VCN_ENC_CMD_REG_WAIT 0x0000000c
#define VCN_AON_SOC_ADDRESS_2_0 0x1f800
#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
#define VCN_VID_IP_ADDRESS_2_0 0x0
#define VCN_AON_IP_ADDRESS_2_0 0x30000


@@ -41,7 +41,12 @@ static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev,
{
if (!ring || !ring->funcs->emit_wreg) {
WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
/* We just need to read back a register to post the write.
* Reading back the remapped register causes problems on
* some platforms so just read back the memory size register.
*/
if (adev->nbio.funcs->get_memsize)
adev->nbio.funcs->get_memsize(adev);
} else {
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}
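The recurring change in these hdp_v*_flush_hdp() hunks replaces a read-back of the remapped flush register with a read of a known-safe register. The read exists only to post the preceding MMIO write; a sketch of the general pattern, with illustrative names:

#include <stdint.h>

/* Posted PCI writes may linger in bridges and buffers; a read from the
 * same device cannot complete until earlier writes have, so it forces
 * them out. Any harmless register on the device will do. */
static inline void mmio_write(volatile uint32_t *regs, unsigned int off,
                              uint32_t val)
{
        regs[off] = val;
}

static inline void mmio_post(volatile uint32_t *regs, unsigned int safe_off)
{
        (void)regs[safe_off];
}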


@@ -32,7 +32,12 @@ static void hdp_v5_0_flush_hdp(struct amdgpu_device *adev,
{
if (!ring || !ring->funcs->emit_wreg) {
WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
/* We just need to read back a register to post the write.
* Reading back the remapped register causes problems on
* some platforms so just read back the memory size register.
*/
if (adev->nbio.funcs->get_memsize)
adev->nbio.funcs->get_memsize(adev);
} else {
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}


@@ -33,7 +33,17 @@ static void hdp_v5_2_flush_hdp(struct amdgpu_device *adev,
if (!ring || !ring->funcs->emit_wreg) {
WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,
0);
RREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
if (amdgpu_sriov_vf(adev)) {
/* this is fine because SR_IOV doesn't remap the register */
RREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
} else {
/* We just need to read back a register to post the write.
* Reading back the remapped register causes problems on
* some platforms so just read back the memory size register.
*/
if (adev->nbio.funcs->get_memsize)
adev->nbio.funcs->get_memsize(adev);
}
} else {
amdgpu_ring_emit_wreg(ring,
(adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,


@@ -35,7 +35,12 @@ static void hdp_v6_0_flush_hdp(struct amdgpu_device *adev,
{
if (!ring || !ring->funcs->emit_wreg) {
WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
/* We just need to read back a register to post the write.
* Reading back the remapped register causes problems on
* some platforms so just read back the memory size register.
*/
if (adev->nbio.funcs->get_memsize)
adev->nbio.funcs->get_memsize(adev);
} else {
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}


@@ -32,7 +32,12 @@ static void hdp_v7_0_flush_hdp(struct amdgpu_device *adev,
{
if (!ring || !ring->funcs->emit_wreg) {
WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
/* We just need to read back a register to post the write.
* Reading back the remapped register causes problems on
* some platforms so just read back the memory size register.
*/
if (adev->nbio.funcs->get_memsize)
adev->nbio.funcs->get_memsize(adev);
} else {
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}


@@ -39,6 +39,7 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fa00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48200
#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x1fd
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x503


@@ -39,6 +39,7 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fa00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48200
#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x27
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x0f


@@ -40,6 +40,7 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fa00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48200
#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x27
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x0f


@@ -46,6 +46,7 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48300
#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
#define VCN_HARVEST_MMSCH 0
@@ -614,7 +615,8 @@ static void vcn_v4_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
/* VCN global tiling registers */
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
VCN, inst_idx, regUVD_GFX10_ADDR_CONFIG),
adev->gfx.config.gb_addr_config, 0, indirect);
}
/**


@@ -45,6 +45,7 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48300
#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
static const struct amdgpu_hwip_reg_entry vcn_reg_list_4_0_3[] = {
SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS),


@@ -46,6 +46,7 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0 (0x48300 + 0x38000)
#define VCN1_AON_SOC_ADDRESS_3_0 (0x48000 + 0x38000)
#define VCN_HARVEST_MMSCH 0


@@ -533,7 +533,8 @@ static void vcn_v5_0_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
/* VCN global tiling registers */
WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
VCN, inst_idx, regUVD_GFX10_ADDR_CONFIG),
adev->gfx.config.gb_addr_config, 0, indirect);
return;
}


@@ -673,15 +673,21 @@ static void dm_crtc_high_irq(void *interrupt_params)
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
if (acrtc->dm_irq_params.stream &&
acrtc->dm_irq_params.vrr_params.supported &&
acrtc->dm_irq_params.freesync_config.state ==
VRR_STATE_ACTIVE_VARIABLE) {
acrtc->dm_irq_params.vrr_params.supported) {
bool replay_en = acrtc->dm_irq_params.stream->link->replay_settings.replay_feature_enabled;
bool psr_en = acrtc->dm_irq_params.stream->link->psr_settings.psr_feature_enabled;
bool fs_active_var_en = acrtc->dm_irq_params.freesync_config.state == VRR_STATE_ACTIVE_VARIABLE;
mod_freesync_handle_v_update(adev->dm.freesync_module,
acrtc->dm_irq_params.stream,
&acrtc->dm_irq_params.vrr_params);
dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
&acrtc->dm_irq_params.vrr_params.adjust);
/* update vmin_vmax only if freesync is enabled, or only if PSR and REPLAY are disabled */
if (fs_active_var_en || (!fs_active_var_en && !replay_en && !psr_en)) {
dc_stream_adjust_vmin_vmax(adev->dm.dc,
acrtc->dm_irq_params.stream,
&acrtc->dm_irq_params.vrr_params.adjust);
}
}
/*
@@ -12743,7 +12749,7 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(
* Transient states before tunneling is enabled could
* lead to this error. We can ignore this for now.
*/
if (p_notify->result != AUX_RET_ERROR_PROTOCOL_ERROR) {
if (p_notify->result == AUX_RET_ERROR_PROTOCOL_ERROR) {
DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n",
payload->address, payload->length,
p_notify->result);
@@ -12752,22 +12758,14 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(
goto out;
}
payload->reply[0] = adev->dm.dmub_notify->aux_reply.command & 0xF;
if (adev->dm.dmub_notify->aux_reply.command & 0xF0)
/* The reply is stored in the top nibble of the command. */
payload->reply[0] = (adev->dm.dmub_notify->aux_reply.command >> 4) & 0xF;
payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
if (!payload->write && p_notify->aux_reply.length &&
(payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK)) {
if (payload->length != p_notify->aux_reply.length) {
DRM_WARN("invalid read length %d from DPIA AUX 0x%x(%d)!\n",
p_notify->aux_reply.length,
payload->address, payload->length);
*operation_result = AUX_RET_ERROR_INVALID_REPLY;
goto out;
}
if (!payload->write && p_notify->aux_reply.length)
memcpy(payload->data, p_notify->aux_reply.data,
p_notify->aux_reply.length);
}
/* success */
ret = p_notify->aux_reply.length;


@@ -51,6 +51,9 @@
#define PEAK_FACTOR_X1000 1006
/*
* This function handles both native AUX and I2C-Over-AUX transactions.
*/
static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg)
{
@@ -87,15 +90,25 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
if (adev->dm.aux_hpd_discon_quirk) {
if (msg->address == DP_SIDEBAND_MSG_DOWN_REQ_BASE &&
operation_result == AUX_RET_ERROR_HPD_DISCON) {
result = 0;
result = msg->size;
operation_result = AUX_RET_SUCCESS;
}
}
if (payload.write && result >= 0)
result = msg->size;
/*
* result equals to 0 includes the cases of AUX_DEFER/I2C_DEFER
*/
if (payload.write && result >= 0) {
if (result) {
/*one byte indicating partially written bytes. Force 0 to retry*/
drm_info(adev_to_drm(adev), "amdgpu: AUX partially written\n");
result = 0;
} else if (!payload.reply[0])
/*I2C_ACK|AUX_ACK*/
result = msg->size;
}
if (result < 0)
if (result < 0) {
switch (operation_result) {
case AUX_RET_SUCCESS:
break;
@@ -114,6 +127,13 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
break;
}
drm_info(adev_to_drm(adev), "amdgpu: DP AUX transfer fail:%d\n", operation_result);
}
if (payload.reply[0])
drm_info(adev_to_drm(adev), "amdgpu: AUX reply command not ACK: 0x%02x.",
payload.reply[0]);
return result;
}


@@ -234,7 +234,9 @@ static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_s
if (!result)
return false;
DC_FP_START();
result = dml2_build_mode_programming(mode_programming);
DC_FP_END();
if (!result)
return false;
@@ -277,7 +279,9 @@ static bool dml21_check_mode_support(const struct dc *in_dc, struct dc_state *co
mode_support->dml2_instance = dml_init->dml2_instance;
dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx);
dml_ctx->v21.mode_programming.dml2_instance->scratch.build_mode_programming_locals.mode_programming_params.programming = dml_ctx->v21.mode_programming.programming;
DC_FP_START();
is_supported = dml2_check_mode_supported(mode_support);
DC_FP_END();
if (!is_supported)
return false;
@@ -288,16 +292,12 @@ bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml
{
bool out = false;
DC_FP_START();
/* Use dml_validate_only for fast_validate path */
if (fast_validate)
out = dml21_check_mode_support(in_dc, context, dml_ctx);
else
out = dml21_mode_check_and_programming(in_dc, context, dml_ctx);
DC_FP_END();
return out;
}
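These dml21 hunks narrow the DC_FP_START()/DC_FP_END() bracket from the whole validate path down to just the FP-heavy calls. The shape being aimed for, sketched with placeholder helpers rather than the driver's real functions:

/* Kernel FP sections disable preemption, so keep the protected region
 * as tight as possible and never wrap slow or sleeping work in it. */
static void fp_begin(void) { /* driver: DC_FP_START() */ }
static void fp_end(void)   { /* driver: DC_FP_END() */ }
static void prepare_config(void) { }
static int check_mode_support(void) { return 1; }
static int build_mode_programming(void) { return 1; }
static int publish_result(void) { return 0; }

static int validate(int fast)
{
        int ok;

        prepare_config();       /* no FP needed: outside the bracket */

        fp_begin();
        ok = fast ? check_mode_support() : build_mode_programming();
        fp_end();

        if (!ok)
                return -1;
        return publish_result(); /* again outside the bracket */
}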


@@ -973,7 +973,9 @@ static void populate_dml_surface_cfg_from_plane_state(enum dml_project_id dml2_p
}
}
static void get_scaler_data_for_plane(const struct dc_plane_state *in, struct dc_state *context, struct scaler_data *out)
static struct scaler_data *get_scaler_data_for_plane(
const struct dc_plane_state *in,
struct dc_state *context)
{
int i;
struct pipe_ctx *temp_pipe = &context->res_ctx.temp_pipe;
@@ -994,7 +996,7 @@ static void get_scaler_data_for_plane(const struct dc_plane_state *in, struct dc
}
ASSERT(i < MAX_PIPES);
memcpy(out, &temp_pipe->plane_res.scl_data, sizeof(*out));
return &temp_pipe->plane_res.scl_data;
}
static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned int location,
@@ -1057,11 +1059,7 @@ static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out
const struct dc_plane_state *in, struct dc_state *context,
const struct soc_bounding_box_st *soc)
{
struct scaler_data *scaler_data = kzalloc(sizeof(*scaler_data), GFP_KERNEL);
if (!scaler_data)
return;
get_scaler_data_for_plane(in, context, scaler_data);
struct scaler_data *scaler_data = get_scaler_data_for_plane(in, context);
out->CursorBPP[location] = dml_cur_32bit;
out->CursorWidth[location] = 256;
@@ -1126,8 +1124,6 @@ static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out
out->DynamicMetadataTransmittedBytes[location] = 0;
out->NumberOfCursors[location] = 1;
kfree(scaler_data);
}
static unsigned int map_stream_to_dml_display_cfg(const struct dml2_context *dml2,


@@ -2114,8 +2114,6 @@ static bool dcn32_resource_construct(
#define REG_STRUCT dccg_regs
dccg_regs_init();
DC_FP_START();
ctx->dc_bios->regs = &bios_regs;
pool->base.res_cap = &res_cap_dcn32;
@@ -2501,14 +2499,10 @@ static bool dcn32_resource_construct(
if (ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev) && (dc->config.sdpif_request_limit_words_per_umc == 0))
dc->config.sdpif_request_limit_words_per_umc = 16;
DC_FP_END();
return true;
create_fail:
DC_FP_END();
dcn32_resource_destruct(pool);
return false;


@@ -549,7 +549,7 @@ int drm_dev_wedged_event(struct drm_device *dev, unsigned long method)
if (drm_WARN_ONCE(dev, !recovery, "invalid recovery method %u\n", opt))
break;
len += scnprintf(event_string + len, sizeof(event_string), "%s,", recovery);
len += scnprintf(event_string + len, sizeof(event_string) - len, "%s,", recovery);
}
if (recovery)
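The one-liner above fixes a common accumulation bug: each append must shrink the remaining space by what has already been written. A userspace sketch with snprintf() standing in for scnprintf() (note scnprintf() returns bytes actually written, which is what keeps the idiom safe near the end of the buffer):

#include <stdio.h>
#include <string.h>

int main(void)
{
        char buf[64];
        size_t len = 0;
        const char *methods[] = { "none", "rebind", "bus-reset" };

        for (size_t i = 0; i < 3; i++)
                len += snprintf(buf + len, sizeof(buf) - len, "%s,",
                                methods[i]);

        buf[strlen(buf) - 1] = '\0';    /* drop the trailing comma */
        puts(buf);                      /* none,rebind,bus-reset */
        return 0;
}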


@@ -242,7 +242,7 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
bool is_mst = intel_dp->is_mst;
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
int bpp_x16, slots = -EINVAL;
int dsc_slice_count = 0;
int max_dpt_bpp_x16;


@@ -1001,6 +1001,10 @@ void intel_rps_dec_waiters(struct intel_rps *rps)
if (rps_uses_slpc(rps)) {
slpc = rps_to_slpc(rps);
/* Don't decrement num_waiters for req where increment was skipped */
if (slpc->power_profile == SLPC_POWER_PROFILES_POWER_SAVING)
return;
intel_guc_slpc_dec_waiters(slpc);
} else {
atomic_dec(&rps->num_waiters);
@@ -1029,11 +1033,15 @@ void intel_rps_boost(struct i915_request *rq)
if (slpc->power_profile == SLPC_POWER_PROFILES_POWER_SAVING)
return;
if (slpc->min_freq_softlimit >= slpc->boost_freq)
return;
/* Return if old value is non zero */
if (!atomic_fetch_inc(&slpc->num_waiters)) {
/*
* Skip queuing boost work if frequency is already boosted,
* but still increment num_waiters.
*/
if (slpc->min_freq_softlimit >= slpc->boost_freq)
return;
GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n",
rq->fence.context, rq->fence.seqno);
queue_work(rps_to_gt(rps)->i915->unordered_wq,


@@ -1027,27 +1027,28 @@ static const struct panel_desc auo_g070vvn01 = {
},
};
static const struct drm_display_mode auo_g101evn010_mode = {
.clock = 68930,
.hdisplay = 1280,
.hsync_start = 1280 + 82,
.hsync_end = 1280 + 82 + 2,
.htotal = 1280 + 82 + 2 + 84,
.vdisplay = 800,
.vsync_start = 800 + 8,
.vsync_end = 800 + 8 + 2,
.vtotal = 800 + 8 + 2 + 6,
static const struct display_timing auo_g101evn010_timing = {
.pixelclock = { 64000000, 68930000, 85000000 },
.hactive = { 1280, 1280, 1280 },
.hfront_porch = { 8, 64, 256 },
.hback_porch = { 8, 64, 256 },
.hsync_len = { 40, 168, 767 },
.vactive = { 800, 800, 800 },
.vfront_porch = { 4, 8, 100 },
.vback_porch = { 4, 8, 100 },
.vsync_len = { 8, 16, 223 },
};
static const struct panel_desc auo_g101evn010 = {
.modes = &auo_g101evn010_mode,
.num_modes = 1,
.timings = &auo_g101evn010_timing,
.num_timings = 1,
.bpc = 6,
.size = {
.width = 216,
.height = 135,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
.bus_flags = DRM_BUS_FLAG_DE_HIGH,
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};


@@ -7,20 +7,6 @@
#include <linux/page-flags.h>
#include <linux/swap.h>
/*
* Casting from randomized struct file * to struct ttm_backup * is fine since
* struct ttm_backup is never defined nor dereferenced.
*/
static struct file *ttm_backup_to_file(struct ttm_backup *backup)
{
return (void *)backup;
}
static struct ttm_backup *ttm_file_to_backup(struct file *file)
{
return (void *)file;
}
/*
* Need to map shmem indices to handle since a handle value
* of 0 means error, following the swp_entry_t convention.
@@ -40,12 +26,12 @@ static pgoff_t ttm_backup_handle_to_shmem_idx(pgoff_t handle)
* @backup: The struct backup pointer used to obtain the handle
* @handle: The handle obtained from the @backup_page function.
*/
void ttm_backup_drop(struct ttm_backup *backup, pgoff_t handle)
void ttm_backup_drop(struct file *backup, pgoff_t handle)
{
loff_t start = ttm_backup_handle_to_shmem_idx(handle);
start <<= PAGE_SHIFT;
shmem_truncate_range(file_inode(ttm_backup_to_file(backup)), start,
shmem_truncate_range(file_inode(backup), start,
start + PAGE_SIZE - 1);
}
@@ -55,16 +41,15 @@ void ttm_backup_drop(struct ttm_backup *backup, pgoff_t handle)
* @backup: The struct backup pointer used to back up the page.
* @dst: The struct page to copy into.
* @handle: The handle returned when the page was backed up.
* @intr: Try to perform waits interruptable or at least killable.
* @intr: Try to perform waits interruptible or at least killable.
*
* Return: 0 on success, Negative error code on failure, notably
* -EINTR if @intr was set to true and a signal is pending.
*/
int ttm_backup_copy_page(struct ttm_backup *backup, struct page *dst,
int ttm_backup_copy_page(struct file *backup, struct page *dst,
pgoff_t handle, bool intr)
{
struct file *filp = ttm_backup_to_file(backup);
struct address_space *mapping = filp->f_mapping;
struct address_space *mapping = backup->f_mapping;
struct folio *from_folio;
pgoff_t idx = ttm_backup_handle_to_shmem_idx(handle);
@@ -106,12 +91,11 @@ int ttm_backup_copy_page(struct ttm_backup *backup, struct page *dst,
* the folio size- and usage.
*/
s64
ttm_backup_backup_page(struct ttm_backup *backup, struct page *page,
ttm_backup_backup_page(struct file *backup, struct page *page,
bool writeback, pgoff_t idx, gfp_t page_gfp,
gfp_t alloc_gfp)
{
struct file *filp = ttm_backup_to_file(backup);
struct address_space *mapping = filp->f_mapping;
struct address_space *mapping = backup->f_mapping;
unsigned long handle = 0;
struct folio *to_folio;
int ret;
@@ -161,9 +145,9 @@ ttm_backup_backup_page(struct ttm_backup *backup, struct page *page,
*
* After a call to this function, it's illegal to use the @backup pointer.
*/
void ttm_backup_fini(struct ttm_backup *backup)
void ttm_backup_fini(struct file *backup)
{
fput(ttm_backup_to_file(backup));
fput(backup);
}
/**
@@ -194,14 +178,10 @@ EXPORT_SYMBOL_GPL(ttm_backup_bytes_avail);
*
* Create a backup utilizing shmem objects.
*
* Return: A pointer to a struct ttm_backup on success,
* Return: A pointer to a struct file on success,
* an error pointer on error.
*/
struct ttm_backup *ttm_backup_shmem_create(loff_t size)
struct file *ttm_backup_shmem_create(loff_t size)
{
struct file *filp;
filp = shmem_file_setup("ttm shmem backup", size, 0);
return ttm_file_to_backup(filp);
return shmem_file_setup("ttm shmem backup", size, 0);
}


@@ -506,7 +506,7 @@ static void ttm_pool_allocated_page_commit(struct page *allocated,
* if successful, populate the page-table and dma-address arrays.
*/
static int ttm_pool_restore_commit(struct ttm_pool_tt_restore *restore,
struct ttm_backup *backup,
struct file *backup,
const struct ttm_operation_ctx *ctx,
struct ttm_pool_alloc_state *alloc)
@@ -655,7 +655,7 @@ static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
pgoff_t start_page, pgoff_t end_page)
{
struct page **pages = &tt->pages[start_page];
struct ttm_backup *backup = tt->backup;
struct file *backup = tt->backup;
pgoff_t i, nr;
for (i = start_page; i < end_page; i += nr, pages += nr) {
@@ -963,7 +963,7 @@ void ttm_pool_drop_backed_up(struct ttm_tt *tt)
long ttm_pool_backup(struct ttm_pool *pool, struct ttm_tt *tt,
const struct ttm_backup_flags *flags)
{
struct ttm_backup *backup = tt->backup;
struct file *backup = tt->backup;
struct page *page;
unsigned long handle;
gfp_t alloc_gfp;


@@ -544,7 +544,7 @@ EXPORT_SYMBOL(ttm_tt_pages_limit);
*/
int ttm_tt_setup_backup(struct ttm_tt *tt)
{
struct ttm_backup *backup =
struct file *backup =
ttm_backup_shmem_create(((loff_t)tt->num_pages) << PAGE_SHIFT);
if (WARN_ON_ONCE(!(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)))


@@ -744,11 +744,16 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
return DRM_GPU_SCHED_STAT_NOMINAL;
}
/* If the current address or return address have changed, then the GPU
* has probably made progress and we should delay the reset. This
* could fail if the GPU got in an infinite loop in the CL, but that
* is pretty unlikely outside of an i-g-t testcase.
*/
static void
v3d_sched_skip_reset(struct drm_sched_job *sched_job)
{
struct drm_gpu_scheduler *sched = sched_job->sched;
spin_lock(&sched->job_list_lock);
list_add(&sched_job->list, &sched->pending_list);
spin_unlock(&sched->job_list_lock);
}
static enum drm_gpu_sched_stat
v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
u32 *timedout_ctca, u32 *timedout_ctra)
@@ -758,9 +763,16 @@ v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(q));
u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(q));
/* If the current address or return address have changed, then the GPU
* has probably made progress and we should delay the reset. This
* could fail if the GPU got in an infinite loop in the CL, but that
* is pretty unlikely outside of an i-g-t testcase.
*/
if (*timedout_ctca != ctca || *timedout_ctra != ctra) {
*timedout_ctca = ctca;
*timedout_ctra = ctra;
v3d_sched_skip_reset(sched_job);
return DRM_GPU_SCHED_STAT_NOMINAL;
}
@@ -800,11 +812,13 @@ v3d_csd_job_timedout(struct drm_sched_job *sched_job)
struct v3d_dev *v3d = job->base.v3d;
u32 batches = V3D_CORE_READ(0, V3D_CSD_CURRENT_CFG4(v3d->ver));
/* If we've made progress, skip reset and let the timer get
* rearmed.
/* If we've made progress, skip reset, add the job to the pending
* list, and let the timer get rearmed.
*/
if (job->timedout_batches != batches) {
job->timedout_batches = batches;
v3d_sched_skip_reset(sched_job);
return DRM_GPU_SCHED_STAT_NOMINAL;
}
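Both timeout handlers use the same progress heuristic: if a progress counter has moved since the last expiry, defer the reset and re-arm the timer. A generic sketch with illustrative types, not the v3d structures:

#include <stdbool.h>
#include <stdint.h>

struct progress_watchdog {
        uint32_t last_seen;
};

/* Called from the timeout path with the current progress reading. */
static bool should_reset(struct progress_watchdog *wd, uint32_t now)
{
        if (wd->last_seen != now) {
                wd->last_seen = now;    /* progress was made: re-arm */
                return false;
        }
        return true;                    /* genuinely stuck: reset */
}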


@@ -46,8 +46,11 @@ static void read_l3cc_table(struct xe_gt *gt,
unsigned int fw_ref, i;
u32 reg_val;
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
KUNIT_ASSERT_NE_MSG(test, fw_ref, 0, "Forcewake Failed.\n");
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
xe_force_wake_put(gt_to_fw(gt), fw_ref);
KUNIT_ASSERT_TRUE_MSG(test, true, "Forcewake Failed.\n");
}
for (i = 0; i < info->num_mocs_regs; i++) {
if (!(i & 1)) {
